org.antlr.codegen.templates.Scala.Scala.stg

/*
 [The "BSD license"]
 Copyright (c) 2010 Matthew Lloyd
 http://linkedin.com/in/matthewl

 All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
 are met:
 1. Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.
 3. The name of the author may not be used to endorse or promote products
    derived from this software without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
scalaTypeInitMap ::= [
	"Int":"0",
	"Long":"0",
	"Float":"0.0f",
	"Double":"0.0",
	"Boolean":"false",
	"Byte":"0",
	"Short":"0",
	"Char":"0",
	default:"null" // anything other than an atomic type
]
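/* Editorial note (illustration, not part of the template): this map supplies
 * the initializer used when an attribute of the given Scala type must be
 * declared before it is assigned. For a hypothetical attribute "count: Int"
 * and a tree-typed one, the generated declarations would look roughly like:
 *
 *     var count: Int = 0              // "Int" maps to "0"
 *     var node: CommonTree = null     // non-atomic types fall back to "null"
 */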

/** The overall file structure of a recognizer; stores methods for rules
 *  and cyclic DFAs plus support code.
 */
outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
           docComment, recognizer,
           name, tokens, tokenNames, rules, cyclicDFAs,
	   bitsets, buildTemplate, buildAST, rewriteMode, profile,
	   backtracking, synpreds, memoize, numRules,
	   fileName, ANTLRVersion, generatedTimestamp, trace,
	   scopes, superClass, literals) ::=
<<
// $ANTLR <ANTLRVersion> <fileName> <generatedTimestamp>
<actions.(actionScope).header>

<@imports>
import org.antlr.runtime._
<if(TREE_PARSER)>
import org.antlr.runtime.tree._
<endif>
<@end>

<docComment>
<recognizer>
>>

lexer(grammar, name, tokens, scopes, rules, numRules, filterMode, labelType="CommonToken",
      superClass="Lexer") ::= <<
object <grammar.recognizerName> {
    <tokens:{it | val <it.name> = <it.type>}; separator="\n">

     
}

class <grammar.recognizerName>(input: CharStream, state<grammar.delegators:{g|, <g:delegateName()>: <g.recognizerName>}>: RecognizerSharedState) extends <@superClassName><superClass><@end>(input, state<grammar.delegators:{g|, <g:delegateName()>}>) {
    import <grammar.recognizerName>._
    

    // delegates
     }; separator="\n">
    // delegators
     }; separator="\n">
     gParent;}>

    }>

    def this(input: CharStream<grammar.delegators:{g|, <g:delegateName()>: <g.recognizerName>}>) =
        this(input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}>)



        state.ruleMemo = new Array[java.util.Map[_,_]](<numRules>+1)<\n>


         = new (input, state}>, this)}; separator="\n">
         = }; separator="\n">
        }>

    override def getGrammarFileName = "<fileName>"


    

    

    }>
    <cyclicDFAs:{dfa | val dfa<dfa.decisionNumber> = new <grammar.recognizerName>.DFA<dfa.decisionNumber>(this)}; separator="\n">
}
>>

/** An override of Lexer.nextToken() that backtracks over mTokens() looking
 *  for matches.  No error can be reported upon a mismatch; just rewind,
 *  consume a token, and try again.  backtracking needs to be set as well.
 *  Make rule memoization happen only at levels above 1, as we start mTokens
 *  at backtracking==1.
 */
filteringNextToken() ::= <<
override def nextToken(): Token = {
    while (true) {
        if ( input.LA(1)==CharStream.EOF ) {
            val eof: Token = new CommonToken(input.asInstanceOf[CharStream], Token.EOF,
                                        Token.DEFAULT_CHANNEL,
                                        input.index(), input.index())
            eof.setLine(getLine())
            eof.setCharPositionInLine(getCharPositionInLine())
            return eof
        }
        state.token = null
        state.channel = Token.DEFAULT_CHANNEL
        state.tokenStartCharIndex = input.index()
        state.tokenStartCharPositionInLine = input.getCharPositionInLine()
        state.tokenStartLine = input.getLine()
        state.text = null
        try {
            val m = input.mark()
            state.backtracking=1 
            state.failed=false
            mTokens()
            state.backtracking=0
            
            if ( state.failed ) {
                input.rewind(m)
                input.consume() 
            }
            else {
                emit()
                return state.token
            }
        }
        catch {
            case re: RecognitionException =>
            // shouldn't happen in backtracking mode, but...
            reportError(re)
            recover(re)
        }
    }
}

override def memoize(input: IntStream,
                     ruleIndex: Int,
                     ruleStartIndex: Int) = {
    if ( state.backtracking>1 ) super.memoize(input, ruleIndex, ruleStartIndex)
}

override def alreadyParsedRule(input: IntStream, ruleIndex: Int): Boolean = {
    if ( state.backtracking>1 ) return super.alreadyParsedRule(input, ruleIndex)
    false
}
>>
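/* Editorial note: in filter mode every attempt at mTokens() runs with
 * state.backtracking == 1, so the two overrides above gate memoization to
 * nested syntactic predicates only (levels 2 and up). A nested predicate
 * invocation, sketched with a hypothetical rule name, proceeds roughly as:
 *
 *     state.backtracking += 1      // now 2: memoize/alreadyParsedRule active
 *     val start = input.mark()
 *     mFoo_fragment()              // hypothetical predicate fragment
 *     input.rewind(start)
 *     state.backtracking -= 1      // back to 1: memoization gated off again
 */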

actionGate() ::= "state.backtracking==0"

filteringActionGate() ::= "state.backtracking==1"
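/* Editorial note: these gates wrap user actions so they only fire on a real
 * parse, never while speculating. A grammar action { println("hit") } would
 * be emitted guarded roughly as:
 *
 *     if ( state.backtracking==0 ) { println("hit") }    // actionGate
 *
 * In filter mode the baseline backtracking level is 1, hence the separate
 * filteringActionGate.
 */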

/** How to generate a parser */
genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
              bitsets, inputStreamType, superClass,
              labelType, members, rewriteElementType,
              filterMode, ASTLabelType="Object") ::= <<
object <grammar.recognizerName> {

    val tokenNames = Array(
        "\<invalid>", "\<EOR>", "\<DOWN>", "\<UP>", <tokenNames; separator=", ">
    )<\n>


    <tokens:{it | val <it.name> = <it.type>}; separator="\n">

     

    _in_},
                    words64=it.bits)>}>
}

class <grammar.recognizerName>(input: <inputStreamType>, state<grammar.delegators:{g|, <g:delegateName()>: <g.recognizerName>}>: RecognizerSharedState) extends <@superClassName><superClass><@end>(input, state) {
    import <grammar.recognizerName>._
    // delegates
     }; separator="\n">
    // delegators
     }; separator="\n">
     gParent;}>

    }>

    <@members>
    
    def this(input: <inputStreamType><grammar.delegators:{g|, <g:delegateName()>: <g.recognizerName>}>) =
        this(input, new RecognizerSharedState()<grammar.delegators:{g|, <g:delegateName()>}>)

        
         = new (input, state}>, this)}; separator="\n">
         = .}; separator="\n">
        }>
    <@end>

    override def getTokenNames: Array[String] = tokenNames
    override def getGrammarFileName = "<fileName>"

    

    


    // Delegated rules
():  = \{ return .(}; separator=", ">) \}}; separator="\n">

    }>

    <cyclicDFAs:{dfa | val dfa<dfa.decisionNumber> = new <grammar.recognizerName>.DFA<dfa.decisionNumber>(this)}; separator="\n">
}
>>
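/* Editorial note: for a grammar named T, the template above produces a
 * companion pair shaped roughly like this sketch (names and token values
 * are hypothetical):
 *
 *     object TParser {
 *         val tokenNames = Array("<invalid>", "<EOR>", "<DOWN>", "<UP>", "ID")
 *         val ID = 4                                    // token type constants
 *         val FOLLOW_ID_in_decl10 = new BitSet(Array[Long](0x10L))
 *     }
 *
 *     class TParser(input: TokenStream, state: RecognizerSharedState)
 *             extends Parser(input, state) {
 *         import TParser._
 *         override def getTokenNames: Array[String] = tokenNames
 *         override def getGrammarFileName = "T.g"
 *     }
 */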

parserCtorBody() ::= <<


this.state.ruleMemo = new Array[java.util.Map[_,_]](<numRules>+1)<\n>


 = }; separator="\n">
>>

parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets,
       ASTLabelType="Object", superClass="Parser", labelType="Token",
       members={<actions.parser.members>}) ::= <<
<genericParser(inputStreamType="TokenStream", rewriteElementType="Token", filterMode=false)>
>>

/** How to generate a tree parser; same as parser except the input
 *  stream is a different type.
 */
treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules,
           numRules, bitsets, filterMode, labelType={<ASTLabelType>}, ASTLabelType="Object",
           superClass={<if(filterMode)><if(buildAST)>TreeRewriter<else>TreeFilter<endif><else>TreeParser<endif>},
           members={<actions.treeparser.members>}
           ) ::= <<
<genericParser(inputStreamType="TreeNodeStream", rewriteElementType="Node", filterMode=filterMode)>
>>

/** A simpler version of the rule template, specific to the imaginary
 *  rules created for syntactic predicates.  Since they never have return
 *  values or parameters, generate the simplest possible method.  Skip the
 *  normal memoization machinery here too; it would be wasted effort.
 *  Because predicates cannot be inlined into the invoking rule, they must
 *  live in rules of their own.
 */
synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
<<
// $ANTLR start <ruleName>
@throws(classOf[RecognitionException])
def <ruleName>_fragment(<ruleDescriptor.parameterScope:parameterScope()>): Unit = {
    <ruleLabelDefs()>
<if(trace)>
    traceIn("<ruleName>_fragment", <ruleDescriptor.index>)
    try {
        <block>
    }
    finally {
        traceOut("<ruleName>_fragment", <ruleDescriptor.index>)
    }
<else>
    <block>
<endif>
}
// $ANTLR end <ruleName>
>>
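/* Editorial note: for a predicate named synpred1_T the template above yields
 * a fragment method shaped roughly like (rule body elided):
 *
 *     // $ANTLR start synpred1_T
 *     @throws(classOf[RecognitionException])
 *     def synpred1_T_fragment(): Unit = {
 *         // match the predicated elements; no return value, no memoization
 *     }
 *     // $ANTLR end synpred1_T
 */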

synpred(name) ::= <<
final def <name>(): Boolean = {
    state.backtracking+=1
    <@start()>
    val start = input.mark()
    try {
        <name>_fragment() // can never throw exception
    } catch {
        case re: RecognitionException =>
        System.err.println("impossible: "+re)
    }
    val success = !state.failed
    input.rewind(start)
    <@stop()>
    state.backtracking-=1
    state.failed=false
    success
}<\n>
>>
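/* Editorial note: a generated DFA decision consults the predicate through the
 * boolean wrapper above, e.g. (hypothetical decision code):
 *
 *     val alt = if ( synpred1_T() ) 1 else 2
 *
 * The wrapper marks the input, runs the fragment at an elevated backtracking
 * level, tests state.failed, and rewinds, so the decision itself never
 * consumes input.
 */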

lexerSynpred(name) ::= <<
<synpred(name)>
>>

ruleMemoization(name) ::= <<
<if(memoize)>
if ( state.backtracking>0 && alreadyParsedRule(input, <ruleDescriptor.index>) ) { return <ruleReturnValue()> }
<endif>
>>

/** How to test for failure and return from rule */
checkRuleBacktrackFailure() ::= <<
if (state.failed) return <ruleReturnValue()>
>>

/** This rule has failed, exit indicating failure during backtrack */
ruleBacktrackFailure() ::= <<
if (state.backtracking>0) {state.failed=true; return <ruleReturnValue()>}
>>
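/* Editorial note: checkRuleBacktrackFailure() is emitted after every match
 * while backtracking, and ruleBacktrackFailure() at each point of no return,
 * giving generated sequences roughly like (hypothetical token and follow set):
 *
 *     `match`(input, ID, FOLLOW_ID_in_decl10)   // runtime method; "match" is
 *     if (state.failed) return retval           //   a Scala keyword, so it
 *                                               //   needs backticks here
 */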

/** How to generate code for a rule.  This includes any return type
 *  data aggregates required for multiple return values.
 */
rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
<ruleAttributeScope(scope=ruleDescriptor.ruleScope)>
<returnScope(scope=ruleDescriptor.returnScope)>

// $ANTLR start "<ruleName>"
// <fileName>:<description>
@throws(classOf[RecognitionException])
final def <ruleName>(<ruleDescriptor.parameterScope:parameterScope()>): <returnType()> = {
    <if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>)<endif>
    <ruleScopeSetUp()>
    <ruleDeclarations()>
    <ruleLabelDefs()>
    <ruleDescriptor.actions.init>
    <@preamble()>
    try {
        <ruleMemoization(name=ruleName)>
        <block>
        <ruleCleanUp()>
        <(ruleDescriptor.actions.after):execAction()>
    }
    <if(exceptions)>
    <exceptions:{e|<catch(decl=e.decl,action=e.action)><\n>}>
    <else>
    <if(!emptyRule)>
    <if(actions.(actionScope).rulecatch)>
    <actions.(actionScope).rulecatch>
    <else>
    catch {
        case re: RecognitionException =>
        reportError(re)
        recover(input,re)
        <@setErrorReturnValue()>
    }<\n>
    <endif>
    <endif>
    <endif>
    finally {
        <if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>)<endif>
        <memoize()>
        <ruleScopeCleanUp()>
        <finally>
    }
    <@postamble()>
    return <ruleReturnValue()>
}
// $ANTLR end "<ruleName>"
>>
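/* Editorial note: a rule "decl" with multiple return values expands, per the
 * template above, to a method shaped roughly like (bodies elided; the return
 * scope class name is hypothetical):
 *
 *     // $ANTLR start "decl"
 *     @throws(classOf[RecognitionException])
 *     final def decl(): decl_return = {
 *         val retval = new decl_return()
 *         retval.start = input.LT(1)
 *         try {
 *             // alternatives matched here
 *             retval.stop = input.LT(-1)
 *         }
 *         catch {
 *             case re: RecognitionException =>
 *             reportError(re)
 *             recover(input,re)
 *         }
 *         finally { }
 *         return retval
 *     }
 *     // $ANTLR end "decl"
 */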

catch(decl,action) ::= <<
catch (<decl>) {
    <action>
}
>>

ruleDeclarations() ::= <<
<if(ruleDescriptor.hasMultipleReturnValues)>
val retval = new <returnType()>()
retval.start = input.LT(1)<\n>
<else>
<ruleDescriptor.returnScope.attributes:{ a |
var <a.name>: <a.type> = <if(a.initValue)><a.initValue><else><initValue(a.type)><endif>
}>
<endif>
<if(memoize)>
val <ruleDescriptor.name>_StartIndex = input.index()
<endif>
>>

ruleScopeSetUp() ::= <<
<ruleDescriptor.useScopes:{it |<it>_stack.push(new <it>_scope())}; separator="\n">
<ruleDescriptor.ruleScopes:{it |<it.name>_stack.push(new <it.name>_scope())}; separator="\n">
>>

ruleScopeCleanUp() ::= <<
<ruleDescriptor.useScopes:{it |<it>_stack.pop()}; separator="\n">
<ruleDescriptor.ruleScopes:{it |<it.name>_stack.pop()}; separator="\n">
>>


ruleLabelDefs() ::= <<
<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,
  ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
    :{it | var <it.label.text>: <labelType> = null}; separator="\n"
>
<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels,ruleDescriptor.wildcardTreeListLabels]
    :{it | var list_<it.label.text>: java.util.List = null}; separator="\n"
>
<ruleDescriptor.ruleLabels:ruleLabelDef(); separator="\n">
<ruleDescriptor.ruleListLabels:{it | var <it.label.text>: RuleReturnScope = null}; separator="\n">
>>

lexerRuleLabelDefs() ::= <<
<[ruleDescriptor.tokenLabels,
  ruleDescriptor.tokenListLabels,
  ruleDescriptor.ruleLabels]
    :{it | var <it.label.text>: <labelType> = null}; separator="\n"
>
<ruleDescriptor.charLabels:{it | var <it.label.text>: Int = 0}; separator="\n">
<[ruleDescriptor.tokenListLabels,
  ruleDescriptor.ruleListLabels]
    :{it | var list_<it.label.text>: java.util.List = null}; separator="\n"
>
>>

ruleReturnValue() ::= <<
<if(!ruleDescriptor.isSynPred)>
<if(ruleDescriptor.hasReturnValue)>
<if(ruleDescriptor.hasSingleReturnValue)>
<ruleDescriptor.singleValueReturnName>
<else>
retval
<endif>
<endif>
<endif>
>>

ruleCleanUp() ::= <<
<if(ruleDescriptor.hasMultipleReturnValues)>
<if(!TREE_PARSER)>
retval.stop = input.LT(-1)<\n>
<endif>
<endif>
>>

memoize() ::= <<
<if(memoize)>
<if(backtracking)>
if ( state.backtracking>0 ) { memoize(input, <ruleDescriptor.index>, <ruleDescriptor.name>_StartIndex) }
<endif>
<endif>
>>

/** How to generate a rule in the lexer; naked blocks are used for
 *  fragment rules.
 */
lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
// $ANTLR start "<ruleName>"
@throws(classOf[RecognitionException])
final def m<ruleName>(<ruleDescriptor.parameterScope:parameterScope()>): Unit = {
    <if(trace)>traceIn("<ruleName>", <ruleDescriptor.index>)<endif>
    <ruleScopeSetUp()>
    <ruleDeclarations()>
    try {
<if(nakedBlock)>
        <ruleMemoization(name=ruleName)>
        <lexerRuleLabelDefs()>
        <ruleDescriptor.actions.init>
        try <block><\n>
<else>
        var _type = <ruleName>
        var _channel = BaseRecognizer.DEFAULT_TOKEN_CHANNEL
        <ruleMemoization(name=ruleName)>
        <lexerRuleLabelDefs()>
        <ruleDescriptor.actions.init>
        try <block>

        state.`type` = _type
        state.channel = _channel
        <(ruleDescriptor.actions.after):execAction()>
<endif>
    }
    finally {
        <if(trace)>traceOut("<ruleName>", <ruleDescriptor.index>)<endif>
        <ruleScopeCleanUp()>
        <memoize()>
    }
}
// $ANTLR end "<ruleName>"
>>
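/* Editorial note: a lexer rule ID expands per the template above to roughly
 * (match code elided; names hypothetical):
 *
 *     // $ANTLR start "ID"
 *     @throws(classOf[RecognitionException])
 *     final def mID(): Unit = {
 *         try {
 *             var _type = ID
 *             var _channel = BaseRecognizer.DEFAULT_TOKEN_CHANNEL
 *             // match the characters of the token here
 *             state.`type` = _type      // "type" needs backticks in Scala
 *             state.channel = _channel
 *         }
 *         finally { }
 *     }
 *     // $ANTLR end "ID"
 */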

/** How to generate code for the implicitly-defined lexer grammar rule
 *  that chooses between lexer rules.
 */
tokensRule(ruleName,nakedBlock,args,block,ruleDescriptor) ::= <<
@throws(classOf[RecognitionException])
def mTokens(): Unit = {
    <block><\n>
}
>>
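/* Editorial note: mTokens() holds a single decision over all lexer rules; its
 * block expands to a prediction followed by a dispatch, roughly (hypothetical
 * decision number and rules):
 *
 *     val alt1 = dfa1.predict(input)
 *     alt1 match {
 *         case 1 => mID()
 *         case 2 => mINT()
 *         case _ =>
 *     }
 */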

// S U B R U L E S

/** A (...) subrule with multiple alternatives */
block(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
// <fileName>:<description>
var alt<decisionNumber> = <maxAlt>
<decls>
<@predecision()>
<decision>
<@postdecision()>
<@prebranch()>
alt<decisionNumber> match {
    <alts:{a|<altSwitchCase(i,a)>}>
    case _ =>
}
<@postbranch()>
>>

/** A rule block with multiple alternatives */
ruleBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
// <fileName>:<description>
var alt<decisionNumber> = <maxAlt>
<decls>
<@predecision()>
<decision>
<@postdecision()>
alt<decisionNumber> match {
    <alts:{a|<altSwitchCase(i,a)>}>
    case _ =>
}
>>

ruleBlockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
// <fileName>:<description>
<decls>
<@prealt()>
<alts>
<@postalt()>
>>

/** A special case of a (...) subrule with a single alternative */
blockSingleAlt(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,description) ::= <<
// <fileName>:<description>
<decls>
<@prealt()>
<alts>
<@postalt()>
>>

/** A (..)+ block with 1 or more alternatives */
positiveClosureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
// <fileName>:<description>
var cnt<decisionNumber>: Int = 0
<decls>
<@preloop()>
var loop<decisionNumber>_quitflag = false
while (!loop<decisionNumber>_quitflag) {
    var alt<decisionNumber>: Int = <maxAlt>
    <@predecision()>
    <decision>
    <@postdecision()>
    alt<decisionNumber> match {
        <alts:{a|<altSwitchCase(i,a)>}>
        case _ =>
            if ( cnt<decisionNumber> >= 1 ) loop<decisionNumber>_quitflag = true
            else {
                <ruleBacktrackFailure()>
                val eee = new EarlyExitException(<decisionNumber>, input)
                <@earlyExitException()>
                throw eee
            }
    }
    cnt<decisionNumber>+=1
}
<@postloop()>
>>
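/* Editorial note: instantiated for a hypothetical decision 3 of a (..)+
 * subrule, the loop above comes out roughly as (decision code elided):
 *
 *     var cnt3: Int = 0
 *     var loop3_quitflag = false
 *     while (!loop3_quitflag) {
 *         var alt3: Int = 2            // default: the exit alternative
 *         // ... decision sets alt3 to 1 while more input matches ...
 *         alt3 match {
 *             case 1 => // match the loop body once
 *             case _ =>
 *                 if ( cnt3 >= 1 ) loop3_quitflag = true
 *                 else throw new EarlyExitException(3, input)
 *         }
 *         cnt3 += 1
 *     }
 */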

positiveClosureBlockSingleAlt ::= positiveClosureBlock

/** A (..)* block with 1 or more alternatives */
closureBlock(alts,decls,decision,enclosingBlockLevel,blockLevel,decisionNumber,maxK,maxAlt,description) ::= <<
// <fileName>:<description>
<decls>
<@preloop()>
var loop<decisionNumber>_quitflag = false
while (!loop<decisionNumber>_quitflag) {
    var alt<decisionNumber>: Int = <maxAlt>
    <@predecision()>
    <decision>
    <@postdecision()>
    alt<decisionNumber> match {
        <alts:{a|<altSwitchCase(i,a)>}>
        case _ => loop<decisionNumber>_quitflag = true
    }
}
<@postloop()>
>>

closureBlockSingleAlt ::= closureBlock

/** Optional blocks (x)? are translated to (x|) by ANTLR before code generation
 *  so we can just use the normal block template
 */
optionalBlock ::= block

optionalBlockSingleAlt ::= block
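/* Editorial note: e.g. an optional subrule ('else' stat)? reaches code
 * generation as ('else' stat | ), so the empty alternative simply falls
 * through the "case _ =>" arm of the block template above. */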

/** A case in a switch that jumps to an alternative given the alternative
 *  number.  A DFA predicts the alternative and then a simple switch
 *  does the jump to the code that actually matches that alternative.
 */
altSwitchCase(altNum, alt) ::= <<
case <altNum> =>
    <@prealt()>
    <alt>
>>

/** An alternative is just a list of elements; at outermost level */
alt(elements,altNum,description,autoAST,outerAlt,treeLevel,rew) ::= <<
// <fileName>:<description>
{
<@declarations()>
<elements:element()>
<rew>
<@cleanup()>
}
>>

/** What to emit when there is no rewrite.  For auto build
 *  mode, does nothing.
 */
noRewrite(rewriteBlockLevel, treeLevel) ::= ""

// E L E M E N T S

/** Dump the elements one per line */
element(e) ::= <<
<@prematch()>
<e.el><\n>
>>

/** match a token optionally with a label in front */
tokenRef(token,label,elementIndex,terminalOptions) ::= <<