/*
[The "BSD license"]
Copyright (c) 2010 Terence Parr
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** Map from ActionScript type name to the literal used to initialize a
 *  variable of that type; any type not listed (i.e. a non-atomic/object
 *  type) falls through to the "null" default.
 */
asTypeInitMap ::= [
"int":"0",
"uint":"0",
"Number":"0.0",
"Boolean":"false",
default:"null" // anything other than an atomic type
]
/** The overall file structure of a recognizer; stores methods for rules
 * and cyclic DFAs plus support code.
 * NOTE(review): the StringTemplate &lt;...&gt; attribute expressions in this
 * template (generated class name, ANTLR version, recognizer body, etc.)
 * appear to have been stripped by a markup pass -- only the literal
 * skeleton remains. Restore from the original ActionScript.stg before
 * using this file for code generation.
 */
outputFile(LEXER,PARSER,TREE_PARSER, actionScope, actions,
docComment, recognizer,
name, tokens, tokenNames, rules, cyclicDFAs,
bitsets, buildTemplate, buildAST, rewriteMode, profile,
backtracking, synpreds, memoize, numRules,
fileName, ANTLRVersion, generatedTimestamp, trace,
scopes, superClass, literals) ::=
<<
// $ANTLR
package {
<@imports>
import org.antlr.runtime.*;
import org.antlr.runtime.tree.*;
<@end>
}
>>
/** How to generate the lexer class for a grammar: token constants,
 *  delegate/delegator fields, constructor (taking a CharStream and shared
 *  recognizer state), and the grammarFileName accessor.
 *  NOTE(review): most &lt;...&gt; expressions (class name, token list, rule
 *  count, delegate names) have been stripped from this template; the
 *  dangling "}&gt;" / "separator=" fragments below are the residue of
 *  anonymous sub-templates whose headers were removed. Confirm against
 *  the original ActionScript.stg.
 */
lexer(grammar, name, tokens, scopes, rules, numRules, filterMode, labelType="Token",
superClass="Lexer") ::= <<
public class extends <@superClassName><@end> {
:int=;}; separator="\n">
}>
// delegates
:;}; separator="\n">
// delegators
:;}; separator="\n">
;}>
public function (:, }>input:CharStream = null, state:RecognizerSharedState = null) {
super(input, state);
this.state.ruleMemo = new Array(+1);<\n>
= new (, }>this, input, this.state);}; separator="\n">
= ;}; separator="\n">
;}>
}
public override function get grammarFileName():String { return ""; }
}>
}
>>
/** A override of Lexer.nextToken() that backtracks over mTokens() looking
 * for matches. No error can be generated upon error; just rewind, consume
 * a token and then try again. backtracking needs to be set as well.
 * Make rule memoization happen only at levels above 1 as we start mTokens
 * at backtracking==1.
 * NOTE(review): unlike most templates in this file, the body below is
 * literal ActionScript and survived largely intact; still verify against
 * the original ActionScript.stg for any stripped &lt;...&gt; expressions.
 */
filteringNextToken() ::= <<
public override function nextToken():Token {
while (true) {
if ( input.LA(1)==CharStreamConstants.EOF ) {
return TokenConstants.EOF_TOKEN;
}
this.state.token = null;
this.state.channel = TokenConstants.DEFAULT_CHANNEL;
this.state.tokenStartCharIndex = input.index;
this.state.tokenStartCharPositionInLine = input.charPositionInLine;
this.state.tokenStartLine = input.line;
this.state.text = null;
try {
var m:int = input.mark();
this.state.backtracking=1;
this.state.failed=false;
mTokens();
this.state.backtracking=0;
if ( this.state.failed ) {
input.rewindTo(m);
input.consume();
}
else {
emit();
return this.state.token;
}
}
catch (re:RecognitionException) {
// shouldn't happen in backtracking mode, but...
reportError(re);
recover(re);
}
}
// Not reached - For ActionScript compiler
throw new Error();
}
public override function memoize(input:IntStream,
ruleIndex:int,
ruleStartIndex:int):void
{
if ( this.state.backtracking>1 ) super.memoize(input, ruleIndex, ruleStartIndex);
}
public override function alreadyParsedRule(input:IntStream, ruleIndex:int):Boolean {
if ( this.state.backtracking>1 ) return super.alreadyParsedRule(input, ruleIndex);
return false;
}
>>
/** Condition guarding user actions in normal mode: run only when not
 *  backtracking.
 */
actionGate() ::= "this.state.backtracking==0"
/** Condition guarding actions in filter mode, where mTokens runs at
 *  backtracking==1 (see filteringNextToken above).
 */
filteringActionGate() ::= "this.state.backtracking==1"
/** How to generate a parser
 *  (shared skeleton for both token-stream and tree parsers: tokenNames
 *  table, token constants, delegate wiring, constructor, accessors, and
 *  FOLLOW bitsets).
 *  NOTE(review): &lt;...&gt; expressions stripped throughout -- class name,
 *  token names, rule bodies and bitset initializers are missing; the
 *  stray "}&gt;" / "separator=" / "words64=it.bits" fragments are residue
 *  of removed sub-templates. Restore from the original ActionScript.stg.
 */
genericParser(grammar, name, scopes, tokens, tokenNames, rules, numRules,
bitsets, inputStreamType, superClass,
labelType, members, rewriteElementType,
filterMode, ASTLabelType="Object") ::= <<
public class extends <@superClassName><@end> {
public static const tokenNames:Array = [
"\", "\", "\", "\",
];<\n>
:int=;}; separator="\n">
// delegates
:;}; separator="\n">
// delegators
:;}; separator="\n">
;}>
}>
<@members>
public function (:, }>input:, state:RecognizerSharedState = null) {
super(input, state);
= new (, }>this, input, this.state);}; separator="\n">
= .;}; separator="\n">
;}>
}
<@end>
public override function get tokenNames():Array { return .tokenNames; }
public override function get grammarFileName():String { return ""; }
// Delegated rules
(): { return .(}; separator=", ">); \}}; separator="\n">
}>
_in_},
words64=it.bits)>}>
}
>>
/** Shared constructor-body statements for parsers: allocate the rule
 *  memoization table, plus further initialization whose &lt;...&gt;
 *  expressions have been stripped.
 *  NOTE(review): confirm against the original ActionScript.stg.
 */
parserCtorBody() ::= <<
this.state.ruleMemo = new Array(+1);<\n>
= ;}; separator="\n">
>>
/** Entry template for a token-stream parser.
 *  NOTE(review): the body is empty here; it presumably delegated to
 *  genericParser before the &lt;...&gt; expressions were stripped -- verify
 *  against the original ActionScript.stg.
 */
parser(grammar, name, scopes, tokens, tokenNames, rules, numRules, bitsets, ASTLabelType="Object", superClass="Parser", labelType="Token", members={}) ::= <<
>>
/** How to generate a tree parser; same as parser except the input
 * stream is a different type.
 * NOTE(review): body is empty -- its delegation (presumably to
 * genericParser) appears to have been stripped; verify against the
 * original ActionScript.stg.
 */
treeParser(grammar, name, scopes, tokens, tokenNames, globalAction, rules,
numRules, bitsets, filterMode, labelType={}, ASTLabelType="Object",
superClass="TreeParser", members={}) ::= <<
>>
/** A simpler version of a rule template that is specific to the imaginary
 * rules created for syntactic predicates. As they never have return values
 * nor parameters etc..., just give simplest possible method. Don't do
 * any of the normal memoization stuff in here either; it's a waste.
 * As predicates cannot be inlined into the invoking rule, they need to
 * be in a rule by themselves.
 * NOTE(review): the &lt;ruleName&gt;/&lt;block&gt; expressions have been stripped
 * from the body below -- the generated method name and its contents are
 * missing; restore from the original ActionScript.stg.
 */
synpredRule(ruleName, ruleDescriptor, block, description, nakedBlock) ::=
<<
// $ANTLR start
public final function _fragment():void {
traceIn("_fragment", );
try {
}
finally {
traceOut("_fragment", );
}
}
// $ANTLR end
>>
/** Evaluate a syntactic predicate: raise the backtracking level, mark the
 *  input, invoke the predicate's _fragment rule, then rewind and report
 *  success as "state.failed was not set". backtracking is restored and
 *  failed cleared before returning.
 *  NOTE(review): the &lt;name&gt; expressions for the method names have been
 *  stripped from this body.
 */
synpred(name) ::= <<
public final function ():Boolean {
this.state.backtracking++;
<@start()>
var start:int = input.mark();
try {
_fragment(); // can never throw exception
} catch (re:RecognitionException) {
trace("impossible: "+re);
}
var success:Boolean = !this.state.failed;
input.rewindTo(start);
<@stop()>
this.state.backtracking--;
this.state.failed=false;
return success;
}<\n>
>>
/** Lexer variant of a syntactic-predicate method.
 *  NOTE(review): body is empty -- its delegation (presumably to synpred)
 *  appears to have been stripped; verify against the original template.
 */
lexerSynpred(name) ::= <<
>>
/** Guard emitted at rule entry when memoization is on: while backtracking,
 *  bail out of a rule already attempted at this input position.
 *  NOTE(review): the rule-index and return-value &lt;...&gt; expressions have
 *  been stripped from the body.
 */
ruleMemoization(name) ::= <<
if ( this.state.backtracking>0 && alreadyParsedRule(input, ) ) { return ; }
>>
/** How to test for failure and return from rule
 *  NOTE(review): the return-value expression after "return" was stripped. */
checkRuleBacktrackFailure() ::= <<
if (this.state.failed) return ;
>>
/** This rule has failed, exit indicating failure during backtrack
 *  NOTE(review): the return-value expression after "return" was stripped. */
ruleBacktrackFailure() ::= <<
if (this.state.backtracking>0) {this.state.failed=true; return ;}
>>
/** How to generate code for a rule. This includes any return type
 * data aggregates required for multiple return values.
 * Emits the rule method with trace in/out calls, a try wrapper with a
 * default RecognitionException handler (reportError + recoverStream),
 * and a finally block.
 * NOTE(review): rule name, return type, block body and exception-handler
 * &lt;...&gt; expressions have all been stripped; restore from the original
 * ActionScript.stg before use.
 */
rule(ruleName,ruleDescriptor,block,emptyRule,description,exceptions,finally,memoize) ::= <<
// $ANTLR start
// :
public final function (): {
traceIn("", );
<@preamble()>
try {
<(ruleDescriptor.actions.after):execAction()>
}
<\n>}>
catch (re:RecognitionException) {
reportError(re);
recoverStream(input,re);
<@setErrorReturnValue()>
}<\n>
finally {
traceOut("", );
}
<@postamble()>
return ;
}
// $ANTLR end
>>
/** Local declarations at the top of a rule: the retval scope object
 *  (start token captured via input.LT(1)) and, when memoizing, the rule
 *  start index.
 *  NOTE(review): return-scope type names and per-attribute &lt;...&gt;
 *  expressions stripped.
 */
ruleDeclarations() ::= <<
var retval: = new ();
retval.start = input.LT(1);<\n>
: = ;
}>
var _StartIndex:int = input.index;
>>
/** Push a fresh Object onto each rule/global scope stack at rule entry.
 *  NOTE(review): scope-name expressions stripped. */
ruleScopeSetUp() ::= <<
_stack.push(new Object());}; separator="\n">
_stack.push(new Object());}; separator="\n">
>>
/** Pop the scope stacks on rule exit, mirroring ruleScopeSetUp.
 *  NOTE(review): scope-name expressions stripped. */
ruleScopeCleanUp() ::= <<
_stack.pop();}; separator="\n">
_stack.pop();}; separator="\n">
>>
/** Declare labels used inside a parser rule: token/wildcard-tree labels,
 *  list labels (as Arrays), and rule-return-scope labels.
 *  NOTE(review): per-label &lt;...&gt; expressions partially stripped. */
ruleLabelDefs() ::= <<
<[ruleDescriptor.tokenLabels,ruleDescriptor.tokenListLabels,
ruleDescriptor.wildcardTreeLabels,ruleDescriptor.wildcardTreeListLabels]
:{it |var :=null;}; separator="\n"
>
<[ruleDescriptor.tokenListLabels,ruleDescriptor.ruleListLabels,ruleDescriptor.wildcardTreeListLabels]
:{it |var list_:Array=null;}; separator="\n"
>
:RuleReturnScope = null;}; separator="\n">
>>
/** Declare labels used inside a lexer rule: token/rule labels, char-
 *  position int labels, and list labels as Arrays.
 *  NOTE(review): per-label &lt;...&gt; expressions partially stripped. */
lexerRuleLabelDefs() ::= <<
<[ruleDescriptor.tokenLabels,
ruleDescriptor.tokenListLabels,
ruleDescriptor.ruleLabels]
:{it |var :=null;}; separator="\n"
>
:int;}; separator="\n">
<[ruleDescriptor.tokenListLabels,
ruleDescriptor.ruleListLabels]
:{it |var list_:Array=null;}; separator="\n"
>
>>
/** Expression for a rule's return value: the retval scope object. */
ruleReturnValue() ::= <%
retval
%>
/** End-of-rule cleanup: record the last consumed token as retval.stop. */
ruleCleanUp() ::= <<
retval.stop = input.LT(-1);<\n>
>>
/** Memoize the rule result when backtracking, using the start index
 *  captured in ruleDeclarations.
 *  NOTE(review): the rule-index expression was stripped. */
memoize() ::= <<
if ( this.state.backtracking>0 ) { memoize(input, , _StartIndex); }
>>
/** How to generate a rule in the lexer; naked blocks are used for
* fragment rules.
*/
lexerRule(ruleName,nakedBlock,ruleDescriptor,block,memoize) ::= <<
// $ANTLR start
public final function m():void {
traceIn("", );
try {
<\n>
var _type:int = ;
var _channel:int = DEFAULT_TOKEN_CHANNEL;
this.state.type = _type;
this.state.channel = _channel;
<(ruleDescriptor.actions.after):execAction()>
}
finally {
traceOut("