
org.fife.ui.rsyntaxtextarea.modes.RubyTokenMaker.flex

/*
 * 09/11/2008
 *
 * RubyTokenMaker.java - Scanner for Ruby
 * 
 * This library is distributed under a modified BSD license.  See the included
 * RSyntaxTextArea.License.txt file for details.
 */
package org.fife.ui.rsyntaxtextarea.modes;

import java.io.*;
import javax.swing.text.Segment;

import org.fife.ui.rsyntaxtextarea.*;


/**
 * Scanner for Ruby.
 *
 * This implementation was created using JFlex 1.4.1; however, the generated
 * file was modified for performance.  Memory allocation needs to be almost
 * completely removed to be competitive with the handwritten lexers (subclasses
 * of AbstractTokenMaker), so this class has been modified so that Strings are
 * never allocated (via yytext()), and the scanner never has to worry about
 * refilling its buffer (needlessly copying chars around).  We can achieve this
 * because RText always scans exactly 1 line of tokens at a time, and hands the
 * scanner this line as an array of characters (a Segment really).  Since
 * tokens contain pointers to char arrays instead of Strings holding their
 * contents, there is no need for allocating new memory for Strings.
 *
 * The actual algorithm generated for scanning has, of course, not been
 * modified.
 *
 * If you wish to regenerate this file yourself, keep in mind the following:
 *
 *   - The generated RubyTokenMaker.java file will contain two definitions of
 *     both zzRefill and yyreset.  You should hand-delete the second of each
 *     definition (the ones generated by the lexer), as these generated methods
 *     modify the input buffer, which we'll never have to do.
 *   - You should also change the declaration/definition of zzBuffer to NOT be
 *     initialized.  This is a needless memory allocation for us since we will
 *     be pointing the array somewhere else anyway.
 *   - You should NOT call yylex() on the generated scanner directly; rather,
 *     you should use getTokenList as you would with any other TokenMaker
 *     instance.
 *
 * @author Robert Futrell
 * @version 0.5
 */
%%

%public
%class RubyTokenMaker
%extends AbstractJFlexTokenMaker
%unicode
%type org.fife.ui.rsyntaxtextarea.Token


%{

    /**
     * Token type specific to RubyTokenMaker; this signals that we are inside
     * an unquoted/double-quoted/backtick EOF heredoc.
     */
    public static final int INTERNAL_HEREDOC_EOF_UNQUOTED = -1;

    /**
     * Token type specific to RubyTokenMaker; this signals that we are inside
     * a single-quoted EOF heredoc.
     */
    public static final int INTERNAL_HEREDOC_EOF_SINGLE_QUOTED = -2;

    /**
     * Token type specific to RubyTokenMaker; this signals that we are inside
     * a double-quoted EOF heredoc.
     */
    public static final int INTERNAL_HEREDOC_EOF_DOUBLE_QUOTED = -3;

    /**
     * Token type specific to RubyTokenMaker; this signals that we are inside
     * an unquoted/double-quoted/backtick EOT heredoc.
     */
    public static final int INTERNAL_HEREDOC_EOT_UNQUOTED = -4;

    /**
     * Token type specific to RubyTokenMaker; this signals that we are inside
     * a single-quoted EOT heredoc.
     */
    public static final int INTERNAL_HEREDOC_EOT_SINGLE_QUOTED = -5;

    /**
     * Token type specific to RubyTokenMaker; this signals that we are inside
     * a double-quoted EOT heredoc.
     */
    public static final int INTERNAL_HEREDOC_EOT_DOUBLE_QUOTED = -6;

    /**
     * Token type specific to RubyTokenMaker; this signals that we are inside
     * a %Q!...! style double-quoted string.
     */
    public static final int INTERNAL_STRING_Q_BANG = -7;

    /**
     * Token type specific to RubyTokenMaker; this signals that we are inside
     * a %Q{...} style double-quoted string.
     */
    public static final int INTERNAL_STRING_Q_CURLY_BRACE = -8;

    /**
     * Token type specific to RubyTokenMaker; this signals that we are inside
     * a %Q<...> style double-quoted string.
     */
    public static final int INTERNAL_STRING_Q_LT = -9;

    /**
     * Token type specific to RubyTokenMaker; this signals that we are inside
     * a %Q(...) style double-quoted string.
     */
    public static final int INTERNAL_STRING_Q_PAREN = -10;

    /**
     * Token type specific to RubyTokenMaker; this signals that we are inside
     * a %Q/.../ style double-quoted string.
     */
    public static final int INTERNAL_STRING_Q_SLASH = -11;

    /**
     * Token type specific to RubyTokenMaker; this signals that we are inside
     * a %Q[...] style double-quoted string.
     */
    public static final int INTERNAL_STRING_Q_SQUARE_BRACKET = -12;


    /**
     * Constructor.  This must be here because JFlex does not generate a
     * no-parameter constructor.
     */
    public RubyTokenMaker() {
    }


    /**
     * Adds the token specified to the current linked list of tokens as an
     * "end token;" that is, at zzMarkedPos.
     *
     * @param tokenType The token's type.
     */
    private void addEndToken(int tokenType) {
        addToken(zzMarkedPos,zzMarkedPos, tokenType);
    }


    /**
     * Adds the token specified to the current linked list of tokens.
     *
     * @param tokenType The token's type.
     */
    private void addToken(int tokenType) {
        addToken(zzStartRead, zzMarkedPos-1, tokenType);
    }


    /**
     * Adds the token specified to the current linked list of tokens.
     *
     * @param tokenType The token's type.
     */
    private void addToken(int start, int end, int tokenType) {
        int so = start + offsetShift;
        addToken(zzBuffer, start,end, tokenType, so);
    }


    /**
     * Adds the token specified to the current linked list of tokens.
     *
     * @param array The character array.
     * @param start The starting offset in the array.
     * @param end The ending offset in the array.
     * @param tokenType The token's type.
     * @param startOffset The offset in the document at which this token
     *        occurs.
     */
    @Override
    public void addToken(char[] array, int start, int end, int tokenType, int startOffset) {
        super.addToken(array, start,end, tokenType, startOffset);
        zzStartRead = zzMarkedPos;
    }


    /**
     * {@inheritDoc}
     */
    @Override
    public String[] getLineCommentStartAndEnd(int languageIndex) {
        return new String[] { "#", null };
    }


    /**
     * Returns whether tokens of the specified type should have "mark
     * occurrences" enabled for the current programming language.
     *
     * @param type The token type.
     * @return Whether tokens of this type should have "mark occurrences"
     *         enabled.
     */
    public boolean getMarkOccurrencesOfTokenType(int type) {
        return type==Token.IDENTIFIER || type==Token.VARIABLE;
    }


    /**
     * Returns the first token in the linked list of tokens generated
     * from text.  This method must be implemented by
     * subclasses so they can correctly implement syntax highlighting.
     *
     * @param text The text from which to get tokens.
     * @param initialTokenType The token type we should start with.
     * @param startOffset The offset into the document at which
     *        text starts.
     * @return The first Token in a linked list representing
     *         the syntax highlighted text.
     */
    public Token getTokenList(Segment text, int initialTokenType, int startOffset) {

        resetTokenList();
        this.offsetShift = -text.offset + startOffset;

        // Start off in the proper state.
        int state = Token.NULL;
        switch (initialTokenType) {
            case Token.COMMENT_DOCUMENTATION:
                state = DOCCOMMENT;
                start = text.offset;
                break;
            case Token.LITERAL_STRING_DOUBLE_QUOTE:
                state = STRING;
                start = text.offset;
                break;
            case Token.LITERAL_CHAR:
                state = CHAR_LITERAL;
                start = text.offset;
                break;
            case Token.LITERAL_BACKQUOTE:
                state = BACKTICKS;
                start = text.offset;
                break;
            case INTERNAL_HEREDOC_EOF_UNQUOTED:
                state = HEREDOC_EOF_UNQUOTED;
                start = text.offset;
                break;
            case INTERNAL_HEREDOC_EOF_SINGLE_QUOTED:
                state = HEREDOC_EOF_SINGLE_QUOTED;
                start = text.offset;
                break;
            case INTERNAL_HEREDOC_EOF_DOUBLE_QUOTED:
                state = HEREDOC_EOF_DOUBLE_QUOTED;
                start = text.offset;
                break;
            case INTERNAL_HEREDOC_EOT_UNQUOTED:
                state = HEREDOC_EOT_UNQUOTED;
                start = text.offset;
                break;
            case INTERNAL_HEREDOC_EOT_SINGLE_QUOTED:
                state = HEREDOC_EOT_SINGLE_QUOTED;
                start = text.offset;
                break;
            case INTERNAL_HEREDOC_EOT_DOUBLE_QUOTED:
                state = HEREDOC_EOT_DOUBLE_QUOTED;
                start = text.offset;
                break;
            case INTERNAL_STRING_Q_BANG:
                state = STRING_Q_BANG;
                start = text.offset;
                break;
            case INTERNAL_STRING_Q_CURLY_BRACE:
                state = STRING_Q_CURLY_BRACE;
                start = text.offset;
                break;
            case INTERNAL_STRING_Q_LT:
                state = STRING_Q_LT;
                start = text.offset;
                break;
            case INTERNAL_STRING_Q_PAREN:
                state = STRING_Q_PAREN;
                start = text.offset;
                break;
            case INTERNAL_STRING_Q_SLASH:
                state = STRING_Q_SLASH;
                start = text.offset;
                break;
            case INTERNAL_STRING_Q_SQUARE_BRACKET:
                state = STRING_Q_SQUARE_BRACKET;
                start = text.offset;
                break;
            default:
                state = Token.NULL;
        }

        s = text;
        try {
            yyreset(zzReader);
            yybegin(state);
            return yylex();
        } catch (IOException ioe) {
            ioe.printStackTrace();
            return new TokenImpl();
        }

    }


    /**
     * Refills the input buffer.
     *
     * @return      true if EOF was reached, otherwise false.
     * @exception   IOException  if any I/O-Error occurs.
     */
    private boolean zzRefill() throws java.io.IOException {
        return zzCurrentPos>=s.offset+s.count;
    }


    /**
     * Resets the scanner to read from a new input stream.
     * Does not close the old reader.
     *
     * All internal variables are reset, the old input stream
     * cannot be reused (internal buffer is discarded and lost).
     * Lexical state is set to YY_INITIAL.
     *
     * @param reader   the new input stream
     */
    public final void yyreset(java.io.Reader reader) throws java.io.IOException {
        // 's' has been updated.
        zzBuffer = s.array;
        /*
         * We replaced the line below with the two below it because zzRefill
         * no longer "refills" the buffer (since the way we do it, it's always
         * "full" the first time through, since it points to the segment's
         * array).  So, we assign zzEndRead here.
         */
        //zzStartRead = zzEndRead = s.offset;
        zzStartRead = s.offset;
        zzEndRead = zzStartRead + s.count - 1;
        zzCurrentPos = zzMarkedPos = zzPushbackPos = s.offset;
        zzLexicalState = YYINITIAL;
        zzReader = reader;
        zzAtBOL  = true;
        zzAtEOF  = false;
    }

%}

Letter = [A-Za-z]
NonzeroDigit = [1-9]
Digit = ("0"|{NonzeroDigit})
BinaryDigit = ([01])
HexDigit = ({Digit}|[A-Fa-f])
OctalDigit = ([0-7])
NonSeparator = ([^\t\f\r\n\ \(\)\{\}\[\]\;\,\.\=\>\<\!\~\?\:\+\-\*\/\&\|\^\%\"\'\`]|"#"|"\\")
IdentifierStart = ({Letter}|"_")
IdentifierPart = ({IdentifierStart}|{Digit})
BooleanLiteral = ("true"|"false")

LineTerminator = (\n)
WhiteSpace = ([ \t\f])

LineCommentBegin = "#"
DocCommentBegin = "=begin"
DocCommentEnd = "=end"

DigitOrUnderscore = ({Digit}|[_])
BinaryIntLiteral = ("0b"{BinaryDigit}([01_]*{BinaryDigit})?)
OctalLiteral = ("0"([0-7_]*{OctalDigit})?)
DecimalLiteral1 = ("0d"{Digit}({DigitOrUnderscore}*{Digit})?)
DecimalLiteral2 = ({NonzeroDigit}({DigitOrUnderscore}*{Digit})?)
DecimalLiteral = ({BinaryIntLiteral}|{OctalLiteral}|{DecimalLiteral1}|{DecimalLiteral2})
HexLiteral = ("0x"{HexDigit}([0-9a-zA-Z_]*{HexDigit})?)
FloatLiteral = ({NonzeroDigit}({DigitOrUnderscore}*{Digit})?[Ee]({Digit}({DigitOrUnderscore}*{Digit})?)?)

Separator = ([\(\)\{\}])
Operator1 = ("::"|"."|"["|"]"|"-"|"+"|"!"|"~"|"*"|"/"|"%"|"<<"|">>"|"&"|"|"|"^")
Operator2 = (">"|">="|"<"|"<="|"<=>"|"=="|"==="|"!="|"=~"|"!~"|"&&"|"||")
Operator3 = (".."|"..."|"="|"+="|"-="|"*="|"/="|"%=")
Operator = ({Operator1}|{Operator2}|{Operator3})

Identifier = ({IdentifierStart}{IdentifierPart}*)
Symbol = ([:]{Identifier})
ErrorIdentifier = ({NonSeparator}+)

PreDefinedVariable = ("$"([!@&`\'+0-9~=/\,;.<>_*$?:\"]|"DEBUG"|"FILENAME"|"LOAD_PATH"|"stderr"|"stdin"|"stdout"|"VERBOSE"|([\-][0adFiIlpwv])))
Variable = ({PreDefinedVariable}|([@][@]?|[$]){Identifier})

%state STRING
%state STRING_Q_BANG
%state STRING_Q_CURLY_BRACE
%state STRING_Q_PAREN
%state STRING_Q_SLASH
%state STRING_Q_SQUARE_BRACKET
%state STRING_Q_LT
%state CHAR_LITERAL
%state BACKTICKS
%state HEREDOC_EOF_UNQUOTED
%state HEREDOC_EOF_SINGLE_QUOTED
%state HEREDOC_EOF_DOUBLE_QUOTED
%state HEREDOC_EOT_UNQUOTED
%state HEREDOC_EOT_SINGLE_QUOTED
%state HEREDOC_EOT_DOUBLE_QUOTED
%state DOCCOMMENT

%%

<YYINITIAL> {

    /* Keywords */
    "alias" |
    "BEGIN" |
    "begin" |
    "break" |
    "case" |
    "class" |
    "def" |
    "defined" |
    "do" |
    "else" |
    "elsif" |
    "END" |
    "end" |
    "ensure" |
    "for" |
    "if" |
    "in" |
    "module" |
    "next" |
    "nil" |
    "redo" |
    "rescue" |
    "retry" |
    "return" |
    "self" |
    "super" |
    "then" |
    "undef" |
    "unless" |
    "until" |
    "when" |
    "while" |
    "yield"                 { addToken(Token.RESERVED_WORD); }

    "Array" |
    "Float" |
    "Integer" |
    "String" |
    "at_exit" |
    "autoload" |
    "binding" |
    "caller" |
    "catch" |
    "chop" |
    "chop!" |
    "chomp" |
    "chomp!" |
    "eval" |
    "exec" |
    "exit" |
    "exit!" |
    "fail" |
    "fork" |
    "format" |
    "gets" |
    "global_variables" |
    "gsub" |
    "gsub!" |
    "iterator?" |
    "lambda" |
    "load" |
    "local_variables" |
    "loop" |
    "open" |
    "p" |
    "print" |
    "printf" |
    "proc" |
    "putc" |
    "puts" |
    "raise" |
    "rand" |
    "readline" |
    "readlines" |
    "require" |
    "select" |
    "sleep" |
    "split" |
    "sprintf" |
    "srand" |
    "sub" |
    "sub!" |
| "syscall" | "system" | "test" | "trace_var" | "trap" | "untrace_var" { addToken(Token.FUNCTION); } "and" | "or" | "not" { addToken(Token.OPERATOR); } {BooleanLiteral} { addToken(Token.LITERAL_BOOLEAN); } {Variable} { addToken(Token.VARIABLE); } {Symbol} { addToken(Token.PREPROCESSOR); } {LineTerminator} { addNullToken(); return firstToken; } {Identifier} { addToken(Token.IDENTIFIER); } {WhiteSpace}+ { addToken(Token.WHITESPACE); } /* String/Character literals. */ \" { start = zzMarkedPos-1; yybegin(STRING); } \' { start = zzMarkedPos-1; yybegin(CHAR_LITERAL); } [\%][QqWwx]?[\(] { start = zzMarkedPos-yylength(); yybegin(STRING_Q_PAREN); } [\%][QqWwx]?[\{] { start = zzMarkedPos-yylength(); yybegin(STRING_Q_CURLY_BRACE); } [\%][QqWwx]?[\[] { start = zzMarkedPos-yylength(); yybegin(STRING_Q_SQUARE_BRACKET); } [\%][QqWwx]?[\<] { start = zzMarkedPos-yylength(); yybegin(STRING_Q_LT); } [\%][QqWwx]?[\!] { start = zzMarkedPos-yylength(); yybegin(STRING_Q_BANG); } [\%][QqWwx]?[\/] { start = zzMarkedPos-yylength(); yybegin(STRING_Q_SLASH); } \` { start = zzMarkedPos-1; yybegin(BACKTICKS); } /* Comment literals. */ {LineCommentBegin}.* { addToken(Token.COMMENT_EOL); addNullToken(); return firstToken; } {DocCommentBegin} { start = zzMarkedPos-6; yybegin(DOCCOMMENT); } /* "Here-document" syntax. This is only implemented for the common */ /* cases. */ "<> { addNullToken(); return firstToken; } /* Catch any other (unhandled) characters. */ . { addToken(Token.IDENTIFIER); } } { [^\n\\\"]+ {} \n { addToken(start,zzStartRead-1, Token.LITERAL_STRING_DOUBLE_QUOTE); return firstToken; } \\.? { /* Skip escaped chars. */ } \" { yybegin(YYINITIAL); addToken(start,zzStartRead, Token.LITERAL_STRING_DOUBLE_QUOTE); } <> { addToken(start,zzStartRead-1, Token.LITERAL_STRING_DOUBLE_QUOTE); return firstToken; } } { [^\n\\\!]+ {} \n { addToken(start,zzStartRead-1, Token.LITERAL_STRING_DOUBLE_QUOTE); addEndToken(INTERNAL_STRING_Q_BANG); return firstToken; } \\.? { /* Skip escaped chars. */ } \! { yybegin(YYINITIAL); addToken(start,zzStartRead, Token.LITERAL_STRING_DOUBLE_QUOTE); } <> { addToken(start,zzStartRead-1, Token.LITERAL_STRING_DOUBLE_QUOTE); addEndToken(INTERNAL_STRING_Q_BANG); return firstToken; } } { [^\n\\\}]+ {} \n { addToken(start,zzStartRead-1, Token.LITERAL_STRING_DOUBLE_QUOTE); addEndToken(INTERNAL_STRING_Q_CURLY_BRACE); return firstToken; } \\.? { /* Skip escaped chars. */ } \} { yybegin(YYINITIAL); addToken(start,zzStartRead, Token.LITERAL_STRING_DOUBLE_QUOTE); } <> { addToken(start,zzStartRead-1, Token.LITERAL_STRING_DOUBLE_QUOTE); addEndToken(INTERNAL_STRING_Q_CURLY_BRACE); return firstToken; } } { [^\n\\\>]+ {} \n { addToken(start,zzStartRead-1, Token.LITERAL_STRING_DOUBLE_QUOTE); addEndToken(INTERNAL_STRING_Q_LT); return firstToken; } \\.? { /* Skip escaped chars. */ } \> { yybegin(YYINITIAL); addToken(start,zzStartRead, Token.LITERAL_STRING_DOUBLE_QUOTE); } <> { addToken(start,zzStartRead-1, Token.LITERAL_STRING_DOUBLE_QUOTE); addEndToken(INTERNAL_STRING_Q_LT); return firstToken; } } { [^\n\\\)]+ {} \n { addToken(start,zzStartRead-1, Token.LITERAL_STRING_DOUBLE_QUOTE); addEndToken(INTERNAL_STRING_Q_PAREN); return firstToken; } \\.? { /* Skip escaped chars. 
    \)                      { yybegin(YYINITIAL); addToken(start,zzStartRead, Token.LITERAL_STRING_DOUBLE_QUOTE); }
    <<EOF>>                 { addToken(start,zzStartRead-1, Token.LITERAL_STRING_DOUBLE_QUOTE); addEndToken(INTERNAL_STRING_Q_PAREN); return firstToken; }
}

<STRING_Q_SLASH> {
    [^\n\\\/]+              {}
    \n                      { addToken(start,zzStartRead-1, Token.LITERAL_STRING_DOUBLE_QUOTE); addEndToken(INTERNAL_STRING_Q_SLASH); return firstToken; }
    \\.?                    { /* Skip escaped chars. */ }
    \/                      { yybegin(YYINITIAL); addToken(start,zzStartRead, Token.LITERAL_STRING_DOUBLE_QUOTE); }
    <<EOF>>                 { addToken(start,zzStartRead-1, Token.LITERAL_STRING_DOUBLE_QUOTE); addEndToken(INTERNAL_STRING_Q_SLASH); return firstToken; }
}

<STRING_Q_SQUARE_BRACKET> {
    [^\n\\\]]+              {}
    \n                      { addToken(start,zzStartRead-1, Token.LITERAL_STRING_DOUBLE_QUOTE); addEndToken(INTERNAL_STRING_Q_SQUARE_BRACKET); return firstToken; }
    \\.?                    { /* Skip escaped chars. */ }
    \]                      { yybegin(YYINITIAL); addToken(start,zzStartRead, Token.LITERAL_STRING_DOUBLE_QUOTE); }
    <<EOF>>                 { addToken(start,zzStartRead-1, Token.LITERAL_STRING_DOUBLE_QUOTE); addEndToken(INTERNAL_STRING_Q_SQUARE_BRACKET); return firstToken; }
}

<CHAR_LITERAL> {
    [^\n\\\']+              {}
    \\.?                    { /* Skip escaped single quotes only, but this should still work. */ }
    \n                      { addToken(start,zzStartRead-1, Token.LITERAL_CHAR); return firstToken; }
    \'                      { yybegin(YYINITIAL); addToken(start,zzStartRead, Token.LITERAL_CHAR); }
    <<EOF>>                 { addToken(start,zzStartRead-1, Token.LITERAL_CHAR); return firstToken; }
}

<BACKTICKS> {
    [^\n\\\`]+              {}
    \n                      { addToken(start,zzStartRead-1, Token.LITERAL_BACKQUOTE); return firstToken; }
    \\.?                    { /* Skip escaped chars. */ }
    \`                      { yybegin(YYINITIAL); addToken(start,zzStartRead, Token.LITERAL_BACKQUOTE); }
    <<EOF>>                 { addToken(start,zzStartRead-1, Token.LITERAL_BACKQUOTE); return firstToken; }
}

<HEREDOC_EOF_UNQUOTED> {
    /* NOTE: The closing "EOF" is supposed to be on a line by itself -  */
    /* no surrounding whitespace or other chars.  However, the way      */
    /* we're hacking the JFlex scanning, something like ^"EOF"$ doesn't */
    /* work.  Fortunately we don't need the start- and end-line anchors */
    /* since the production after "EOF" will match any line containing  */
    /* EOF and any other chars.                                         */
    /* NOTE2: This case is used for unquoted/double-quoted/backquoted   */
    /* EOF heredocs.  (The rules below were garbled in this listing and */
    /* are reconstructed by analogy with the single-quoted case.)       */
    "EOF"                   { if (start==zzStartRead) { addToken(Token.PREPROCESSOR); addNullToken(); return firstToken; } }
    [^\n\\]+                {}
    \n                      { addToken(start,zzStartRead-1, Token.PREPROCESSOR); addEndToken(INTERNAL_HEREDOC_EOF_UNQUOTED); return firstToken; }
    \\.?                    { /* Skip escaped chars. */ }
    <<EOF>>                 { addToken(start,zzStartRead-1, Token.PREPROCESSOR); addEndToken(INTERNAL_HEREDOC_EOF_UNQUOTED); return firstToken; }
}

<HEREDOC_EOF_SINGLE_QUOTED> {
    /* NOTE: The closing "EOF" is supposed to be on a line by itself -  */
    /* no surrounding whitespace or other chars.  However, the way      */
    /* we're hacking the JFlex scanning, something like ^"EOF"$ doesn't */
    /* work.  Fortunately we don't need the start- and end-line anchors */
    /* since the production after "EOF" will match any line containing  */
    /* EOF and any other chars.                                         */
    "EOF"                   { if (start==zzStartRead) { addToken(Token.PREPROCESSOR); addNullToken(); return firstToken; } }
    [^\n\\]+                {}
    \n                      { addToken(start,zzStartRead-1, Token.PREPROCESSOR); addEndToken(INTERNAL_HEREDOC_EOF_SINGLE_QUOTED); return firstToken; }
    \\.?                    { /* Skip escaped chars. */ }
    <<EOF>>                 { addToken(start,zzStartRead-1, Token.PREPROCESSOR); addEndToken(INTERNAL_HEREDOC_EOF_SINGLE_QUOTED); return firstToken; }
}

<HEREDOC_EOF_DOUBLE_QUOTED> {
    /* NOTE: The closing "EOF" is supposed to be on a line by itself -  */
    /* no surrounding whitespace or other chars.  However, the way      */
    /* we're hacking the JFlex scanning, something like ^"EOF"$ doesn't */
    /* work.  Fortunately we don't need the start- and end-line anchors */
    /* since the production after "EOF" will match any line containing  */
    /* EOF and any other chars.                                         */
*/ "EOF" { if (start==zzStartRead) { addToken(Token.PREPROCESSOR); addNullToken(); return firstToken; } } [^\n\\]+ {} \n { addToken(start,zzStartRead-1, Token.PREPROCESSOR); addEndToken(INTERNAL_HEREDOC_EOF_SINGLE_QUOTED); return firstToken; } \\.? { /* Skip escaped chars. */ } <> { addToken(start,zzStartRead-1, Token.PREPROCESSOR); addEndToken(INTERNAL_HEREDOC_EOF_SINGLE_QUOTED); return firstToken; } } { /* NOTE: The closing "EOT" is supposed to be on a line by itself - */ /* no surrounding whitespace or other chars. However, the way */ /* we're hacking the JFLex scanning, something like ^"EOT"$ doesn't */ /* work. Fortunately we don't need the start- and end-line anchors */ /* since the production after "EOT" will match any line containing */ /* EOF and any other chars. */ /* NOTE2: This case is used for unquoted <> { addToken(start,zzStartRead-1, Token.PREPROCESSOR); addEndToken(INTERNAL_HEREDOC_EOT_UNQUOTED); return firstToken; } } { /* NOTE: The closing "EOT" is supposed to be on a line by itself - */ /* no surrounding whitespace or other chars. However, the way */ /* we're hacking the JFLex scanning, something like ^"EOT"$ doesn't */ /* work. Fortunately we don't need the start- and end-line anchors */ /* since the production after "EOT" will match any line containing */ /* EOT and any other chars. */ "EOT" { if (start==zzStartRead) { addToken(Token.PREPROCESSOR); addNullToken(); return firstToken; } } [^\n\\]+ {} \n { addToken(start,zzStartRead-1, Token.PREPROCESSOR); addEndToken(INTERNAL_HEREDOC_EOT_SINGLE_QUOTED); return firstToken; } \\.? { /* Skip escaped chars. */ } <> { addToken(start,zzStartRead-1, Token.PREPROCESSOR); addEndToken(INTERNAL_HEREDOC_EOT_SINGLE_QUOTED); return firstToken; } } { /* NOTE: The closing "EOT" is supposed to be on a line by itself - */ /* no surrounding whitespace or other chars. However, the way */ /* we're hacking the JFLex scanning, something like ^"EOT"$ doesn't */ /* work. Fortunately we don't need the start- and end-line anchors */ /* since the production after "EOT" will match any line containing */ /* EOT and any other chars. */ "EOT" { if (start==zzStartRead) { addToken(Token.PREPROCESSOR); addNullToken(); return firstToken; } } [^\n\\]+ {} \n { addToken(start,zzStartRead-1, Token.PREPROCESSOR); addEndToken(INTERNAL_HEREDOC_EOT_SINGLE_QUOTED); return firstToken; } \\.? { /* Skip escaped chars. */ } <> { addToken(start,zzStartRead-1, Token.PREPROCESSOR); addEndToken(INTERNAL_HEREDOC_EOT_SINGLE_QUOTED); return firstToken; } } { [^\n\=]+ {} {DocCommentEnd} { yybegin(YYINITIAL); addToken(start,zzStartRead+3, Token.COMMENT_DOCUMENTATION); } = {} \n { addToken(start,zzStartRead-1, Token.COMMENT_DOCUMENTATION); return firstToken; } <> { yybegin(YYINITIAL); addToken(start,zzEndRead, Token.COMMENT_DOCUMENTATION); return firstToken; } }



