
org.fife.ui.rsyntaxtextarea.modes.DTokenMaker.flex


RSyntaxTextArea is the syntax highlighting text editor for Swing applications. Features include syntax highlighting for 40+ languages, code folding, code completion, regex find and replace, macros, code templates, undo/redo, line numbering and bracket matching.

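For orientation before the source listing: a minimal, illustrative sketch (not part of this artifact) of how RSyntaxTextArea is typically embedded in a Swing application with D highlighting enabled. The class name DEditorDemo is invented for the example; the editor and scroll-pane APIs shown are the library's standard ones, and SyntaxConstants.SYNTAX_STYLE_D is the style that routes documents to the DTokenMaker defined below.

import javax.swing.JFrame;
import javax.swing.SwingUtilities;

import org.fife.ui.rsyntaxtextarea.RSyntaxTextArea;
import org.fife.ui.rsyntaxtextarea.SyntaxConstants;
import org.fife.ui.rtextarea.RTextScrollPane;

/**
 * Minimal sketch: an editor frame whose D syntax highlighting is provided by
 * the DTokenMaker generated from the .flex source listed below.
 */
public class DEditorDemo {

    public static void main(String[] args) {
        SwingUtilities.invokeLater(() -> {
            RSyntaxTextArea textArea = new RSyntaxTextArea(25, 80);
            // "text/d" routes documents to DTokenMaker.
            textArea.setSyntaxEditingStyle(SyntaxConstants.SYNTAX_STYLE_D);
            textArea.setCodeFoldingEnabled(true);

            JFrame frame = new JFrame("D editor demo");
            frame.setContentPane(new RTextScrollPane(textArea));
            frame.pack();
            frame.setLocationRelativeTo(null);
            frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
            frame.setVisible(true);
        });
    }
}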
/*
 * 09/08/2014
 *
 * DTokenMaker.java - Scanner for the D programming language.
 * 
 * This library is distributed under a modified BSD license.  See the included
 * RSyntaxTextArea.License.txt file for details.
 */
package org.fife.ui.rsyntaxtextarea.modes;

import java.io.*;
import javax.swing.text.Segment;

import org.fife.ui.rsyntaxtextarea.*;


/**
 * Scanner for the D programming language.<p>
 *
 * This implementation was created using JFlex 1.4.1; however, the generated
 * file was modified for performance.  Memory allocation needs to be almost
 * completely removed to be competitive with the handwritten lexers (subclasses
 * of <code>AbstractTokenMaker</code>), so this class has been modified so that
 * Strings are never allocated (via yytext()), and the scanner never has to
 * worry about refilling its buffer (needlessly copying chars around).
 * We can achieve this because RText always scans exactly 1 line of tokens at a
 * time, and hands the scanner this line as an array of characters (a Segment
 * really).  Since tokens contain pointers to char arrays instead of Strings
 * holding their contents, there is no need for allocating new memory for
 * Strings.<p>
 *
 * The actual algorithm generated for scanning has, of course, not been
 * modified.<p>
 *
 * If you wish to regenerate this file yourself, keep in mind the following:
 * <ul>
 *   <li>The generated <code>DTokenMaker.java</code> file will contain two
 *       definitions of both <code>zzRefill</code> and <code>yyreset</code>.
 *       You should hand-delete the second of each definition (the ones
 *       generated by the lexer), as these generated methods modify the input
 *       buffer, which we'll never have to do.</li>
 *   <li>You should also change the declaration/definition of zzBuffer to NOT
 *       be initialized.  This is a needless memory allocation for us since we
 *       will be pointing the array somewhere else anyway.</li>
 *   <li>You should NOT call <code>yylex()</code> on the generated scanner
 *       directly; rather, you should use <code>getTokenList</code> as you
 *       would with any other <code>TokenMaker</code> instance.</li>
 * </ul>
 *
 * @author Robert Futrell
 * @version 1.0
 *
 */
%%

%public
%class DTokenMaker
%extends AbstractJFlexCTokenMaker
%unicode
%type org.fife.ui.rsyntaxtextarea.Token


%{

    /**
     * Token type specific to DTokenMaker; this signals that the user is in
     * a nestable multi-line comment.  The nested depth is embedded in the
     * actual end token type.
     */
    private static final int INTERNAL_IN_NESTABLE_MLC = -(1<<11);

    /**
     * When in a (possibly) nested MLC, this is the current nested depth.
     */
    private int nestedMlcDepth;


    /**
     * Constructor.  This must be here because JFlex does not generate a
     * no-parameter constructor.
     */
    public DTokenMaker() {
    }


    /**
     * Adds the token specified to the current linked list of tokens as an
     * "end token;" that is, at zzMarkedPos.
     *
     * @param tokenType The token's type.
     */
    private void addEndToken(int tokenType) {
        addToken(zzMarkedPos,zzMarkedPos, tokenType);
    }


    private void addNestedMlcEndToken() {
        addEndToken(INTERNAL_IN_NESTABLE_MLC - nestedMlcDepth);
    }


    /**
     * Adds the token specified to the current linked list of tokens.
     *
     * @param tokenType The token's type.
     * @see #addToken(int, int, int)
     */
    private void addHyperlinkToken(int start, int end, int tokenType) {
        int so = start + offsetShift;
        addToken(zzBuffer, start,end, tokenType, so, true);
    }


    /**
     * Adds the token specified to the current linked list of tokens.
     *
     * @param tokenType The token's type.
     */
    private void addToken(int tokenType) {
        addToken(zzStartRead, zzMarkedPos-1, tokenType);
    }


    /**
     * Adds the token specified to the current linked list of tokens.
     *
     * @param tokenType The token's type.
     * @see #addHyperlinkToken(int, int, int)
     */
    private void addToken(int start, int end, int tokenType) {
        int so = start + offsetShift;
        addToken(zzBuffer, start,end, tokenType, so, false);
    }


    /**
     * Adds the token specified to the current linked list of tokens.
     *
     * @param array The character array.
     * @param start The starting offset in the array.
     * @param end The ending offset in the array.
     * @param tokenType The token's type.
     * @param startOffset The offset in the document at which this token
     *        occurs.
     * @param hyperlink Whether this token is a hyperlink.
     */
    @Override
    public void addToken(char[] array, int start, int end, int tokenType,
                         int startOffset, boolean hyperlink) {
        super.addToken(array, start,end, tokenType, startOffset, hyperlink);
        zzStartRead = zzMarkedPos;
    }


    @Override
    public int getClosestStandardTokenTypeForInternalType(int type) {
        switch (type) {
            case INTERNAL_IN_NESTABLE_MLC:
                return TokenTypes.COMMENT_MULTILINE;
        }
        return type;
    }


    /**
     * {@inheritDoc}
     */
    @Override
    public String[] getLineCommentStartAndEnd(int languageIndex) {
        return new String[] { "//", null };
    }


    /**
     * Returns the first token in the linked list of tokens generated
     * from text.  This method must be implemented by
     * subclasses so they can correctly implement syntax highlighting.
     *
     * @param text The text from which to get tokens.
     * @param initialTokenType The token type we should start with.
     * @param startOffset The offset into the document at which
     *        text starts.
     * @return The first Token in a linked list representing
     *         the syntax highlighted text.
     */
    public Token getTokenList(Segment text, int initialTokenType, int startOffset) {

        resetTokenList();
        this.offsetShift = -text.offset + startOffset;
        nestedMlcDepth = 0;

        // Start off in the proper state.
        int state = YYINITIAL;
        switch (initialTokenType) {
            case Token.LITERAL_BACKQUOTE:
                state = WYSIWYG_STRING_2;
                break;
            case Token.LITERAL_STRING_DOUBLE_QUOTE:
                state = WYSIWYG_STRING_1;
                break;
            case Token.COMMENT_MULTILINE:
                state = MLC;
                break;
            case Token.COMMENT_DOCUMENTATION:
                state = DOCCOMMENT;
                break;
            case INTERNAL_IN_NESTABLE_MLC:
                state = NESTABLE_MLC;
                break;
            default:
                if (initialTokenType<-1024) {
                    int main = -(-initialTokenType & 0xffffff00);
                    switch (main) {
                        default: // Should never happen
                        case INTERNAL_IN_NESTABLE_MLC:
                            state = NESTABLE_MLC;
                            break;
                    }
                    nestedMlcDepth = -initialTokenType&0xff;
                }
                else {
                    state = YYINITIAL;
                }
        }

        start = text.offset;
        s = text;
        try {
            yyreset(zzReader);
            yybegin(state);
            return yylex();
        } catch (IOException ioe) {
            ioe.printStackTrace();
            return new TokenImpl();
        }

    }


    /**
     * Refills the input buffer.
     *
     * @return true if EOF was reached, otherwise false.
     * @exception IOException if any I/O-Error occurs.
     */
    private boolean zzRefill() {
        return zzCurrentPos>=s.offset+s.count;
    }


    /**
     * Resets the scanner to read from a new input stream.
     * Does not close the old reader.
     *
     * All internal variables are reset, the old input stream
     * cannot be reused (internal buffer is discarded and lost).
     * Lexical state is set to YY_INITIAL.
     *
     * @param reader the new input stream
     */
    public final void yyreset(Reader reader) {
        // 's' has been updated.
        zzBuffer = s.array;
        /*
         * We replaced the line below with the two below it because zzRefill
         * no longer "refills" the buffer (since the way we do it, it's always
         * "full" the first time through, since it points to the segment's
         * array).  So, we assign zzEndRead here.
         */
        //zzStartRead = zzEndRead = s.offset;
        zzStartRead = s.offset;
        zzEndRead = zzStartRead + s.count - 1;
        zzCurrentPos = zzMarkedPos = zzPushbackPos = s.offset;
        zzLexicalState = YYINITIAL;
        zzReader = reader;
        zzAtBOL  = true;
        zzAtEOF  = false;
    }

%}

Letter                          = ([A-Za-z])
LetterOrUnderscore              = ({Letter}|"_")
NonzeroDigit                    = ([1-9])
BinaryDigit                     = ([0-1])
Digit                           = ("0"|{NonzeroDigit})
HexDigit                        = ({Digit}|[A-Fa-f])
OctalDigit                      = ([0-7])
AnyCharacterButApostropheOrBackSlash    = ([^\\'])
AnyCharacterButDoubleQuoteOrBackSlash   = ([^\\\"\n])
EscapedSourceCharacter          = ("u"{HexDigit}{HexDigit}{HexDigit}{HexDigit})
Escape                          = ("\\"(([btnfr\"'\\])|([0123]{OctalDigit}?{OctalDigit}?)|({OctalDigit}{OctalDigit}?)|{EscapedSourceCharacter}))
NonSeparator                    = ([^\t\f\r\n\ \(\)\{\}\[\]\;\,\.\=\>\<\!\~\?\:\+\-\*\/\&\|\^\%\"\'\`]|"#"|"\\")
IdentifierStart                 = ({LetterOrUnderscore}|"$")
IdentifierPart                  = ({IdentifierStart}|{Digit}|("\\"{EscapedSourceCharacter}))

LineTerminator                  = (\n)
WhiteSpace                      = ([ \t\f])

StringPostfix                   = ([cwd])
CharLiteral                     = ([\']({AnyCharacterButApostropheOrBackSlash}|{Escape})[\']{StringPostfix}?)
UnclosedCharLiteral             = ([\'][^\'\n]*)
ErrorCharLiteral                = ({UnclosedCharLiteral}[\']{StringPostfix}?)
StringLiteral                   = ([\"]({AnyCharacterButDoubleQuoteOrBackSlash}|{Escape})*[\"]{StringPostfix}?)
UnclosedStringLiteral           = ([\"]([\\].|[^\\\"])*[^\"]?)
HexStringLiteral                = ([x]{StringLiteral})
UnclosedHexStringLiteral        = ([x]{UnclosedStringLiteral})
ErrorStringLiteral              = ({UnclosedStringLiteral}[\"]{StringPostfix}?)
WysiwygStringLiteralStart       = ("r"[\"])
WysiwygStringLiteralStart2      = ("`")

MLCBegin                        = "/*"
MLCEnd                          = "*/"
DocCommentBegin                 = "/**"
NestableMLCBegin                = "/+"
LineCommentBegin                = "//"
LineDocCommentBegin             = "///"

DigitOrUnderscore               = ({Digit}|[_])
DigitsAndUnderscoresEnd         = ({DigitOrUnderscore}*{Digit})
IntegerHelper                   = (({NonzeroDigit}{DigitsAndUnderscoresEnd}?)|"0")
IntegerLiteral                  = ({IntegerHelper}[lL]?)
BinaryDigitOrUnderscore         = ({BinaryDigit}|[_])
BinaryDigitsAndUnderscores      = ({BinaryDigit}({BinaryDigitOrUnderscore}*{BinaryDigit})?)
BinaryLiteral                   = ("0"[bB]{BinaryDigitsAndUnderscores})
HexDigitOrUnderscore            = ({HexDigit}|[_])
HexDigitsAndUnderscores         = ({HexDigit}({HexDigitOrUnderscore}*{HexDigit})?)
OctalDigitOrUnderscore          = ({OctalDigit}|[_])
OctalDigitsAndUnderscoresEnd    = ({OctalDigitOrUnderscore}*{OctalDigit})
HexHelper                       = ("0"(([xX]{HexDigitsAndUnderscores})|({OctalDigitsAndUnderscoresEnd})))
NonFloatSuffix                  = (([uU][lL]?)|([lL][uU]?))
HexLiteral                      = ({HexHelper}{NonFloatSuffix}?)
FloatHelper1                    = ([fFdD]?)
FloatHelper2                    = ([eE][+-]?{Digit}+{FloatHelper1})
FloatLiteral1                   = ({Digit}+"."({FloatHelper1}|{FloatHelper2}|{Digit}+({FloatHelper1}|{FloatHelper2})))
FloatLiteral2                   = ("."{Digit}+({FloatHelper1}|{FloatHelper2}))
FloatLiteral3                   = ({Digit}+{FloatHelper2})
FloatLiteral                    = ({FloatLiteral1}|{FloatLiteral2}|{FloatLiteral3}|({Digit}+[fFdD]))
ErrorNumberFormat               = (({IntegerLiteral}|{HexLiteral}|{FloatLiteral}){NonSeparator}+)
BooleanLiteral                  = ("true"|"false")

Separator                       = ([\(\)\{\}\[\]])
Separator2                      = ([\;,.])

NonAssignmentOperator           = ("+"|"-"|"<="|"^"|"++"|"<"|"*"|">="|"%"|"--"|">"|"/"|"!="|"?"|">>"|"!"|"&"|"=="|":"|">>"|"~"|"|"|"&&"|">>>")
AssignmentOperator              = ("="|"-="|"*="|"/="|"|="|"&="|"^="|"+="|"%="|"<<="|">>="|">>>=")
Operator                        = ({NonAssignmentOperator}|{AssignmentOperator})

Identifier                      = ({IdentifierStart}{IdentifierPart}*)
ErrorIdentifier                 = ({NonSeparator}+)

Annotation                      = ("@"{Identifier}?)

URLGenDelim                     = ([:\/\?#\[\]@])
URLSubDelim                     = ([\!\$&'\(\)\*\+,;=])
URLUnreserved                   = ({LetterOrUnderscore}|{Digit}|[\-\.\~])
URLCharacter                    = ({URLGenDelim}|{URLSubDelim}|{URLUnreserved}|[%])
URLCharacters                   = ({URLCharacter}*)
URLEndCharacter                 = ([\/\$]|{Letter}|{Digit})
URL                             = (((https?|f(tp|ile))"://"|"www.")({URLCharacters}{URLEndCharacter})?)


%state MLC
%state DOCCOMMENT
%state NESTABLE_MLC
%state EOL_COMMENT
%state EOL_DOCCOMMENT
%state WYSIWYG_STRING_1
%state WYSIWYG_STRING_2


%%

<YYINITIAL> {

    /* Keywords */
    "abstract" | "alias" | "align" | "asm" | "assert" | "auto" |
    "body" | "break" | "case" | "cast" | "catch" | "class" |
    "const" | "continue" | "debug" | "default" | "delegate" | "delete" |
    "deprecated" | "do" | "else" | "enum" | "export" | "extern" |
    "final" | "finally" | "for" | "foreach" | "foreach_reverse" | "function" |
    "goto" | "if" | "immutable" | "import" | "in" | "inout" |
    "interface" | "invariant" | "is" | "lazy" | "macro" | "mixin" |
    "module" | "new" | "nothrow" | "null" | "out" | "override" |
    "package" | "pragma" | "private" | "protected" | "public" | "pure" |
    "ref" | "scope" | "shared" | "static" | "struct" | "super" |
    "switch" | "synchronized" | "template" | "this" | "throw" | "try" |
    "typedef" | "typeid" | "typeof" | "union" | "unittest" | "version" |
    "void" | "volatile" | "while" | "with" |
    "__FILE__" | "__MODULE__" | "__LINE__" | "__FUNCTION__" | "__PRETTY_FUNCTION__" |
    "__gshared" | "__traits" | "__vector" | "__parameters"      { addToken(Token.RESERVED_WORD); }
    "return"                        { addToken(Token.RESERVED_WORD_2); }

    /* Data types. */
    "string" | "wstring" | "dstring" | "size_t" | "ptrdiff_t" |
    "bool" | "byte" | "cdouble" | "cent" | "cfloat" | "char" |
    "creal" | "dchar" | "double" | "float" | "idouble" | "ifloat" |
    "ireal" | "int" | "long" | "real" | "short" | "ubyte" |
    "ucent" | "uint" | "ulong" | "ushort" | "wchar"             { addToken(Token.DATA_TYPE); }

    /* Booleans. */
    {BooleanLiteral}                { addToken(Token.LITERAL_BOOLEAN); }

    /* Standard library (TODO) */

    {LineTerminator}                { addNullToken(); return firstToken; }

    {Identifier}                    { addToken(Token.IDENTIFIER); }

    {WhiteSpace}+                   { addToken(Token.WHITESPACE); }

    /* String/Character literals. */
    {CharLiteral}                   { addToken(Token.LITERAL_CHAR); }
    {UnclosedCharLiteral}           { addToken(Token.ERROR_CHAR); addNullToken(); return firstToken; }
    {ErrorCharLiteral}              { addToken(Token.ERROR_CHAR); }
    {StringLiteral}                 { addToken(Token.LITERAL_STRING_DOUBLE_QUOTE); }
    {UnclosedStringLiteral}         { addToken(Token.ERROR_STRING_DOUBLE); addNullToken(); return firstToken; }
    {ErrorStringLiteral}            { addToken(Token.ERROR_STRING_DOUBLE); }
    {HexStringLiteral}              { addToken(Token.LITERAL_STRING_DOUBLE_QUOTE); }
    {UnclosedHexStringLiteral}      { addToken(Token.ERROR_STRING_DOUBLE); addNullToken(); return firstToken; }
    {WysiwygStringLiteralStart}     { addToken(Token.LITERAL_STRING_DOUBLE_QUOTE); yybegin(WYSIWYG_STRING_1); }
    {WysiwygStringLiteralStart2}    { addToken(Token.LITERAL_BACKQUOTE); yybegin(WYSIWYG_STRING_2); }

    /* Comment literals. */
    "/**/"                          { addToken(Token.COMMENT_MULTILINE); }
    {MLCBegin}                      { start = zzMarkedPos-2; yybegin(MLC); }
    {DocCommentBegin}               { start = zzMarkedPos-3; yybegin(DOCCOMMENT); }
    {NestableMLCBegin}              { start = zzMarkedPos-2; nestedMlcDepth = 1; yybegin(NESTABLE_MLC); }
    {LineCommentBegin}              { start = zzMarkedPos-2; yybegin(EOL_COMMENT); }
    {LineDocCommentBegin}           { start = zzMarkedPos-3; yybegin(EOL_DOCCOMMENT); }

    /* Annotations. */
    {Annotation}                    { addToken(Token.ANNOTATION); }

    /* Separators. */
    {Separator}                     { addToken(Token.SEPARATOR); }
    {Separator2}                    { addToken(Token.IDENTIFIER); }

    /* Operators. */
    {Operator}                      { addToken(Token.OPERATOR); }

    /* Numbers */
    {IntegerLiteral}                { addToken(Token.LITERAL_NUMBER_DECIMAL_INT); }
    {BinaryLiteral}                 { addToken(Token.LITERAL_NUMBER_DECIMAL_INT); }
    {HexLiteral}                    { addToken(Token.LITERAL_NUMBER_HEXADECIMAL); }
    {FloatLiteral}                  { addToken(Token.LITERAL_NUMBER_FLOAT); }
    {ErrorNumberFormat}             { addToken(Token.ERROR_NUMBER_FORMAT); }

    {ErrorIdentifier}               { addToken(Token.ERROR_IDENTIFIER); }

    /* Ended with a line not in a string or comment. */
    <<EOF>>                         { addNullToken(); return firstToken; }

    /* Catch any other (unhandled) characters and flag them as identifiers. */
    .                               { addToken(Token.ERROR_IDENTIFIER); }

}


<MLC> {

    [^hwf\n\*]+                     {}
    {URL}                           { int temp=zzStartRead; addToken(start,zzStartRead-1, Token.COMMENT_MULTILINE); addHyperlinkToken(temp,zzMarkedPos-1, Token.COMMENT_MULTILINE); start = zzMarkedPos; }
    [hwf]                           {}

    {MLCEnd}                        { yybegin(YYINITIAL); addToken(start,zzStartRead+1, Token.COMMENT_MULTILINE); }
    \*                              {}
    \n |
    <<EOF>>                         { addToken(start,zzStartRead-1, Token.COMMENT_MULTILINE); return firstToken; }

}


<DOCCOMMENT> {

    [^hwf\n\*]+                     {}
    {URL}                           { int temp=zzStartRead; addToken(start,zzStartRead-1, Token.COMMENT_DOCUMENTATION); addHyperlinkToken(temp,zzMarkedPos-1, Token.COMMENT_DOCUMENTATION); start = zzMarkedPos; }
    [hwf]                           {}

    {MLCEnd}                        { yybegin(YYINITIAL); addToken(start,zzStartRead+1, Token.COMMENT_DOCUMENTATION); }
    \*                              {}
    \n |
    <<EOF>>                         { yybegin(YYINITIAL); addToken(start,zzEndRead, Token.COMMENT_DOCUMENTATION); return firstToken; }

}


<NESTABLE_MLC> {

    [^hwf\n\+\/]+                   {}
    {URL}                           { int temp=zzStartRead; addToken(start,zzStartRead-1, Token.COMMENT_MULTILINE); addHyperlinkToken(temp,zzMarkedPos-1, Token.COMMENT_MULTILINE); start = zzMarkedPos; }
    [hwf]                           {}

    {NestableMLCBegin}              { nestedMlcDepth++; }
    "/"                             {}
" + nestedMlcDepth); if (--nestedMlcDepth==0) { addToken(start,zzStartRead+1, Token.COMMENT_MULTILINE); yybegin(YYINITIAL); } } \+ {} \n | <> { addToken(start,zzStartRead-1, Token.COMMENT_MULTILINE); addNestedMlcEndToken(); return firstToken; } } { [^hwf\n]+ {} {URL} { int temp=zzStartRead; addToken(start,zzStartRead-1, Token.COMMENT_EOL); addHyperlinkToken(temp,zzMarkedPos-1, Token.COMMENT_EOL); start = zzMarkedPos; } [hwf] {} \n | <> { addToken(start,zzStartRead-1, Token.COMMENT_EOL); addNullToken(); return firstToken; } } { [^hwf\n]+ {} {URL} { int temp=zzStartRead; addToken(start,zzStartRead-1, Token.COMMENT_DOCUMENTATION); addHyperlinkToken(temp,zzMarkedPos-1, Token.COMMENT_DOCUMENTATION); start = zzMarkedPos; } [hwf] {} \n | <> { addToken(start,zzStartRead-1, Token.COMMENT_DOCUMENTATION); addNullToken(); return firstToken; } } { [^\"]+ { addToken(Token.LITERAL_STRING_DOUBLE_QUOTE); } \" { addToken(Token.LITERAL_STRING_DOUBLE_QUOTE); yybegin(YYINITIAL); } <> { if (firstToken==null) { addToken(Token.LITERAL_STRING_DOUBLE_QUOTE); } return firstToken; } } { [^\`]+ { addToken(Token.LITERAL_BACKQUOTE); } \` { addToken(Token.LITERAL_BACKQUOTE); yybegin(YYINITIAL); } <> { if (firstToken==null) { addToken(Token.LITERAL_BACKQUOTE); } return firstToken; } }




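One non-obvious detail in the listing is how a line that ends inside a nested /+ ... +/ comment is remembered: addNestedMlcEndToken() packs the nesting depth into the negative end-token type, and getTokenList unpacks it on the next line with the & 0xffffff00 and & 0xff masks. The following is a standalone worked example of that arithmetic; the constant value is copied from the source, but the demo class and the encode helper are hypothetical.

public class NestedMlcDepthDemo {

    // Same constant as in the scanner: -(1<<11) == -2048.
    private static final int INTERNAL_IN_NESTABLE_MLC = -(1 << 11);

    // Encode: what addNestedMlcEndToken() stores as the end token type.
    static int encode(int depth) {
        return INTERNAL_IN_NESTABLE_MLC - depth;
    }

    public static void main(String[] args) {
        int type = encode(3);                 // -2051
        int main = -(-type & 0xffffff00);     // -2048, i.e. INTERNAL_IN_NESTABLE_MLC
        int depth = -type & 0xff;             // 3
        System.out.println(type + " " + (main == INTERNAL_IN_NESTABLE_MLC) + " " + depth);
    }
}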