/*
 * org.fife.ui.rsyntaxtextarea.modes.RustTokenMaker.flex
 */
/*
 * RustTokenMaker.flex - Scanner for the Rust programming language.
*
* This library is distributed under a modified BSD license. See the included
* LICENSE file for details.
*/
package org.fife.ui.rsyntaxtextarea.modes;
import java.io.*;
import javax.swing.text.Segment;
import org.fife.ui.rsyntaxtextarea.*;
/**
* Scanner for the Rust programming language.
*
* This implementation was created using
* JFlex 1.4.1; however, the generated file
* was modified for performance. Memory allocation needs to be almost
* completely removed to be competitive with the handwritten lexers (subclasses
 * of <code>AbstractTokenMaker</code>), so this class has been modified so that
* Strings are never allocated (via yytext()), and the scanner never has to
* worry about refilling its buffer (needlessly copying chars around).
* We can achieve this because RText always scans exactly 1 line of tokens at a
* time, and hands the scanner this line as an array of characters (a Segment
* really). Since tokens contain pointers to char arrays instead of Strings
* holding their contents, there is no need for allocating new memory for
* Strings.
*
* The actual algorithm generated for scanning has, of course, not been
* modified.
*
* If you wish to regenerate this file yourself, keep in mind the following:
*
 *   - The generated <code>RustTokenMaker.java</code> file will contain two
 *     definitions of both <code>zzRefill</code> and <code>yyreset</code>.
* You should hand-delete the second of each definition (the ones
* generated by the lexer), as these generated methods modify the input
* buffer, which we'll never have to do.
* - You should also change the declaration/definition of zzBuffer to NOT
* be initialized. This is a needless memory allocation for us since we
* will be pointing the array somewhere else anyway.
 *   - You should NOT call <code>yylex()</code> on the generated scanner
 *     directly; rather, you should use <code>getTokenList</code> as you would
 *     with any other <code>TokenMaker</code> instance.
*
*
* @author Robert Futrell
* @version 1.0
*
*/
%%
%public
%class RustTokenMaker
%extends AbstractJFlexCTokenMaker
%unicode
%type org.fife.ui.rsyntaxtextarea.Token
%{
/**
* Token type specifying we're in an invalid multi-line string.
*/
static final int INTERNAL_IN_STRING_INVALID = -1;
/**
* Token type specifying we're in a valid multi-line string.
*/
static final int INTERNAL_IN_STRING_VALID = -2;
/**
* Token type specifying we're in an invalid multi-line byte string.
*/
static final int INTERNAL_IN_BYTE_STRING_INVALID = -3;
/**
* Token type specifying we're in a valid multi-line byte string.
*/
static final int INTERNAL_IN_BYTE_STRING_VALID = -4;
/**
* Token type specifying we're in an invalid multi-line raw string.
*/
static final int INTERNAL_IN_RAW_STRING_INVALID = -(1<<11);
/**
* Token type specifying we're in a valid multi-line raw string.
*/
static final int INTERNAL_IN_RAW_STRING_VALID = -(2<<11);
/**
* Token type specifying we're in an invalid multi-line raw byte string.
*/
static final int INTERNAL_IN_RAW_BYTE_STRING_INVALID = -(3<<11);
/**
* Token type specifying we're in a valid multi-line raw byte string.
*/
static final int INTERNAL_IN_RAW_BYTE_STRING_VALID = -(4<<11);
/**
* When in the STRING state, whether the current string is valid.
*/
private boolean validString;
/**
* When in a raw or raw-byte state, the number of '#' signs in the delimiters.
*/
private int poundCount;
/**
* The delimiter for raw or raw-byte states.
*/
private String delimiter;
/**
* Constructor. This must be here because JFlex does not generate a
* no-parameter constructor.
*/
public RustTokenMaker() {
}
/**
* Adds the token specified to the current linked list of tokens as an
* "end token;" that is, at zzMarkedPos
.
*
* @param tokenType The token's type.
*/
private void addEndToken(int tokenType) {
addToken(zzMarkedPos,zzMarkedPos, tokenType);
}
/**
* Adds the token specified to the current linked list of tokens.
*
* @param tokenType The token's type.
* @see #addToken(int, int, int)
*/
private void addHyperlinkToken(int start, int end, int tokenType) {
int so = start + offsetShift;
addToken(zzBuffer, start,end, tokenType, so, true);
}
/**
* Adds an end token that encodes the information necessary to know
* the delimiter being used for a raw or binary-raw string that spans
* multiple lines.
*
* @param endTokenState The end-token state.
*/
private void addRawStringLiteralEndToken(int endTokenState) {
addEndToken(endTokenState - poundCount);
}
/**
* Adds the token specified to the current linked list of tokens.
*
* @param tokenType The token's type.
*/
private void addToken(int tokenType) {
addToken(zzStartRead, zzMarkedPos-1, tokenType);
}
/**
* Adds the token specified to the current linked list of tokens.
*
* @param tokenType The token's type.
* @see #addHyperlinkToken(int, int, int)
*/
private void addToken(int start, int end, int tokenType) {
int so = start + offsetShift;
addToken(zzBuffer, start,end, tokenType, so, false);
}
/**
* Adds the token specified to the current linked list of tokens.
*
* @param array The character array.
* @param start The starting offset in the array.
* @param end The ending offset in the array.
* @param tokenType The token's type.
* @param startOffset The offset in the document at which this token
* occurs.
* @param hyperlink Whether this token is a hyperlink.
*/
@Override
public void addToken(char[] array, int start, int end, int tokenType,
int startOffset, boolean hyperlink) {
super.addToken(array, start,end, tokenType, startOffset, hyperlink);
zzStartRead = zzMarkedPos;
}
String createDelimiter(int poundCount, char ch) {
if (poundCount == 0) { // Common case
return String.valueOf(ch);
}
StringBuilder sb = new StringBuilder();
sb.append(ch);
for (int i = 0; i < poundCount; i++) {
sb.append('#');
}
return sb.toString();
}
/**
* Returns the closest {@link TokenTypes} "standard" token type for a given
* "internal" token type (e.g. one whose value is < 0
).
*/
@Override
public int getClosestStandardTokenTypeForInternalType(int type) {
switch (type) {
case INTERNAL_IN_BYTE_STRING_INVALID:
case INTERNAL_IN_BYTE_STRING_VALID:
case INTERNAL_IN_RAW_BYTE_STRING_INVALID:
case INTERNAL_IN_RAW_BYTE_STRING_VALID:
case INTERNAL_IN_RAW_STRING_INVALID:
case INTERNAL_IN_RAW_STRING_VALID:
case INTERNAL_IN_STRING_INVALID:
case INTERNAL_IN_STRING_VALID:
return TokenTypes.LITERAL_STRING_DOUBLE_QUOTE;
}
return type;
}
@Override
public String[] getLineCommentStartAndEnd(int languageIndex) {
return new String[] { "//", null };
}
/**
* Returns the first token in the linked list of tokens generated
* from text
. This method must be implemented by
* subclasses so they can correctly implement syntax highlighting.
*
* @param text The text from which to get tokens.
* @param initialTokenType The token type we should start with.
* @param startOffset The offset into the document at which
* text
starts.
* @return The first Token
in a linked list representing
* the syntax highlighted text.
*/
public Token getTokenList(Segment text, int initialTokenType, int startOffset) {
resetTokenList();
this.offsetShift = -text.offset + startOffset;
validString = true;
poundCount = 0;
// Start off in the proper state.
int state;
switch (initialTokenType) {
case INTERNAL_IN_BYTE_STRING_INVALID:
state = BYTE_STRING_LITERAL;
validString = false;
break;
case INTERNAL_IN_BYTE_STRING_VALID:
state = BYTE_STRING_LITERAL;
break;
case INTERNAL_IN_STRING_INVALID:
state = STRING;
validString = false;
break;
case INTERNAL_IN_STRING_VALID:
state = STRING;
break;
case TokenTypes.COMMENT_MULTILINE:
state = MLC;
break;
case TokenTypes.COMMENT_DOCUMENTATION:
state = DOCCOMMENT;
break;
default:
if (initialTokenType<-1024) { // INTERNAL_IN_* states that encode extra info
int main = -(-initialTokenType & 0x0000ff00);
switch (main) {
default: // Should never happen
case INTERNAL_IN_RAW_STRING_INVALID:
state = RAW_STRING_LITERAL;
validString = false;
poundCount = -initialTokenType & 0xff;
delimiter = createDelimiter(poundCount, '"');
break;
case INTERNAL_IN_RAW_STRING_VALID:
state = RAW_STRING_LITERAL;
poundCount = -initialTokenType & 0xff;
delimiter = createDelimiter(poundCount, '"');
break;
case INTERNAL_IN_RAW_BYTE_STRING_INVALID:
state = RAW_BYTE_STRING_LITERAL;
validString = false;
poundCount = -initialTokenType & 0xff;
delimiter = createDelimiter(poundCount, '"');
break;
case INTERNAL_IN_RAW_BYTE_STRING_VALID:
state = RAW_BYTE_STRING_LITERAL;
poundCount = -initialTokenType & 0xff;
delimiter = createDelimiter(poundCount, '"');
break;
}
}
else {
state = YYINITIAL;
}
break;
}
start = text.offset;
s = text;
try {
yyreset(zzReader);
yybegin(state);
return yylex();
} catch (IOException ioe) {
ioe.printStackTrace();
return new TokenImpl();
}
}
/**
* Refills the input buffer.
*
* @return true
if EOF was reached, otherwise
* false
.
*/
private boolean zzRefill() {
return zzCurrentPos>=s.offset+s.count;
}
/**
* Resets the scanner to read from a new input stream.
* Does not close the old reader.
*
* All internal variables are reset, the old input stream
* cannot be reused (internal buffer is discarded and lost).
* Lexical state is set to YY_INITIAL.
*
* @param reader the new input stream
*/
public final void yyreset(Reader reader) {
// 's' has been updated.
zzBuffer = s.array;
/*
* We replaced the line below with the two below it because zzRefill
* no longer "refills" the buffer (since the way we do it, it's always
* "full" the first time through, since it points to the segment's
* array). So, we assign zzEndRead here.
*/
//zzStartRead = zzEndRead = s.offset;
zzStartRead = s.offset;
zzEndRead = zzStartRead + s.count - 1;
zzCurrentPos = zzMarkedPos = zzPushbackPos = s.offset;
zzLexicalState = YYINITIAL;
zzReader = reader;
zzAtBOL = true;
zzAtEOF = false;
}
%}
Letter = ([A-Za-z])
LetterOrUnderscore = ({Letter}|"_")
NonzeroDigit = ([1-9])
BinaryDigit = ([0-1])
Digit = ("0"|{NonzeroDigit})
HexDigit = ({Digit}|[A-Fa-f])
OctalDigit = ([0-7])
AnyCharacterButApostropheOrBackSlash = ([^\\'])
EscapedSourceCharacter = ("u"{HexDigit}{HexDigit}{HexDigit}{HexDigit})
Escape = ("\\"(([bstnfr\"'\\])|([0123]{OctalDigit}?{OctalDigit}?)|({OctalDigit}{OctalDigit}?)|{EscapedSourceCharacter}))
NonSeparator = ([^\t\f\r\n\ \(\)\{\}\[\]\;\,\.\=\>\<\!\~\?\:\+\-\*\/\&\|\^\%\"\']|"#"|"\\")
IdentifierStart = ([:jletter:])
IdentifierPart = ([:jletterdigit:]|("\\"{EscapedSourceCharacter}))
LineTerminator = (\n)
WhiteSpace = ([ \t\f])
CharLiteral = ([\']({AnyCharacterButApostropheOrBackSlash}|{Escape})[\'])
UnclosedCharLiteral = ([\'][^\'\n]*)
ErrorCharLiteral = ({UnclosedCharLiteral}[\'])
MLCBegin = "/*"
MLCEnd = "*/"
DocCommentBegin = "/**"
LineCommentBegin = "//"
LineDocCommentBegin = "///"
DigitOrUnderscore = ({Digit}|[_])
DigitsAndUnderscoresEnd = ({DigitOrUnderscore}*{Digit})
IntegerHelper = (({NonzeroDigit}{DigitsAndUnderscoresEnd}?)|"0")
IntegerLiteral = ({IntegerHelper}[lL]?)
BinaryDigitOrUnderscore = ({BinaryDigit}|[_])
BinaryDigitsAndUnderscores = ({BinaryDigit}({BinaryDigitOrUnderscore}*{BinaryDigit})?)
BinaryLiteral = ("0"[bB]{BinaryDigitsAndUnderscores})
HexDigitOrUnderscore = ({HexDigit}|[_])
HexDigitsAndUnderscores = ({HexDigit}({HexDigitOrUnderscore}*{HexDigit})?)
OctalDigitOrUnderscore = ({OctalDigit}|[_])
OctalDigitsAndUnderscoresEnd= ({OctalDigitOrUnderscore}*{OctalDigit})
HexHelper = ("0"(([xX]{HexDigitsAndUnderscores})|({OctalDigitsAndUnderscoresEnd})))
HexLiteral = ({HexHelper}[lL]?)
FloatHelper1 = ([fFdD]?)
FloatHelper2 = ([eE][+-]?{Digit}+{FloatHelper1})
FloatLiteral1 = ({Digit}+"."({FloatHelper1}|{FloatHelper2}|{Digit}+({FloatHelper1}|{FloatHelper2})))
FloatLiteral2 = ("."{Digit}+({FloatHelper1}|{FloatHelper2}))
FloatLiteral3 = ({Digit}+{FloatHelper2})
FloatLiteral = ({FloatLiteral1}|{FloatLiteral2}|{FloatLiteral3}|({Digit}+[fFdD]))
ErrorNumberFormat = (({IntegerLiteral}|{HexLiteral}|{FloatLiteral}){NonSeparator}+)
BooleanLiteral = ("true"|"false")
Separator = ([\(\)\{\}\[\]])
Separator2 = ([\;,.])
NonAssignmentOperator = ("+"|"-"|"<="|"^"|"++"|"<"|"*"|">="|"%"|"--"|">"|"/"|"!="|"?"|">>"|"!"|"&"|"=="|":"|">>"|"~"|"|"|"&&"|">>>")
AssignmentOperator = ("="|"-="|"*="|"/="|"|="|"&="|"^="|"+="|"%="|"<<="|">>="|">>>=")
Operator = ({NonAssignmentOperator}|{AssignmentOperator})
Identifier = ({IdentifierStart}{IdentifierPart}*)
ErrorIdentifier = ({NonSeparator}+)
URLGenDelim = ([:\/\?#\[\]@])
URLSubDelim = ([\!\$&'\(\)\*\+,;=])
URLUnreserved = ({LetterOrUnderscore}|{Digit}|[\-\.\~])
URLCharacter = ({URLGenDelim}|{URLSubDelim}|{URLUnreserved}|[%])
URLCharacters = ({URLCharacter}*)
URLEndCharacter = ([\/\$]|{Letter}|{Digit})
URL = (((https?|f(tp|ile))"://"|"www.")({URLCharacters}{URLEndCharacter})?)
%state STRING
%state BYTE_STRING_LITERAL
%state RAW_STRING_LITERAL
%state RAW_BYTE_STRING_LITERAL
%state MLC
%state DOCCOMMENT
%state EOL_COMMENT
%state LINE_DOCCOMMENT
%%
{
/* Keywords */
"_" |
"abstract"|
"alignof" |
"as" |
"become" |
"box" |
"break" |
"const" |
"continue" |
"crate" |
"do" |
"dyn" |
"else" |
"enum" |
"extern" |
"final" |
"fn" |
"for" |
"if" |
"impl" |
"in" |
"let" |
"loop" |
"macro" |
"match" |
"mod" |
"move" |
"mut" |
"offsetof" |
"override" |
"priv" |
"proc" |
"pub" |
"pure" |
"ref" |
"self" |
"sizeof" |
"static" |
"struct" |
"super" |
"trait" |
"type" |
"typeof" |
"union" |
"unsafe" |
"unsized" |
"use" |
"virtual" |
"where" |
"while" |
"yield" { addToken(TokenTypes.RESERVED_WORD); }
"return" { addToken(TokenTypes.RESERVED_WORD_2); }
/* Data types. */
"i8" |
"u8" |
"i16" |
"u16" |
"i32" |
"u32" |
"i64" |
"u64" |
"i128" |
"u128" |
"isize" |
"usize" |
"f32" |
"f64" |
"bool" |
"char" |
"str" |
"tup" { addToken(TokenTypes.DATA_TYPE); }
/* Booleans. */
{BooleanLiteral} { addToken(TokenTypes.LITERAL_BOOLEAN); }
{LineTerminator} { addNullToken(); return firstToken; }
{Identifier} { addToken(TokenTypes.IDENTIFIER); }
{WhiteSpace}+ { addToken(TokenTypes.WHITESPACE); }
/* String/Character literals. */
{CharLiteral} { addToken(TokenTypes.LITERAL_CHAR); }
{UnclosedCharLiteral} { addToken(TokenTypes.ERROR_CHAR); addNullToken(); return firstToken; }
{ErrorCharLiteral} { addToken(TokenTypes.ERROR_CHAR); }
[\"] { start = zzMarkedPos-1; validString = true; yybegin(STRING); }
b[\"] { start = zzMarkedPos-2; validString = true; yybegin(BYTE_STRING_LITERAL); }
/* TODO: br(#+)?[\"] { start = zzMarkedPos-1; validString = true; yybegin(RAW_BYTE_STRING_LITERAL); } */
r[#]*[\"] { start = zzMarkedPos - yylength(); validString = true; poundCount = yylength() - 2; delimiter = createDelimiter(poundCount, '#'); yybegin(RAW_STRING_LITERAL); }
br[#]*[\"] { start = zzMarkedPos - yylength(); validString = true; poundCount = yylength() - 3; delimiter = createDelimiter(poundCount, '#'); yybegin(RAW_BYTE_STRING_LITERAL); }
/* Comment literals. */
"/**/" { addToken(TokenTypes.COMMENT_MULTILINE); }
{MLCBegin} { start = zzMarkedPos-2; yybegin(MLC); }
{DocCommentBegin} { start = zzMarkedPos-3; yybegin(DOCCOMMENT); }
{LineCommentBegin} { start = zzMarkedPos-2; yybegin(EOL_COMMENT); }
{LineDocCommentBegin} { start = zzMarkedPos-3; yybegin(LINE_DOCCOMMENT); }
/* Separators. */
{Separator} { addToken(TokenTypes.SEPARATOR); }
{Separator2} { addToken(TokenTypes.IDENTIFIER); }
/* Operators. */
{Operator} { addToken(TokenTypes.OPERATOR); }
/* Numbers */
{IntegerLiteral} { addToken(TokenTypes.LITERAL_NUMBER_DECIMAL_INT); }
{BinaryLiteral} { addToken(TokenTypes.LITERAL_NUMBER_DECIMAL_INT); }
{HexLiteral} { addToken(TokenTypes.LITERAL_NUMBER_HEXADECIMAL); }
{FloatLiteral} { addToken(TokenTypes.LITERAL_NUMBER_FLOAT); }
{ErrorNumberFormat} { addToken(TokenTypes.ERROR_NUMBER_FORMAT); }
{ErrorIdentifier} { addToken(TokenTypes.ERROR_IDENTIFIER); }
/* Ended with a line not in a string or comment. */
<> { addNullToken(); return firstToken; }
/* Catch any other (unhandled) characters and flag them as identifiers. */
. { addToken(TokenTypes.ERROR_IDENTIFIER); }
}
{
[^\\\"]+ {}
\\x{HexDigit}{2} {}
\\x { /* Invalid latin-1 character \xXX */ validString = false; }
\\u{HexDigit}{4} {}
\\u { /* Invalid Unicode character \\uXXXX */ validString = false; }
\\. { /* Skip all escaped chars. */ }
\\ { /* Skip all escape chars for now, even though this isn't right */ }
\" { int type = validString ? Token.LITERAL_STRING_DOUBLE_QUOTE : Token.ERROR_STRING_DOUBLE; addToken(start,zzStartRead, type); yybegin(YYINITIAL); }
<> {
// Strings always continue to the next line
if (validString) {
addToken(start, zzStartRead - 1, Token.LITERAL_STRING_DOUBLE_QUOTE);
addEndToken(INTERNAL_IN_STRING_VALID);
}
else {
addToken(start, zzStartRead - 1, Token.ERROR_STRING_DOUBLE);
addEndToken(INTERNAL_IN_STRING_INVALID);
}
return firstToken;
}
}
{
[^\\\"]+ {}
\\x{HexDigit}{2} {}
\\x { /* Invalid latin-1 character \xXX */ validString = false; }
\\u { /* All Unicode chars, valid or not, are not allowed in byte strings \\uXXXX */ validString = false; }
\\. { /* Skip all escaped chars. */ }
\\ { /* Skip all escape chars for now, even though this isn't right */ }
\" { int type = validString ? Token.LITERAL_STRING_DOUBLE_QUOTE : Token.ERROR_STRING_DOUBLE; addToken(start,zzStartRead, type); yybegin(YYINITIAL); }
<> {
// Strings always continue to the next line
if (validString) {
addToken(start, zzStartRead - 1, Token.LITERAL_STRING_DOUBLE_QUOTE);
addEndToken(INTERNAL_IN_BYTE_STRING_VALID);
}
else {
addToken(start, zzStartRead - 1, Token.ERROR_STRING_DOUBLE);
addEndToken(INTERNAL_IN_BYTE_STRING_INVALID);
}
return firstToken;
}
}
{
[^\"]+ {}
[\"][#]* {
// As long as the token read is at least as long as the delimiter,
// we have found our end token (too long implies just extra '#' signs for
// some reason)
int tokenLength = yylength();
if (tokenLength >= delimiter.length()) {
int type = validString ? Token.LITERAL_STRING_DOUBLE_QUOTE : Token.ERROR_STRING_DOUBLE;
int end = zzStartRead + delimiter.length();
addToken(start, end - 1, type);
zzStartRead = zzCurrentPos = zzMarkedPos = end;
yybegin(YYINITIAL);
}
// Otherwise, there weren't enough '#' signs, so we just ignore this content.
// It is part of the literal
}
<> {
// Strings always continue to the next line
if (validString) {
addToken(start, zzStartRead - 1, Token.LITERAL_STRING_DOUBLE_QUOTE);
addRawStringLiteralEndToken(INTERNAL_IN_RAW_STRING_VALID);
}
else {
addToken(start, zzStartRead - 1, Token.ERROR_STRING_DOUBLE);
addRawStringLiteralEndToken(INTERNAL_IN_RAW_STRING_INVALID);
}
return firstToken;
}
}
{
[^\"]+ {}
[\"][#]* {
// As long as the token read is at least as long as the delimiter,
// we have found our end token (too long implies just extra '#' signs for
// some reason)
int tokenLength = yylength();
if (tokenLength >= delimiter.length()) {
int type = validString ? Token.LITERAL_STRING_DOUBLE_QUOTE : Token.ERROR_STRING_DOUBLE;
int end = zzStartRead + delimiter.length();
addToken(start, end - 1, type);
zzStartRead = zzCurrentPos = zzMarkedPos = end;
yybegin(YYINITIAL);
}
// Otherwise, there weren't enough '#' signs, so we just ignore this content.
// It is part of the literal
}
<> {
// Strings always continue to the next line
if (validString) {
addToken(start, zzStartRead - 1, Token.LITERAL_STRING_DOUBLE_QUOTE);
addRawStringLiteralEndToken(INTERNAL_IN_RAW_BYTE_STRING_VALID);
}
else {
addToken(start, zzStartRead - 1, Token.ERROR_STRING_DOUBLE);
addRawStringLiteralEndToken(INTERNAL_IN_RAW_BYTE_STRING_INVALID);
}
return firstToken;
}
}
{
[^hwf\n\*]+ {}
{URL} { int temp=zzStartRead; addToken(start,zzStartRead-1, TokenTypes.COMMENT_MULTILINE); addHyperlinkToken(temp,zzMarkedPos-1, TokenTypes.COMMENT_MULTILINE); start = zzMarkedPos; }
[hwf] {}
{MLCEnd} { yybegin(YYINITIAL); addToken(start,zzStartRead+1, TokenTypes.COMMENT_MULTILINE); }
\* {}
\n |
<> { addToken(start,zzStartRead-1, TokenTypes.COMMENT_MULTILINE); return firstToken; }
}
{
[^hwf\n\*]+ {}
{URL} {
int temp = zzStartRead;
if (start <= zzStartRead - 1) {
addToken(start,zzStartRead-1, TokenTypes.COMMENT_DOCUMENTATION);
}
addHyperlinkToken(temp,zzMarkedPos-1, TokenTypes.COMMENT_DOCUMENTATION);
start = zzMarkedPos;
}
[hwf] {}
\n { addToken(start,zzStartRead-1, TokenTypes.COMMENT_DOCUMENTATION); return firstToken; }
{MLCEnd} { yybegin(YYINITIAL); addToken(start,zzStartRead+1, TokenTypes.COMMENT_DOCUMENTATION); }
\* {}
<> { yybegin(YYINITIAL); addToken(start,zzEndRead, TokenTypes.COMMENT_DOCUMENTATION); return firstToken; }
}
{
[^hwf]+ {}
{URL} { int temp=zzStartRead; addToken(start,zzStartRead-1, TokenTypes.COMMENT_EOL); addHyperlinkToken(temp,zzMarkedPos-1, TokenTypes.COMMENT_EOL); start = zzMarkedPos; }
[hwf] {}
<> { addToken(start,zzStartRead-1, TokenTypes.COMMENT_EOL); addNullToken(); return firstToken; }
}
{
[^hwf]+ {}
{URL} { int temp=zzStartRead; addToken(start,zzStartRead-1, TokenTypes.COMMENT_DOCUMENTATION); addHyperlinkToken(temp,zzMarkedPos-1, TokenTypes.COMMENT_DOCUMENTATION); start = zzMarkedPos; }
[hwf] {}
<> { addToken(start,zzStartRead-1, TokenTypes.COMMENT_DOCUMENTATION); addNullToken(); return firstToken; }
}