// org.fife.ui.rsyntaxtextarea.modes.LispTokenMaker.flex
/*
* 11/13/2004
*
* LispTokenMaker.java - Scanner for the Lisp programming language.
*
* This library is distributed under a modified BSD license. See the included
* LICENSE file for details.
*/
package org.fife.ui.rsyntaxtextarea.modes;
import java.io.*;
import javax.swing.text.Segment;
import org.fife.ui.rsyntaxtextarea.*;
/**
* Scanner for the Lisp programming language.
*
* This implementation was created using
* JFlex 1.4.1; however, the generated file
* was modified for performance. Memory allocation needs to be almost
* completely removed to be competitive with the handwritten lexers (subclasses
 * of <code>AbstractTokenMaker</code>), so this class has been modified so that
* Strings are never allocated (via yytext()), and the scanner never has to
* worry about refilling its buffer (needlessly copying chars around).
* We can achieve this because RText always scans exactly 1 line of tokens at a
* time, and hands the scanner this line as an array of characters (a Segment
* really). Since tokens contain pointers to char arrays instead of Strings
* holding their contents, there is no need for allocating new memory for
* Strings.
*
* The actual algorithm generated for scanning has, of course, not been
* modified.
*
* If you wish to regenerate this file yourself, keep in mind the following:
*
 *   - The generated <code>LispTokenMaker.java</code> file will contain two
 *     definitions of both <code>zzRefill</code> and <code>yyreset</code>.
* You should hand-delete the second of each definition (the ones
* generated by the lexer), as these generated methods modify the input
* buffer, which we'll never have to do.
* - You should also change the declaration/definition of zzBuffer to NOT
* be initialized. This is a needless memory allocation for us since we
* will be pointing the array somewhere else anyway.
 *   - You should NOT call <code>yylex()</code> on the generated scanner
 *     directly; rather, you should use <code>getTokenList</code> as you would
 *     with any other <code>TokenMaker</code> instance.
*
*
* @author Robert Futrell
* @version 0.5
*
*/
%%
%public
%class LispTokenMaker
%extends AbstractJFlexTokenMaker
%unicode
%type org.fife.ui.rsyntaxtextarea.Token
%{
/**
 * Constructor.  This must be here because JFlex does not generate a
 * no-parameter constructor.
 */
public LispTokenMaker() {
}
/**
 * Adds a hyperlink token (e.g. a URL inside a comment) to the current
 * linked list of tokens.
 *
 * @param start The starting offset of the token in the buffer.
 * @param end The ending offset of the token in the buffer (inclusive).
 * @param tokenType The token's type.
 * @see #addToken(int, int, int)
 */
private void addHyperlinkToken(int start, int end, int tokenType) {
	// Document offset = buffer offset shifted by the segment's position.
	addToken(zzBuffer, start, end, tokenType, start + offsetShift, true);
}
/**
 * Adds the token just matched by the scanner (from <code>zzStartRead</code>
 * to <code>zzMarkedPos-1</code>) to the current linked list of tokens.
 *
 * @param tokenType The token's type.
 * @see #addToken(int, int, int)
 */
private void addToken(int tokenType) {
addToken(zzStartRead, zzMarkedPos-1, tokenType);
}
/**
 * Adds a (non-hyperlink) token to the current linked list of tokens.
 *
 * @param start The starting offset of the token in the buffer.
 * @param end The ending offset of the token in the buffer (inclusive).
 * @param tokenType The token's type.
 * @see #addHyperlinkToken(int, int, int)
 */
private void addToken(int start, int end, int tokenType) {
	// Document offset = buffer offset shifted by the segment's position.
	addToken(zzBuffer, start, end, tokenType, start + offsetShift, false);
}
/**
 * Adds the token specified to the current linked list of tokens.
 *
 * @param array The character array.
 * @param start The starting offset in the array.
 * @param end The ending offset in the array.
 * @param tokenType The token's type.
 * @param startOffset The offset in the document at which this token
 * occurs.
 * @param hyperlink Whether this token is a hyperlink.
 */
@Override
public void addToken(char[] array, int start, int end, int tokenType,
int startOffset, boolean hyperlink) {
super.addToken(array, start,end, tokenType, startOffset, hyperlink);
// Advance the scanner's read position past the token just emitted so the
// next addToken(int) call starts where this token ended.
zzStartRead = zzMarkedPos;
}
/**
 * Returns the text to place at the beginning and end of a line to
 * "comment" it in this language.  Lisp line comments start with
 * <code>;</code> and have no end delimiter.
 *
 * @param languageIndex The language index (ignored; Lisp has no
 *        embedded languages).
 * @return The start and end strings; the end string is always
 *         <code>null</code>.
 */
@Override
public String[] getLineCommentStartAndEnd(int languageIndex) {
	String[] startAndEnd = { ";", null };
	return startAndEnd;
}
/**
 * Returns the first token in the linked list of tokens generated
 * from <code>text</code>.  This method must be implemented by
 * subclasses so they can correctly implement syntax highlighting.
 *
 * @param text The text from which to get tokens.
 * @param initialTokenType The token type we should start with.
 * @param startOffset The offset into the document at which
 *        <code>text</code> starts.
 * @return The first <code>Token</code> in a linked list representing
 *         the syntax highlighted text.
 */
public Token getTokenList(Segment text, int initialTokenType, int startOffset) {
resetTokenList();
// Maps buffer offsets (segment-relative) to document offsets.
this.offsetShift = -text.offset + startOffset;
// Start off in the proper state.
int state = Token.NULL;
switch (initialTokenType) {
case Token.COMMENT_MULTILINE:
state = MLC;
start = text.offset;
break;
case Token.LITERAL_STRING_DOUBLE_QUOTE:
state = STRING;
start = text.offset;
break;
default:
state = Token.NULL;
}
// Point the scanner at this line's segment; yyreset() reads 's'.
s = text;
try {
yyreset(zzReader);
yybegin(state);
return yylex();
} catch (IOException ioe) {
// Should never happen: zzRefill() never touches the reader.
ioe.printStackTrace();
return new TokenImpl();
}
}
/**
 * Refills the input buffer.  In this modified scanner the buffer always
 * holds the entire line being scanned, so this never actually reads more
 * input; it just reports whether the end of the segment was reached.
 *
 * @return <code>true</code> if EOF was reached, otherwise
 *         <code>false</code>.
 */
private boolean zzRefill() {
return zzCurrentPos>=s.offset+s.count;
}
/**
 * Resets the scanner to read from a new input stream.
 * Does not close the old reader.
 *
 * All internal variables are reset, the old input stream
 * cannot be reused (internal buffer is discarded and lost).
 * Lexical state is set to YY_INITIAL.
 *
 * @param reader the new input stream (kept only for the field assignment;
 *        the scanner actually reads from the segment <code>s</code>)
 */
public final void yyreset(Reader reader) {
// 's' has been updated by getTokenList() before this is called; point the
// scan buffer directly at the segment's array (no copying).
zzBuffer = s.array;
/*
 * We replaced the line below with the two below it because zzRefill
 * no longer "refills" the buffer (since the way we do it, it's always
 * "full" the first time through, since it points to the segment's
 * array). So, we assign zzEndRead here.
 */
//zzStartRead = zzEndRead = s.offset;
zzStartRead = s.offset;
// Last valid index in the segment, inclusive.
zzEndRead = zzStartRead + s.count - 1;
zzCurrentPos = zzMarkedPos = zzPushbackPos = s.offset;
zzLexicalState = YYINITIAL;
zzReader = reader;
zzAtBOL = true;
zzAtEOF = false;
}
%}
Letter = [A-Za-z]
LetterOrUnderscore = ({Letter}|"_")
NonzeroDigit = [1-9]
Digit = ("0"|{NonzeroDigit})
HexDigit = ({Digit}|[A-Fa-f])
OctalDigit = ([0-7])
EscapedSourceCharacter = ("u"{HexDigit}{HexDigit}{HexDigit}{HexDigit})
NonSeparator = ([^\t\f\r\n\ \(\)\{\}\[\]\;\,\.\=\>\<\!\~\?\:\+\-\*\/\&\|\^\%\"\']|"#"|"\\")
IdentifierStart = ({LetterOrUnderscore}|"$")
IdentifierPart = ({IdentifierStart}|{Digit}|("\\"{EscapedSourceCharacter}))
LineTerminator = (\n)
WhiteSpace = ([ \t\f])
MLCBegin = "#|"
MLCEnd = "|#"
LineCommentBegin = ";"
IntegerHelper1 = (({NonzeroDigit}{Digit}*)|"0")
IntegerHelper2 = ("0"(([xX]{HexDigit}+)|({OctalDigit}*)))
IntegerLiteral = ({IntegerHelper1}[lL]?)
HexLiteral = ({IntegerHelper2}[lL]?)
FloatHelper1 = ([fFdD]?)
FloatHelper2 = ([eE][+-]?{Digit}+{FloatHelper1})
FloatLiteral1 = ({Digit}+"."({FloatHelper1}|{FloatHelper2}|{Digit}+({FloatHelper1}|{FloatHelper2})))
FloatLiteral2 = ("."{Digit}+({FloatHelper1}|{FloatHelper2}))
FloatLiteral3 = ({Digit}+{FloatHelper2})
FloatLiteral = ({FloatLiteral1}|{FloatLiteral2}|{FloatLiteral3}|({Digit}+[fFdD]))
ErrorNumberFormat = (({IntegerLiteral}|{HexLiteral}|{FloatLiteral}){NonSeparator}+)
Separator = ([\(\)])
NonAssignmentOperator = ("+"|"-"|"<="|"^"|"++"|"<"|"*"|">="|"%"|"--"|">"|"/"|"!="|"?"|">>"|"!"|"&"|"=="|":"|">>"|"~"|"|"|"&&"|">>>")
AssignmentOperator = ("="|"-="|"*="|"/="|"|="|"&="|"^="|"+="|"%="|"<<="|">>="|">>>=")
Operator = ({NonAssignmentOperator}|{AssignmentOperator})
Identifier = ({IdentifierStart}{IdentifierPart}*)
ErrorIdentifier = ({NonSeparator}+)
URLGenDelim = ([:\/\?#\[\]@])
URLSubDelim = ([\!\$&'\(\)\*\+,;=])
URLUnreserved = ({LetterOrUnderscore}|{Digit}|[\-\.\~])
URLCharacter = ({URLGenDelim}|{URLSubDelim}|{URLUnreserved}|[%])
URLCharacters = ({URLCharacter}*)
URLEndCharacter = ([\/\$]|{Letter}|{Digit})
URL = (((https?|f(tp|ile))"://"|"www.")({URLCharacters}{URLEndCharacter})?)
%state STRING
%state MLC
%state EOL_COMMENT
%%
<YYINITIAL> {

	/* Definition keywords. */
	"defclass" |
	"defconstant" |
	"defgeneric" |
	"define-compiler-macro" |
	"define-condition" |
	"define-method-combination" |
	"define-modify-macro" |
	"define-setf-expander" |
	"define-symbol-macro" |
	"defmacro" |
	"defmethod" |
	"defpackage" |
	"defparameter" |
	"defsetf" |
	"defstruct" |
	"deftype" |
	"defun" |
	"defvar"					{ addToken(Token.RESERVED_WORD); }

	/* Control-flow, binding and condition-system keywords. */
	"abort" |
	"assert" |
	"block" |
	"break" |
	"case" |
	"catch" |
	"ccase" |
	"cerror" |
	"cond" |
	"ctypecase" |
	"declaim" |
	"declare" |
	"do" |
	"do*" |
	"do-all-symbols" |
	"do-external-symbols" |
	"do-symbols" |
	"dolist" |
	"dotimes" |
	"ecase" |
	"error" |
	"etypecase" |
	"eval-when" |
	"flet" |
	"handler-bind" |
	"handler-case" |
	"if" |
	"ignore-errors" |
	"in-package" |
	"labels" |
	"lambda" |
	"let" |
	"let*" |
	"locally" |
	"loop" |
	"macrolet" |
	"multiple-value-bind" |
	"proclaim" |
	"prog" |
	"prog*" |
	"prog1" |
	"prog2" |
	"progn" |
	"progv" |
	"provide" |
	"require" |
	"restart-bind" |
	"restart-case" |
	"restart-name" |
	"return" |
	"return-from" |
	"signal" |
	"symbol-macrolet" |
	"tagbody" |
	"the" |
	"throw" |
	"typecase" |
	"unless" |
	"unwind-protect" |
	"when" |
	"with-accessors" |
	"with-compilation-unit" |
	"with-condition-restarts" |
	"with-hash-table-iterator" |
	"with-input-from-string" |
	"with-open-file" |
	"with-open-stream" |
	"with-output-to-string" |
	"with-package-iterator" |
	"with-simple-restart" |
	"with-slots" |
	"with-standard-io-syntax"	{ addToken(Token.RESERVED_WORD); }

	{LineTerminator}			{ addNullToken(); return firstToken; }

	{Identifier}				{ addToken(Token.IDENTIFIER); }

	{WhiteSpace}+				{ addToken(Token.WHITESPACE); }

	/* String, multi-line comment and line comment starts. */
	[\"]						{ start = zzMarkedPos-1; yybegin(STRING); }
	{MLCBegin}					{ start = zzMarkedPos-2; yybegin(MLC); }
	{LineCommentBegin}			{ start = zzMarkedPos-1; yybegin(EOL_COMMENT); }

	{Separator}					{ addToken(Token.SEPARATOR); }
	{Operator}					{ addToken(Token.OPERATOR); }

	/* Numbers */
	{IntegerLiteral}			{ addToken(Token.LITERAL_NUMBER_DECIMAL_INT); }
	{HexLiteral}				{ addToken(Token.LITERAL_NUMBER_HEXADECIMAL); }
	{FloatLiteral}				{ addToken(Token.LITERAL_NUMBER_FLOAT); }
	{ErrorNumberFormat}			{ addToken(Token.ERROR_NUMBER_FORMAT); }

	{ErrorIdentifier}			{ addToken(Token.ERROR_IDENTIFIER); }

	/* Ended with a line not in a string or comment. */
	<<EOF>>						{ addNullToken(); return firstToken; }

	/* Catch any other (unhandled) characters and flag them as bad. */
	.							{ addToken(Token.ERROR_IDENTIFIER); }

}
<STRING> {

	/* Skip runs of ordinary string characters. */
	[^\n\\\"]+				{}
	/* Unterminated string at EOL: emit what we have and stop. */
	\n						{ addToken(start,zzStartRead-1, Token.LITERAL_STRING_DOUBLE_QUOTE); return firstToken; }
	\\.?					{ /* Skip escaped chars. */ }
	/* Closing quote: emit the whole string and return to normal scanning. */
	\"						{ yybegin(YYINITIAL); addToken(start,zzStartRead, Token.LITERAL_STRING_DOUBLE_QUOTE); }
	<<EOF>>					{ addToken(start,zzStartRead-1, Token.LITERAL_STRING_DOUBLE_QUOTE); return firstToken; }

}
<MLC> {

	/* 'h', 'w' and 'f' are excluded so {URL} ("http", "www", "ftp"/"file")
	   gets a chance to match; '|' is excluded so {MLCEnd} can match. */
	[^hwf\n\|]+				{}
	{URL}					{ int temp=zzStartRead; addToken(start,zzStartRead-1, Token.COMMENT_MULTILINE); addHyperlinkToken(temp,zzMarkedPos-1, Token.COMMENT_MULTILINE); start = zzMarkedPos; }
	[hwf]					{}

	/* Comment continues past this line: emit it and stop. */
	\n						{ addToken(start,zzStartRead-1, Token.COMMENT_MULTILINE); return firstToken; }
	{MLCEnd}				{ yybegin(YYINITIAL); addToken(start,zzStartRead+1, Token.COMMENT_MULTILINE); }
	\|						{}
	<<EOF>>					{ addToken(start,zzStartRead-1, Token.COMMENT_MULTILINE); return firstToken; }

}
<EOL_COMMENT> {

	/* 'h', 'w' and 'f' are excluded so {URL} ("http", "www", "ftp"/"file")
	   gets a chance to match. */
	[^hwf\n]+				{}
	{URL}					{ int temp=zzStartRead; addToken(start,zzStartRead-1, Token.COMMENT_EOL); addHyperlinkToken(temp,zzMarkedPos-1, Token.COMMENT_EOL); start = zzMarkedPos; }
	[hwf]					{}
	\n						{ addToken(start,zzStartRead-1, Token.COMMENT_EOL); addNullToken(); return firstToken; }
	<<EOF>>					{ addToken(start,zzStartRead-1, Token.COMMENT_EOL); addNullToken(); return firstToken; }

}