
/*
* 11/24/2015
*
* This library is distributed under a modified BSD license. See the included
* LICENSE file for details.
*/
package org.fife.ui.rsyntaxtextarea.modes;
import java.io.*;
import javax.swing.text.Segment;
import org.fife.ui.rsyntaxtextarea.*;
/**
* Scanner for Dockerfiles.
*
* This implementation was created using
* JFlex 1.4.1; however, the generated file
* was modified for performance. Memory allocation needs to be almost
* completely removed to be competitive with the handwritten lexers (subclasses
* of AbstractTokenMaker), so this class has been modified so that
* Strings are never allocated (via yytext()), and the scanner never has to
* worry about refilling its buffer (needlessly copying chars around).
* We can achieve this because RText always scans exactly 1 line of tokens at a
* time, and hands the scanner this line as an array of characters (a Segment
* really). Since tokens contain pointers to char arrays instead of Strings
* holding their contents, there is no need for allocating new memory for
* Strings.
*
* The actual algorithm generated for scanning has, of course, not been
* modified.
*
* If you wish to regenerate this file yourself, keep in mind the following:
*
* - The generated DockerTokenMaker.java file will contain two
*   definitions of both zzRefill and yyreset.
*   You should hand-delete the second of each definition (the ones
*   generated by the lexer), as these generated methods modify the input
*   buffer, which we'll never have to do.
* - You should also change the declaration/definition of zzBuffer to NOT
*   be initialized. This is a needless memory allocation for us since we
*   will be pointing the array somewhere else anyway.
* - You should NOT call yylex() on the generated scanner
*   directly; rather, you should use getTokenList as you would
*   with any other TokenMaker instance.
*
*
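* As a rough usage sketch (this assumes the standard RSyntaxTextArea API,
* which exposes this scanner via SyntaxConstants.SYNTAX_STYLE_DOCKERFILE):
*
*   RSyntaxTextArea textArea = new RSyntaxTextArea(25, 80);
*   textArea.setSyntaxEditingStyle(SyntaxConstants.SYNTAX_STYLE_DOCKERFILE);
*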
* @author Robert Futrell
* @version 0.5
*
*/
%%
%public
%class DockerTokenMaker
%extends AbstractJFlexTokenMaker
%unicode
%ignorecase
%type org.fife.ui.rsyntaxtextarea.Token
%{
/**
* Constructor. This must be here because JFlex does not generate a
* no-parameter constructor.
*/
public DockerTokenMaker() {
}
/**
* Adds the token specified to the current linked list of tokens.
*
* @param tokenType The token's type.
*/
private void addToken(int tokenType) {
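// The current match spans zzStartRead..zzMarkedPos-1 in the scanner buffer.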
addToken(zzStartRead, zzMarkedPos-1, tokenType);
}
/**
* Adds the token specified to the current linked list of tokens.
*
* @param tokenType The token's type.
*/
private void addToken(int start, int end, int tokenType) {
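// Shift from segment-relative offsets to document offsets.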
int so = start + offsetShift;
addToken(zzBuffer, start,end, tokenType, so);
}
/**
* Adds the token specified to the current linked list of tokens.
*
* @param array The character array.
* @param start The starting offset in the array.
* @param end The ending offset in the array.
* @param tokenType The token's type.
* @param startOffset The offset in the document at which this token
* occurs.
*/
@Override
public void addToken(char[] array, int start, int end, int tokenType, int startOffset) {
super.addToken(array, start,end, tokenType, startOffset);
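// Advance the token start so any further tokens added in the same action begin where this one ended.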
zzStartRead = zzMarkedPos;
}
@Override
public String[] getLineCommentStartAndEnd(int languageIndex) {
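// Dockerfile comments start with '#' and run to the end of the line; there is no closing marker.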
return new String[] { "#", null };
}
/**
* Returns whether tokens of the specified type should have "mark
* occurrences" enabled for the current programming language.
*
* @param type The token type.
* @return Whether tokens of this type should have "mark occurrences"
* enabled.
*/
@Override
public boolean getMarkOccurrencesOfTokenType(int type) {
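// Mark other occurrences of plain identifiers and of Dockerfile instruction keywords.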
return type==Token.IDENTIFIER || type==Token.RESERVED_WORD;
}
/**
* Returns the first token in the linked list of tokens generated
* from text. This method must be implemented by
* subclasses so they can correctly implement syntax highlighting.
*
* @param text The text from which to get tokens.
* @param initialTokenType The token type we should start with.
* @param startOffset The offset into the document at which
*        text starts.
* @return The first Token in a linked list representing
*         the syntax highlighted text.
*/
public Token getTokenList(Segment text, int initialTokenType, int startOffset) {
resetTokenList();
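// Translate offsets within the Segment into offsets within the document.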
this.offsetShift = -text.offset + startOffset;
// Start off in the proper state.
int state = TokenTypes.NULL;
s = text;
try {
yyreset(zzReader);
yybegin(state);
return yylex();
} catch (IOException ioe) {
ioe.printStackTrace();
return new TokenImpl();
}
}
/**
* Refills the input buffer.
*
* @return true if EOF was reached, otherwise false.
*/
private boolean zzRefill() {
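// The Segment passed to getTokenList() already holds the entire line, so there
// is never anything to refill; running past its end means EOF.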
return zzCurrentPos>=s.offset+s.count;
}
/**
* Resets the scanner to read from a new input stream.
* Does not close the old reader.
*
* All internal variables are reset, the old input stream
* cannot be reused (internal buffer is discarded and lost).
* Lexical state is set to YY_INITIAL.
*
* @param reader the new input stream
*/
public final void yyreset(Reader reader) {
// 's' has been updated.
zzBuffer = s.array;
/*
* We replaced the line below with the two below it because zzRefill
* no longer "refills" the buffer (since the way we do it, it's always
* "full" the first time through, since it points to the segment's
* array). So, we assign zzEndRead here.
*/
//zzStartRead = zzEndRead = s.offset;
zzStartRead = s.offset;
zzEndRead = zzStartRead + s.count - 1;
zzCurrentPos = zzMarkedPos = zzPushbackPos = s.offset;
zzLexicalState = YYINITIAL;
zzReader = reader;
zzAtBOL = true;
zzAtEOF = false;
}
%}
Letter = [A-Za-z]
Digit = [0-9]
IdentifierPart = ({Letter}|{Digit}|[_\-\.])
Identifier = ({IdentifierPart}+)
WhiteSpace = ([ \t\f])
%state STRING
%state CHAR_LITERAL
%%
<YYINITIAL> {
/* Keywords */
"maintainer" |
"from" |
"onbuild" |
"run" |
"expose" |
"env" |
"add" |
"copy" |
"volume" |
"user" |
"workdir" |
"cmd" |
"entrypoint" |
"label" |
"arg" |
"stopsignal" { addToken(Token.RESERVED_WORD); }
{Identifier} { addToken(Token.IDENTIFIER); }
{WhiteSpace}+ { addToken(Token.WHITESPACE); }
"[" |
"]" { addToken(Token.SEPARATOR); }
"|" |
">" |
">>" { addToken(Token.OPERATOR); }
/* String/Character literals. */
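/* 'start' marks the opening quote so the whole literal is emitted as one token at the closing quote or end of line. */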
\" { start = zzMarkedPos-1; yybegin(STRING); }
\' { start = zzMarkedPos-1; yybegin(CHAR_LITERAL); }
/* Comment literals. */
"#".* { addToken(Token.COMMENT_EOL); addNullToken(); return firstToken; }
/* Ended with a line not in a string or comment. */
"\n" |
<<EOF>> { addNullToken(); return firstToken; }
/* Catch any other (unhandled) characters. */
. { addToken(Token.IDENTIFIER); }
}
<STRING> {
[^\n\\\"]+ {}
\\.? { /* Skip escaped chars. */ }
\" { yybegin(YYINITIAL); addToken(start,zzStartRead, Token.LITERAL_STRING_DOUBLE_QUOTE); }
\n |
<<EOF>> { addToken(start,zzStartRead-1, Token.LITERAL_STRING_DOUBLE_QUOTE); return firstToken; }
}
<CHAR_LITERAL> {
[^\n\\\']+ {}
\\.? { /* Skip escaped single quotes only, but this should still work. */ }
\' { yybegin(YYINITIAL); addToken(start,zzStartRead, Token.LITERAL_CHAR); }
\n |
<<EOF>> { addToken(start,zzStartRead-1, Token.LITERAL_CHAR); return firstToken; }
}