
ai.vespa.schemals.parser.Token Maven / Gradle / Ivy
/*
* Generated by: CongoCC Parser Generator. Token.java
*/
package ai.vespa.schemals.parser;
import ai.vespa.schemals.parser.ast.*;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
public class Token implements CharSequence, Node.TerminalNode {
/**
 * Records a {@link ParseException} together with the absolute source-offset
 * range it applies to. Attached to a token via {@code addParseException}.
 */
public class ParseExceptionSource {
    public ParseException parseException;
    public int beginOffset;
    public int endOffset;

    ParseExceptionSource(ParseException e, int beginOffset, int endOffset) {
        this.parseException = e;
        this.beginOffset = beginOffset;
        this.endOffset = endOffset;
    }
}
// Parse-exception record attached to this token, or null if none.
private ParseExceptionSource parseExceptionSource;

/** @return the parse-exception record attached to this token, or null */
public ParseExceptionSource getParseExceptionSource() {
    return parseExceptionSource;
}

/**
 * Attaches a parse exception covering the given offset range and marks this
 * token dirty.
 */
void addParseException(ParseException e, int beginOffset, int endOffset) {
    ParseExceptionSource record = new ParseExceptionSource(e, beginOffset, endOffset);
    this.parseExceptionSource = record;
    setDirty(true);
}

/** Attaches a parse exception covering this token's own offset range. */
void addParseException(ParseException e) {
    addParseException(e, getBeginOffset(), getEndOffset());
}
/**
 * Records an {@link IllegalArgumentException} together with the token where
 * the problem ends.
 * NOTE(review): never instantiated in this file — the field below stores a
 * plain IllegalArgumentException instead. Kept for API compatibility.
 */
public class IllegalArgumentExceptionSource {
    public IllegalArgumentException illegalArgumentException;
    public Token endToken;

    IllegalArgumentExceptionSource(IllegalArgumentException e, Token endToken) {
        illegalArgumentException = e;
        this.endToken = endToken;
    }
}

// IllegalArgumentException attached to this token, or null if none.
private IllegalArgumentException illegalArgumentException;

/** @return the IllegalArgumentException attached to this token, or null */
public IllegalArgumentException getIllegalArgumentException() {
    return illegalArgumentException;
}

/**
 * @deprecated misspelled; use {@link #getIllegalArgumentException()} instead.
 * Kept so existing callers keep compiling.
 */
@Deprecated
public IllegalArgumentException getIllegalArguemntException() {
    return getIllegalArgumentException();
}

/** Attaches an IllegalArgumentException to this token. */
void addIllegalArgumentException(IllegalArgumentException e) {
    illegalArgumentException = e;
}

/**
 * @deprecated misspelled; use {@link #addIllegalArgumentException} instead.
 * Kept so existing callers keep compiling.
 */
@Deprecated
void addIllegalArugmentException(IllegalArgumentException e) {
    addIllegalArgumentException(e);
}
/**
 * The terminal token types produced by the generated lexer. Constants named
 * {@code _TOKEN_<n>} are anonymous literal tokens from the grammar. DUMMY is
 * the placeholder type of a freshly constructed Token (see the {@code type}
 * field's initializer); INVALID marks input represented by an InvalidToken.
 */
public enum TokenType implements Node.NodeType {
EOF, _TOKEN_1, _TOKEN_2, _TOKEN_3, _TOKEN_4, NL, ANNOTATION, ANNOTATIONREFERENCE,
SCHEMA, SEARCH, DIVERSITY, MIN_GROUPS, CUTOFF_FACTOR, CUTOFF_STRATEGY, LOOSE,
STRICT, DOCUMENT, OPERATION, ON_MATCH, ON_FIRST_PHASE, ON_SECOND_PHASE, ON_SUMMARY,
STRUCT, INHERITS, FIELD, FIELDS, FIELDSET, STRUCT_FIELD, IMPORT, AS, INDEXING,
SUMMARY_TO, DOCUMENT_SUMMARY, RANK_TYPE, WEIGHT, TYPE, INDEX, INPUTS, MTOKEN,
TEXT, WORD, GRAM, GRAM_SIZE, MAX_LENGTH, MAX_OCCURRENCES, MAX_TOKEN_LENGTH,
PREFIX, SUBSTRING, SUFFIX, CONSTANT, ONNX_MODEL, SIGNIFICANCE, USE_MODEL,
INTRAOP_THREADS, INTEROP_THREADS, GPU_DEVICE, EXECUTION_MODE, PARALLEL, SEQUENTIAL,
MODEL, MUTATE, QUERY, RANK_PROFILE, RAW_AS_BASE64_IN_SUMMARY, SUMMARY, FULL,
STATIC, DYNAMIC, TOKENS, MATCHED_ELEMENTS_ONLY, SSCONTEXTUAL, SSOVERRIDE,
SSTITLE, SSURL, PROPERTIES, ATTRIBUTE, SORTING, DICTIONARY, ASCENDING, DESCENDING,
UCA, RAW, LOWERCASE, FUNCTION, LOCALE, STRENGTH, PRIMARY, SECONDARY, TERTIARY,
QUATERNARY, IDENTICAL, STEMMING, NORMALIZING, HASH, BTREE, CASED, UNCASED,
BOLDING, NONE, ON, OFF, TRUE, FALSE, SYMMETRIC, QUERY_COMMAND, ALIAS, MATCH,
RANK, LITERAL, EXACT, FILTER, NORMAL, EXACT_TERMINATOR, IGNORE_DEFAULT_RANK_FEATURES,
ID, SOURCE, TO, DIRECT, FROM_DISK, OMIT_SUMMARY_FEATURES, ALWAYS, ON_DEMAND,
NEVER, ENABLE_BIT_VECTORS, ENABLE_ONLY_BIT_VECTOR, FAST_ACCESS, MUTABLE, PAGED,
FAST_RANK, FAST_SEARCH, LBRACE, RBRACE, COLON, DOT, COMMA, ARRAY, WEIGHTEDSET,
MAP, REFERENCE, QUESTIONMARK, CREATE_IF_NONEXISTENT, REMOVE_IF_ZERO, MATCH_PHASE,
EVALUATION_POINT, PRE_POST_FILTER_TIPPING_POINT, ORDER, MAX_FILTER_COVERAGE,
MAX_HITS, FIRST_PHASE, SECOND_PHASE, GLOBAL_PHASE, MACRO, INLINE, ARITY, LOWER_BOUND,
UPPER_BOUND, DENSE_POSTING_LIST_THRESHOLD, ENABLE_BM25, HNSW, MAX_LINKS_PER_NODE,
DOUBLE_KEYWORD, FLOAT_KEYWORD, LONG_KEYWORD, STRING_KEYWORD, DISTANCE_METRIC,
NEIGHBORS_TO_EXPLORE_AT_INSERT, MULTI_THREADED_INDEXING, RANK_PROPERTIES,
RERANK_COUNT, NUM_THREADS_PER_SEARCH, MIN_HITS_PER_THREAD, NUM_SEARCH_PARTITIONS,
TERMWISE_LIMIT, POST_FILTER_THRESHOLD, APPROXIMATE_THRESHOLD, TARGET_HITS_MAX_ADJUSTMENT_FACTOR,
KEEP_RANK_COUNT, RANK_SCORE_DROP_LIMIT, CONSTANTS, FILE, URI, LESSTHAN, GREATERTHAN,
_TOKEN_183, _TOKEN_184, _TOKEN_185, _TOKEN_186, $, _TOKEN_188, _TOKEN_189,
_TOKEN_190, _TOKEN_191, TENSOR_TYPE, TENSOR_VALUE_SL, TENSOR_VALUE_ML, MATCHFEATURES_SL,
MATCHFEATURES_ML, MATCHFEATURES_ML_INHERITS, SUMMARYFEATURES_SL, SUMMARYFEATURES_ML,
SUMMARYFEATURES_ML_INHERITS, RANKFEATURES_SL, RANKFEATURES_ML, EXPRESSION_SL,
EXPRESSION_ML, IDENTIFIER, IDENTIFIER_WITH_DASH, DOUBLEQUOTEDSTRING, SINGLEQUOTEDSTRING,
CONTEXT, DOUBLE, INTEGER, LONG, STRING, FILE_PATH, HTTP, URI_PATH, VARIABLE,
ONNX_INPUT_SL, ONNX_OUTPUT_SL, SINGLE_LINE_COMMENT, DUMMY, INVALID;
/** @return whether this is the DUMMY placeholder type */
public boolean isUndefined() {
return this == DUMMY;
}
/** @return whether this is the INVALID type */
public boolean isInvalid() {
return this == INVALID;
}
/** @return whether this is the end-of-file type */
public boolean isEOF() {
return this == EOF;
}
}
// Lexer this token came from; provides text and location lookups.
private SchemaParserLexer tokenSource;
// Token type; remains DUMMY until explicitly set.
private TokenType type = TokenType.DUMMY;
// Absolute character offsets into the input; the end offset is treated as
// exclusive by length() (endOffset - beginOffset).
private int beginOffset;
private int endOffset;
// Whether this token was not part of the regular parse.
private boolean unparsed;
// The AST node owning this token, if any.
private Node parent;
/**
 * Sets the absolute begin offset. It would be extremely rare that an
 * application programmer would use this method; it is public only because
 * it is part of the ai.vespa.schemals.parser.Node interface.
 */
public void setBeginOffset(int offset) {
    beginOffset = offset;
}

/**
 * Sets the absolute end offset. It would be extremely rare that an
 * application programmer would use this method; it is public only because
 * it is part of the ai.vespa.schemals.parser.Node interface.
 */
public void setEndOffset(int offset) {
    endOffset = offset;
}
/**
 * @return the SchemaParserLexer object that handles location info for the
 * tokens
 */
public SchemaParserLexer getTokenSource() {
    return tokenSource;
}

/**
 * Sets the lexer this token belongs to. It should be exceedingly rare that
 * an application programmer needs to use this method.
 */
public void setTokenSource(TokenSource tokenSource) {
    this.tokenSource = (SchemaParserLexer) tokenSource;
}

/** @return whether this token's type is INVALID */
public boolean isInvalid() {
    return getType().isInvalid();
}

/** @return the TokenType of this Token object */
@Override
public TokenType getType() {
    return type;
}

/** Sets the type of this token. */
protected void setType(TokenType type) {
    this.type = type;
}
/**
 * @return whether this token was inserted somehow rather than representing
 * actual input; EOF always counts as virtual
 */
public boolean isVirtual() {
    return type == TokenType.EOF || virtual;
}

/** @return whether we skipped this token in parsing */
public boolean isSkipped() {
    return skipped;
}

// Parser-maintained flags; marking a token virtual or skipped also makes it dirty.
private boolean virtual;
private boolean skipped;
private boolean dirty;

void setVirtual(boolean virtual) {
    this.virtual = virtual;
    if (virtual) {
        dirty = true;
    }
}

void setSkipped(boolean skipped) {
    this.skipped = skipped;
    if (skipped) {
        dirty = true;
    }
}

/** @return whether this token has been marked dirty (virtual, skipped, or carrying a parse exception) */
public boolean isDirty() {
    return dirty;
}

public void setDirty(boolean dirty) {
    this.dirty = dirty;
}

/** @return the absolute begin offset in the input */
public int getBeginOffset() {
    return beginOffset;
}

/** @return the absolute end offset in the input */
public int getEndOffset() {
    return endOffset;
}
// NOTE(review): a Javadoc reading "@return the string image of the token"
// originally sat here, before the @Override — it appears to belong to
// toString() further down, not to getNext(). Converted to this note; the
// @Override below is left untouched.
@Override
/**
 * @return the next _cached_ regular (i.e. parsed) token
 * or null
 */
public final Token getNext() {
return getNextParsedToken();
}
/**
 * @return the previous regular (i.e. parsed) token, or null if there is none
 */
public final Token getPrevious() {
    Token tok = previousCachedToken();
    // Walk backwards past any unparsed tokens.
    while (tok != null && tok.isUnparsed()) {
        tok = tok.previousCachedToken();
    }
    return tok;
}
/**
 * @return the next regular (i.e. parsed) token, or null if there is none
 */
private Token getNextParsedToken() {
    Token tok = nextCachedToken();
    // Walk forwards past any unparsed tokens.
    while (tok != null && tok.isUnparsed()) {
        tok = tok.nextCachedToken();
    }
    return tok;
}
/**
 * @return the next cached token of any sort (parsed, unparsed or invalid),
 * or null at EOF or when no token source is set
 */
public Token nextCachedToken() {
    if (getType() == TokenType.EOF) {
        return null;
    }
    // Renamed local: the original shadowed the tokenSource field.
    SchemaParserLexer source = getTokenSource();
    if (source == null) {
        return null;
    }
    return (Token) source.nextCachedToken(getEndOffset());
}

/**
 * @return the previous cached token of any sort, or null when there is no
 * token source
 */
public Token previousCachedToken() {
    SchemaParserLexer source = getTokenSource();
    return source == null ? null : (Token) source.previousCachedToken(getBeginOffset());
}
/** Package-private alias for {@link #previousCachedToken()}. */
Token getPreviousToken() {
    return previousCachedToken();
}

/**
 * Creates a token of the given type over this token's exact source range,
 * registers it in the token source's cache and returns it.
 */
public Token replaceType(TokenType type) {
    Token replacement = newToken(type, getTokenSource(), getBeginOffset(), getEndOffset());
    getTokenSource().cacheToken(replacement);
    return replacement;
}
/**
 * @return the input text this token covers, "" for EOF, or null when no
 * token source is set
 */
public String getSource() {
    if (type == TokenType.EOF) {
        return "";
    }
    SchemaParserLexer source = getTokenSource();
    if (source == null) {
        return null;
    }
    return source.getText(getBeginOffset(), getEndOffset());
}
/** No-arg constructor for subclasses and framework use. */
protected Token() {
}

/**
 * Constructs a token of the given type covering the given offset range in
 * the given token source.
 */
public Token(TokenType type, SchemaParserLexer tokenSource, int beginOffset, int endOffset) {
    this.tokenSource = tokenSource;
    this.type = type;
    this.beginOffset = beginOffset;
    this.endOffset = endOffset;
}

/** @return whether this token was not part of the regular parse */
public boolean isUnparsed() {
    return unparsed;
}

public void setUnparsed(boolean unparsed) {
    this.unparsed = unparsed;
}
/**
 * @return An iterator of the cached tokens preceding this one, walking
 * backwards from this token (this token itself is not included).
 */
public Iterator<Token> precedingTokens() {
    // Parameterized with Token (the original used the raw Iterator type);
    // source- and binary-compatible for existing raw-type callers.
    return new Iterator<Token>() {
        Token currentPoint = Token.this;

        @Override
        public boolean hasNext() {
            return currentPoint.previousCachedToken() != null;
        }

        @Override
        public Token next() {
            Token previous = currentPoint.previousCachedToken();
            if (previous == null) throw new java.util.NoSuchElementException("No previous token!");
            return currentPoint = previous;
        }
    };
}
/**
 * @return a list of the unparsed tokens preceding this one in the order they
 * appear in the input; empty if there are none
 */
public List<Token> precedingUnparsedTokens() {
    // Parameterized with Token (the original used the raw List type).
    List<Token> result = new ArrayList<>();
    Token t = this.previousCachedToken();
    while (t != null && t.isUnparsed()) {
        result.add(t);
        t = t.previousCachedToken();
    }
    // Collected backwards; restore input order.
    Collections.reverse(result);
    return result;
}
/**
 * @return An iterator of the (cached) tokens that follow this one.
 */
public Iterator<Token> followingTokens() {
    // Parameterized with Token (the original used the raw Iterator type);
    // source- and binary-compatible for existing raw-type callers.
    return new Iterator<Token>() {
        Token currentPoint = Token.this;

        @Override
        public boolean hasNext() {
            return currentPoint.nextCachedToken() != null;
        }

        @Override
        public Token next() {
            Token next = currentPoint.nextCachedToken();
            if (next == null) throw new java.util.NoSuchElementException("No next token!");
            return currentPoint = next;
        }
    };
}
/** Copies the token source and offset range from another token. */
public void copyLocationInfo(Token from) {
    setTokenSource(from.getTokenSource());
    setBeginOffset(from.getBeginOffset());
    setEndOffset(from.getEndOffset());
}

/**
 * Copies location info spanning from {@code start}'s begin offset to
 * {@code end}'s end offset; the token source is taken from {@code start},
 * falling back to {@code end}'s when start has none.
 */
public void copyLocationInfo(Token start, Token end) {
    setTokenSource(start.getTokenSource());
    if (getTokenSource() == null) {
        setTokenSource(end.getTokenSource());
    }
    setBeginOffset(start.getBeginOffset());
    setEndOffset(end.getEndOffset());
}
/**
 * Factory used by the generated parser/lexer to instantiate the token
 * subclass (from ai.vespa.schemals.parser.ast) matching the given type.
 * INVALID yields an InvalidToken; types without a dedicated subclass fall
 * through to a plain Token. Generated dispatch — do not hand-edit the cases.
 */
public static Token newToken(TokenType type, SchemaParserLexer tokenSource, int beginOffset, int endOffset) {
switch(type) {
case DOUBLE_KEYWORD :
return new DOUBLE_KEYWORD(TokenType.DOUBLE_KEYWORD, tokenSource, beginOffset, endOffset);
case RANK_PROPERTIES :
return new RANK_PROPERTIES(TokenType.RANK_PROPERTIES, tokenSource, beginOffset, endOffset);
case NEVER :
return new NEVER(TokenType.NEVER, tokenSource, beginOffset, endOffset);
case SECONDARY :
return new SECONDARY(TokenType.SECONDARY, tokenSource, beginOffset, endOffset);
case RAW :
return new RAW(TokenType.RAW, tokenSource, beginOffset, endOffset);
case RANK :
return new RANK(TokenType.RANK, tokenSource, beginOffset, endOffset);
case MUTABLE :
return new MUTABLE(TokenType.MUTABLE, tokenSource, beginOffset, endOffset);
case CONTEXT :
return new CONTEXT(TokenType.CONTEXT, tokenSource, beginOffset, endOffset);
case STATIC :
return new STATIC(TokenType.STATIC, tokenSource, beginOffset, endOffset);
case SSOVERRIDE :
return new SSOVERRIDE(TokenType.SSOVERRIDE, tokenSource, beginOffset, endOffset);
case SIGNIFICANCE :
return new SIGNIFICANCE(TokenType.SIGNIFICANCE, tokenSource, beginOffset, endOffset);
case DOUBLEQUOTEDSTRING :
return new DOUBLEQUOTEDSTRING(TokenType.DOUBLEQUOTEDSTRING, tokenSource, beginOffset, endOffset);
case DIVERSITY :
return new DIVERSITY(TokenType.DIVERSITY, tokenSource, beginOffset, endOffset);
case WEIGHT :
return new WEIGHT(TokenType.WEIGHT, tokenSource, beginOffset, endOffset);
case GLOBAL_PHASE :
return new GLOBAL_PHASE(TokenType.GLOBAL_PHASE, tokenSource, beginOffset, endOffset);
case SUMMARYFEATURES_ML :
return new SUMMARYFEATURES_ML(TokenType.SUMMARYFEATURES_ML, tokenSource, beginOffset, endOffset);
case SEARCH :
return new SEARCH(TokenType.SEARCH, tokenSource, beginOffset, endOffset);
case LOOSE :
return new LOOSE(TokenType.LOOSE, tokenSource, beginOffset, endOffset);
case AS :
return new AS(TokenType.AS, tokenSource, beginOffset, endOffset);
case QUERY :
return new QUERY(TokenType.QUERY, tokenSource, beginOffset, endOffset);
case SOURCE :
return new SOURCE(TokenType.SOURCE, tokenSource, beginOffset, endOffset);
case RANK_PROFILE :
return new RANK_PROFILE(TokenType.RANK_PROFILE, tokenSource, beginOffset, endOffset);
case MULTI_THREADED_INDEXING :
return new MULTI_THREADED_INDEXING(TokenType.MULTI_THREADED_INDEXING, tokenSource, beginOffset, endOffset);
case LBRACE :
return new LBRACE(TokenType.LBRACE, tokenSource, beginOffset, endOffset);
case REMOVE_IF_ZERO :
return new REMOVE_IF_ZERO(TokenType.REMOVE_IF_ZERO, tokenSource, beginOffset, endOffset);
case TOKENS :
return new TOKENS(TokenType.TOKENS, tokenSource, beginOffset, endOffset);
case SSCONTEXTUAL :
return new SSCONTEXTUAL(TokenType.SSCONTEXTUAL, tokenSource, beginOffset, endOffset);
case URI_PATH :
return new URI_PATH(TokenType.URI_PATH, tokenSource, beginOffset, endOffset);
case TENSOR_VALUE_SL :
return new TENSOR_VALUE_SL(TokenType.TENSOR_VALUE_SL, tokenSource, beginOffset, endOffset);
case MIN_HITS_PER_THREAD :
return new MIN_HITS_PER_THREAD(TokenType.MIN_HITS_PER_THREAD, tokenSource, beginOffset, endOffset);
case MATCHFEATURES_SL :
return new MATCHFEATURES_SL(TokenType.MATCHFEATURES_SL, tokenSource, beginOffset, endOffset);
case QUESTIONMARK :
return new QUESTIONMARK(TokenType.QUESTIONMARK, tokenSource, beginOffset, endOffset);
case VARIABLE :
return new VARIABLE(TokenType.VARIABLE, tokenSource, beginOffset, endOffset);
case SSURL :
return new SSURL(TokenType.SSURL, tokenSource, beginOffset, endOffset);
case ON_MATCH :
return new ON_MATCH(TokenType.ON_MATCH, tokenSource, beginOffset, endOffset);
case LONG :
return new LONG(TokenType.LONG, tokenSource, beginOffset, endOffset);
case FILE_PATH :
return new FILE_PATH(TokenType.FILE_PATH, tokenSource, beginOffset, endOffset);
case FIELDSET :
return new FIELDSET(TokenType.FIELDSET, tokenSource, beginOffset, endOffset);
case FAST_RANK :
return new FAST_RANK(TokenType.FAST_RANK, tokenSource, beginOffset, endOffset);
case LOCALE :
return new LOCALE(TokenType.LOCALE, tokenSource, beginOffset, endOffset);
case OPERATION :
return new OPERATION(TokenType.OPERATION, tokenSource, beginOffset, endOffset);
case GRAM :
return new GRAM(TokenType.GRAM, tokenSource, beginOffset, endOffset);
case NUM_SEARCH_PARTITIONS :
return new NUM_SEARCH_PARTITIONS(TokenType.NUM_SEARCH_PARTITIONS, tokenSource, beginOffset, endOffset);
case STRING :
return new STRING(TokenType.STRING, tokenSource, beginOffset, endOffset);
case TO :
return new TO(TokenType.TO, tokenSource, beginOffset, endOffset);
case LONG_KEYWORD :
return new LONG_KEYWORD(TokenType.LONG_KEYWORD, tokenSource, beginOffset, endOffset);
case PARALLEL :
return new PARALLEL(TokenType.PARALLEL, tokenSource, beginOffset, endOffset);
case PROPERTIES :
return new PROPERTIES(TokenType.PROPERTIES, tokenSource, beginOffset, endOffset);
case STRUCT_FIELD :
return new STRUCT_FIELD(TokenType.STRUCT_FIELD, tokenSource, beginOffset, endOffset);
case INDEXING :
return new INDEXING(TokenType.INDEXING, tokenSource, beginOffset, endOffset);
case ANNOTATIONREFERENCE :
return new ANNOTATIONREFERENCE(TokenType.ANNOTATIONREFERENCE, tokenSource, beginOffset, endOffset);
case MAX_LINKS_PER_NODE :
return new MAX_LINKS_PER_NODE(TokenType.MAX_LINKS_PER_NODE, tokenSource, beginOffset, endOffset);
case EXECUTION_MODE :
return new EXECUTION_MODE(TokenType.EXECUTION_MODE, tokenSource, beginOffset, endOffset);
case DESCENDING :
return new DESCENDING(TokenType.DESCENDING, tokenSource, beginOffset, endOffset);
case MATCHFEATURES_ML :
return new MATCHFEATURES_ML(TokenType.MATCHFEATURES_ML, tokenSource, beginOffset, endOffset);
case INDEX :
return new INDEX(TokenType.INDEX, tokenSource, beginOffset, endOffset);
case CUTOFF_STRATEGY :
return new CUTOFF_STRATEGY(TokenType.CUTOFF_STRATEGY, tokenSource, beginOffset, endOffset);
case INTEGER :
return new INTEGER(TokenType.INTEGER, tokenSource, beginOffset, endOffset);
case STRICT :
return new STRICT(TokenType.STRICT, tokenSource, beginOffset, endOffset);
case MATCHFEATURES_ML_INHERITS :
return new MATCHFEATURES_ML_INHERITS(TokenType.MATCHFEATURES_ML_INHERITS, tokenSource, beginOffset, endOffset);
case ATTRIBUTE :
return new ATTRIBUTE(TokenType.ATTRIBUTE, tokenSource, beginOffset, endOffset);
case ONNX_OUTPUT_SL :
return new ONNX_OUTPUT_SL(TokenType.ONNX_OUTPUT_SL, tokenSource, beginOffset, endOffset);
case ENABLE_ONLY_BIT_VECTOR :
return new ENABLE_ONLY_BIT_VECTOR(TokenType.ENABLE_ONLY_BIT_VECTOR, tokenSource, beginOffset, endOffset);
case HTTP :
return new HTTP(TokenType.HTTP, tokenSource, beginOffset, endOffset);
case FULL :
return new FULL(TokenType.FULL, tokenSource, beginOffset, endOffset);
case RBRACE :
return new RBRACE(TokenType.RBRACE, tokenSource, beginOffset, endOffset);
case MATCHED_ELEMENTS_ONLY :
return new MATCHED_ELEMENTS_ONLY(TokenType.MATCHED_ELEMENTS_ONLY, tokenSource, beginOffset, endOffset);
case GRAM_SIZE :
return new GRAM_SIZE(TokenType.GRAM_SIZE, tokenSource, beginOffset, endOffset);
case KEEP_RANK_COUNT :
return new KEEP_RANK_COUNT(TokenType.KEEP_RANK_COUNT, tokenSource, beginOffset, endOffset);
case TERTIARY :
return new TERTIARY(TokenType.TERTIARY, tokenSource, beginOffset, endOffset);
case SSTITLE :
return new SSTITLE(TokenType.SSTITLE, tokenSource, beginOffset, endOffset);
case SCHEMA :
return new SCHEMA(TokenType.SCHEMA, tokenSource, beginOffset, endOffset);
case FAST_SEARCH :
return new FAST_SEARCH(TokenType.FAST_SEARCH, tokenSource, beginOffset, endOffset);
case CASED :
return new CASED(TokenType.CASED, tokenSource, beginOffset, endOffset);
case IDENTIFIER :
return new IDENTIFIER(TokenType.IDENTIFIER, tokenSource, beginOffset, endOffset);
case FILTER :
return new FILTER(TokenType.FILTER, tokenSource, beginOffset, endOffset);
case EXPRESSION_SL :
return new EXPRESSION_SL(TokenType.EXPRESSION_SL, tokenSource, beginOffset, endOffset);
case COLON :
return new COLON(TokenType.COLON, tokenSource, beginOffset, endOffset);
case INPUTS :
return new INPUTS(TokenType.INPUTS, tokenSource, beginOffset, endOffset);
case SUMMARYFEATURES_ML_INHERITS :
return new SUMMARYFEATURES_ML_INHERITS(TokenType.SUMMARYFEATURES_ML_INHERITS, tokenSource, beginOffset, endOffset);
case ONNX_MODEL :
return new ONNX_MODEL(TokenType.ONNX_MODEL, tokenSource, beginOffset, endOffset);
case INHERITS :
return new INHERITS(TokenType.INHERITS, tokenSource, beginOffset, endOffset);
case QUERY_COMMAND :
return new QUERY_COMMAND(TokenType.QUERY_COMMAND, tokenSource, beginOffset, endOffset);
case OMIT_SUMMARY_FEATURES :
return new OMIT_SUMMARY_FEATURES(TokenType.OMIT_SUMMARY_FEATURES, tokenSource, beginOffset, endOffset);
case RANK_TYPE :
return new RANK_TYPE(TokenType.RANK_TYPE, tokenSource, beginOffset, endOffset);
case UPPER_BOUND :
return new UPPER_BOUND(TokenType.UPPER_BOUND, tokenSource, beginOffset, endOffset);
case FALSE :
return new FALSE(TokenType.FALSE, tokenSource, beginOffset, endOffset);
case LOWERCASE :
return new LOWERCASE(TokenType.LOWERCASE, tokenSource, beginOffset, endOffset);
case RERANK_COUNT :
return new RERANK_COUNT(TokenType.RERANK_COUNT, tokenSource, beginOffset, endOffset);
case ENABLE_BIT_VECTORS :
return new ENABLE_BIT_VECTORS(TokenType.ENABLE_BIT_VECTORS, tokenSource, beginOffset, endOffset);
case ON_SUMMARY :
return new ON_SUMMARY(TokenType.ON_SUMMARY, tokenSource, beginOffset, endOffset);
case SYMMETRIC :
return new SYMMETRIC(TokenType.SYMMETRIC, tokenSource, beginOffset, endOffset);
case ARITY :
return new ARITY(TokenType.ARITY, tokenSource, beginOffset, endOffset);
case INTEROP_THREADS :
return new INTEROP_THREADS(TokenType.INTEROP_THREADS, tokenSource, beginOffset, endOffset);
case TEXT :
return new TEXT(TokenType.TEXT, tokenSource, beginOffset, endOffset);
case PAGED :
return new PAGED(TokenType.PAGED, tokenSource, beginOffset, endOffset);
case NUM_THREADS_PER_SEARCH :
return new NUM_THREADS_PER_SEARCH(TokenType.NUM_THREADS_PER_SEARCH, tokenSource, beginOffset, endOffset);
case TERMWISE_LIMIT :
return new TERMWISE_LIMIT(TokenType.TERMWISE_LIMIT, tokenSource, beginOffset, endOffset);
case TENSOR_VALUE_ML :
return new TENSOR_VALUE_ML(TokenType.TENSOR_VALUE_ML, tokenSource, beginOffset, endOffset);
case ANNOTATION :
return new ANNOTATION(TokenType.ANNOTATION, tokenSource, beginOffset, endOffset);
case DOCUMENT :
return new DOCUMENT(TokenType.DOCUMENT, tokenSource, beginOffset, endOffset);
case DICTIONARY :
return new DICTIONARY(TokenType.DICTIONARY, tokenSource, beginOffset, endOffset);
case RANK_SCORE_DROP_LIMIT :
return new RANK_SCORE_DROP_LIMIT(TokenType.RANK_SCORE_DROP_LIMIT, tokenSource, beginOffset, endOffset);
case DISTANCE_METRIC :
return new DISTANCE_METRIC(TokenType.DISTANCE_METRIC, tokenSource, beginOffset, endOffset);
case ID :
return new ID(TokenType.ID, tokenSource, beginOffset, endOffset);
case NONE :
return new NONE(TokenType.NONE, tokenSource, beginOffset, endOffset);
case TYPE :
return new TYPE(TokenType.TYPE, tokenSource, beginOffset, endOffset);
case FIELDS :
return new FIELDS(TokenType.FIELDS, tokenSource, beginOffset, endOffset);
case SUFFIX :
return new SUFFIX(TokenType.SUFFIX, tokenSource, beginOffset, endOffset);
case WORD :
return new WORD(TokenType.WORD, tokenSource, beginOffset, endOffset);
case LITERAL :
return new LITERAL(TokenType.LITERAL, tokenSource, beginOffset, endOffset);
case FIELD :
return new FIELD(TokenType.FIELD, tokenSource, beginOffset, endOffset);
case DENSE_POSTING_LIST_THRESHOLD :
return new DENSE_POSTING_LIST_THRESHOLD(TokenType.DENSE_POSTING_LIST_THRESHOLD, tokenSource, beginOffset, endOffset);
case DOT :
return new DOT(TokenType.DOT, tokenSource, beginOffset, endOffset);
case BTREE :
return new BTREE(TokenType.BTREE, tokenSource, beginOffset, endOffset);
case INTRAOP_THREADS :
return new INTRAOP_THREADS(TokenType.INTRAOP_THREADS, tokenSource, beginOffset, endOffset);
case STRENGTH :
return new STRENGTH(TokenType.STRENGTH, tokenSource, beginOffset, endOffset);
case EXACT :
return new EXACT(TokenType.EXACT, tokenSource, beginOffset, endOffset);
case SECOND_PHASE :
return new SECOND_PHASE(TokenType.SECOND_PHASE, tokenSource, beginOffset, endOffset);
case FUNCTION :
return new FUNCTION(TokenType.FUNCTION, tokenSource, beginOffset, endOffset);
case PRE_POST_FILTER_TIPPING_POINT :
return new PRE_POST_FILTER_TIPPING_POINT(TokenType.PRE_POST_FILTER_TIPPING_POINT, tokenSource, beginOffset, endOffset);
case EXPRESSION_ML :
return new EXPRESSION_ML(TokenType.EXPRESSION_ML, tokenSource, beginOffset, endOffset);
case HASH :
return new HASH(TokenType.HASH, tokenSource, beginOffset, endOffset);
case IGNORE_DEFAULT_RANK_FEATURES :
return new IGNORE_DEFAULT_RANK_FEATURES(TokenType.IGNORE_DEFAULT_RANK_FEATURES, tokenSource, beginOffset, endOffset);
case STEMMING :
return new STEMMING(TokenType.STEMMING, tokenSource, beginOffset, endOffset);
case ONNX_INPUT_SL :
return new ONNX_INPUT_SL(TokenType.ONNX_INPUT_SL, tokenSource, beginOffset, endOffset);
case NORMALIZING :
return new NORMALIZING(TokenType.NORMALIZING, tokenSource, beginOffset, endOffset);
case CUTOFF_FACTOR :
return new CUTOFF_FACTOR(TokenType.CUTOFF_FACTOR, tokenSource, beginOffset, endOffset);
case MAP :
return new MAP(TokenType.MAP, tokenSource, beginOffset, endOffset);
case UNCASED :
return new UNCASED(TokenType.UNCASED, tokenSource, beginOffset, endOffset);
case RANKFEATURES_ML :
return new RANKFEATURES_ML(TokenType.RANKFEATURES_ML, tokenSource, beginOffset, endOffset);
case ALWAYS :
return new ALWAYS(TokenType.ALWAYS, tokenSource, beginOffset, endOffset);
case URI :
return new URI(TokenType.URI, tokenSource, beginOffset, endOffset);
case OFF :
return new OFF(TokenType.OFF, tokenSource, beginOffset, endOffset);
case IMPORT :
return new IMPORT(TokenType.IMPORT, tokenSource, beginOffset, endOffset);
case ORDER :
return new ORDER(TokenType.ORDER, tokenSource, beginOffset, endOffset);
case DOUBLE :
return new DOUBLE(TokenType.DOUBLE, tokenSource, beginOffset, endOffset);
case FILE :
return new FILE(TokenType.FILE, tokenSource, beginOffset, endOffset);
case COMMA :
return new COMMA(TokenType.COMMA, tokenSource, beginOffset, endOffset);
case CREATE_IF_NONEXISTENT :
return new CREATE_IF_NONEXISTENT(TokenType.CREATE_IF_NONEXISTENT, tokenSource, beginOffset, endOffset);
case BOLDING :
return new BOLDING(TokenType.BOLDING, tokenSource, beginOffset, endOffset);
case APPROXIMATE_THRESHOLD :
return new APPROXIMATE_THRESHOLD(TokenType.APPROXIMATE_THRESHOLD, tokenSource, beginOffset, endOffset);
case CONSTANT :
return new CONSTANT(TokenType.CONSTANT, tokenSource, beginOffset, endOffset);
case MODEL :
return new MODEL(TokenType.MODEL, tokenSource, beginOffset, endOffset);
case ON_FIRST_PHASE :
return new ON_FIRST_PHASE(TokenType.ON_FIRST_PHASE, tokenSource, beginOffset, endOffset);
case QUATERNARY :
return new QUATERNARY(TokenType.QUATERNARY, tokenSource, beginOffset, endOffset);
case TENSOR_TYPE :
return new TENSOR_TYPE(TokenType.TENSOR_TYPE, tokenSource, beginOffset, endOffset);
case DYNAMIC :
return new DYNAMIC(TokenType.DYNAMIC, tokenSource, beginOffset, endOffset);
case UCA :
return new UCA(TokenType.UCA, tokenSource, beginOffset, endOffset);
case ASCENDING :
return new ASCENDING(TokenType.ASCENDING, tokenSource, beginOffset, endOffset);
case FIRST_PHASE :
return new FIRST_PHASE(TokenType.FIRST_PHASE, tokenSource, beginOffset, endOffset);
case SINGLEQUOTEDSTRING :
return new SINGLEQUOTEDSTRING(TokenType.SINGLEQUOTEDSTRING, tokenSource, beginOffset, endOffset);
case USE_MODEL :
return new USE_MODEL(TokenType.USE_MODEL, tokenSource, beginOffset, endOffset);
case MATCH_PHASE :
return new MATCH_PHASE(TokenType.MATCH_PHASE, tokenSource, beginOffset, endOffset);
case SORTING :
return new SORTING(TokenType.SORTING, tokenSource, beginOffset, endOffset);
case MAX_OCCURRENCES :
return new MAX_OCCURRENCES(TokenType.MAX_OCCURRENCES, tokenSource, beginOffset, endOffset);
case MUTATE :
return new MUTATE(TokenType.MUTATE, tokenSource, beginOffset, endOffset);
case NORMAL :
return new NORMAL(TokenType.NORMAL, tokenSource, beginOffset, endOffset);
case ARRAY :
return new ARRAY(TokenType.ARRAY, tokenSource, beginOffset, endOffset);
case MTOKEN :
return new MTOKEN(TokenType.MTOKEN, tokenSource, beginOffset, endOffset);
case SUBSTRING :
return new SUBSTRING(TokenType.SUBSTRING, tokenSource, beginOffset, endOffset);
case ALIAS :
return new ALIAS(TokenType.ALIAS, tokenSource, beginOffset, endOffset);
case ON_DEMAND :
return new ON_DEMAND(TokenType.ON_DEMAND, tokenSource, beginOffset, endOffset);
case POST_FILTER_THRESHOLD :
return new POST_FILTER_THRESHOLD(TokenType.POST_FILTER_THRESHOLD, tokenSource, beginOffset, endOffset);
case SUMMARY :
return new SUMMARY(TokenType.SUMMARY, tokenSource, beginOffset, endOffset);
case RANKFEATURES_SL :
return new RANKFEATURES_SL(TokenType.RANKFEATURES_SL, tokenSource, beginOffset, endOffset);
case INLINE :
return new INLINE(TokenType.INLINE, tokenSource, beginOffset, endOffset);
case GPU_DEVICE :
return new GPU_DEVICE(TokenType.GPU_DEVICE, tokenSource, beginOffset, endOffset);
case EVALUATION_POINT :
return new EVALUATION_POINT(TokenType.EVALUATION_POINT, tokenSource, beginOffset, endOffset);
case EXACT_TERMINATOR :
return new EXACT_TERMINATOR(TokenType.EXACT_TERMINATOR, tokenSource, beginOffset, endOffset);
case LOWER_BOUND :
return new LOWER_BOUND(TokenType.LOWER_BOUND, tokenSource, beginOffset, endOffset);
case IDENTICAL :
return new IDENTICAL(TokenType.IDENTICAL, tokenSource, beginOffset, endOffset);
case TRUE :
return new TRUE(TokenType.TRUE, tokenSource, beginOffset, endOffset);
case STRING_KEYWORD :
return new STRING_KEYWORD(TokenType.STRING_KEYWORD, tokenSource, beginOffset, endOffset);
case MAX_LENGTH :
return new MAX_LENGTH(TokenType.MAX_LENGTH, tokenSource, beginOffset, endOffset);
case MAX_FILTER_COVERAGE :
return new MAX_FILTER_COVERAGE(TokenType.MAX_FILTER_COVERAGE, tokenSource, beginOffset, endOffset);
case FROM_DISK :
return new FROM_DISK(TokenType.FROM_DISK, tokenSource, beginOffset, endOffset);
case WEIGHTEDSET :
return new WEIGHTEDSET(TokenType.WEIGHTEDSET, tokenSource, beginOffset, endOffset);
case PREFIX :
return new PREFIX(TokenType.PREFIX, tokenSource, beginOffset, endOffset);
case STRUCT :
return new STRUCT(TokenType.STRUCT, tokenSource, beginOffset, endOffset);
case NL :
return new NL(TokenType.NL, tokenSource, beginOffset, endOffset);
case CONSTANTS :
return new CONSTANTS(TokenType.CONSTANTS, tokenSource, beginOffset, endOffset);
case FLOAT_KEYWORD :
return new FLOAT_KEYWORD(TokenType.FLOAT_KEYWORD, tokenSource, beginOffset, endOffset);
case HNSW :
return new HNSW(TokenType.HNSW, tokenSource, beginOffset, endOffset);
case FAST_ACCESS :
return new FAST_ACCESS(TokenType.FAST_ACCESS, tokenSource, beginOffset, endOffset);
case SUMMARY_TO :
return new SUMMARY_TO(TokenType.SUMMARY_TO, tokenSource, beginOffset, endOffset);
case SUMMARYFEATURES_SL :
return new SUMMARYFEATURES_SL(TokenType.SUMMARYFEATURES_SL, tokenSource, beginOffset, endOffset);
case SINGLE_LINE_COMMENT :
return new SINGLE_LINE_COMMENT(TokenType.SINGLE_LINE_COMMENT, tokenSource, beginOffset, endOffset);
case ON_SECOND_PHASE :
return new ON_SECOND_PHASE(TokenType.ON_SECOND_PHASE, tokenSource, beginOffset, endOffset);
case MAX_TOKEN_LENGTH :
return new MAX_TOKEN_LENGTH(TokenType.MAX_TOKEN_LENGTH, tokenSource, beginOffset, endOffset);
case REFERENCE :
return new REFERENCE(TokenType.REFERENCE, tokenSource, beginOffset, endOffset);
case SEQUENTIAL :
return new SEQUENTIAL(TokenType.SEQUENTIAL, tokenSource, beginOffset, endOffset);
case DIRECT :
return new DIRECT(TokenType.DIRECT, tokenSource, beginOffset, endOffset);
case ON :
return new ON(TokenType.ON, tokenSource, beginOffset, endOffset);
case IDENTIFIER_WITH_DASH :
return new IDENTIFIER_WITH_DASH(TokenType.IDENTIFIER_WITH_DASH, tokenSource, beginOffset, endOffset);
case DOCUMENT_SUMMARY :
return new DOCUMENT_SUMMARY(TokenType.DOCUMENT_SUMMARY, tokenSource, beginOffset, endOffset);
case PRIMARY :
return new PRIMARY(TokenType.PRIMARY, tokenSource, beginOffset, endOffset);
case MAX_HITS :
return new MAX_HITS(TokenType.MAX_HITS, tokenSource, beginOffset, endOffset);
case RAW_AS_BASE64_IN_SUMMARY :
return new RAW_AS_BASE64_IN_SUMMARY(TokenType.RAW_AS_BASE64_IN_SUMMARY, tokenSource, beginOffset, endOffset);
case ENABLE_BM25 :
return new ENABLE_BM25(TokenType.ENABLE_BM25, tokenSource, beginOffset, endOffset);
case TARGET_HITS_MAX_ADJUSTMENT_FACTOR :
return new TARGET_HITS_MAX_ADJUSTMENT_FACTOR(TokenType.TARGET_HITS_MAX_ADJUSTMENT_FACTOR, tokenSource, beginOffset, endOffset);
case MIN_GROUPS :
return new MIN_GROUPS(TokenType.MIN_GROUPS, tokenSource, beginOffset, endOffset);
case MACRO :
return new MACRO(TokenType.MACRO, tokenSource, beginOffset, endOffset);
case NEIGHBORS_TO_EXPLORE_AT_INSERT :
return new NEIGHBORS_TO_EXPLORE_AT_INSERT(TokenType.NEIGHBORS_TO_EXPLORE_AT_INSERT, tokenSource, beginOffset, endOffset);
case LESSTHAN :
return new LESSTHAN(TokenType.LESSTHAN, tokenSource, beginOffset, endOffset);
case GREATERTHAN :
return new GREATERTHAN(TokenType.GREATERTHAN, tokenSource, beginOffset, endOffset);
case MATCH :
return new MATCH(TokenType.MATCH, tokenSource, beginOffset, endOffset);
// Unmatchable input gets a dedicated InvalidToken (which carries no TokenType argument).
case INVALID :
return new InvalidToken(tokenSource, beginOffset, endOffset);
// Types without a dedicated subclass (e.g. the anonymous _TOKEN_n literals).
default :
return new Token(type, tokenSource, beginOffset, endOffset);
}
}
/** @return "inputSource:beginLine:beginColumn" for the start of this token */
public String getLocation() {
    return getInputSource() + ":" + getBeginLine() + ":" + getBeginColumn();
}

/** @return the AST node owning this token, or null */
public Node getParent() {
    return parent;
}

public void setParent(Node parent) {
    this.parent = parent;
}

/** @return whether this token covers zero characters */
public boolean isEmpty() {
    return length() == 0;
}

/** @return the number of characters this token covers */
public int length() {
    return endOffset - beginOffset;
}

/** CharSequence view: indices are relative to this token's begin offset. */
public CharSequence subSequence(int start, int end) {
    return getTokenSource().subSequence(start + beginOffset, end + beginOffset);
}

/** @return the character at the given token-relative offset */
public char charAt(int offset) {
    return getTokenSource().charAt(offset + beginOffset);
}
/**
 * @return the string image of the token (same as {@link #toString()})
 * @deprecated Use toString() instead
 */
@Deprecated
public String getImage() {
return getSource();
}
/**
 * @return the source text this token covers.
 * NOTE(review): getSource() returns null when no token source is set, so
 * this toString() can return null — which violates the CharSequence
 * contract. Consider returning "" in that case (behavior left unchanged).
 */
@Override
public String toString() {
return getSource();
}
}
// © 2015 - 2025 Weber Informatics LLC | Privacy Policy (listing-page footer)