All Downloads are FREE. Search and download functionalities are using the official Maven repository.

javacc.Parser.jj Maven / Gradle / Ivy

There is a newer version: 31.0.0
Show newest version

options {
    // Generate a non-static (instance-based) parser so that multiple
    // DruidSqlParserImpl instances can be created and run concurrently.
    STATIC = false;
    // SQL keywords are matched case-insensitively (SELECT == select).
    IGNORE_CASE = true;
    // Accept the full Unicode range in the input stream, not just
    // ASCII/Latin-1 — required for Unicode identifiers and string
    // literals in SQL text.
    UNICODE_INPUT = true;
}


PARSER_BEGIN(DruidSqlParserImpl)

package org.apache.druid.sql.calcite.parser;

import java.util.List;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.SqlInsert;
import org.apache.calcite.sql.SqlNodeList;
import org.apache.calcite.sql.SqlBasicCall;
import org.apache.druid.java.util.common.granularity.Granularity;
import org.apache.druid.java.util.common.granularity.GranularityType;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.sql.calcite.parser.DruidSqlInsert;
import org.apache.druid.sql.calcite.parser.DruidSqlParserUtils;
import org.apache.druid.sql.calcite.external.ExtendOperator;
import org.apache.druid.sql.calcite.external.ParameterizeOperator;
import org.apache.druid.sql.calcite.parser.ExternalDestinationSqlIdentifier;
import java.util.HashMap;

import org.apache.calcite.avatica.util.Casing;
import org.apache.calcite.avatica.util.TimeUnit;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.runtime.CalciteContextException;
import org.apache.calcite.sql.JoinConditionType;
import org.apache.calcite.sql.JoinType;
import org.apache.calcite.sql.SqlAlter;
import org.apache.calcite.sql.SqlBasicTypeNameSpec;
import org.apache.calcite.sql.SqlBinaryOperator;
import org.apache.calcite.sql.SqlCall;
import org.apache.calcite.sql.SqlCharStringLiteral;
import org.apache.calcite.sql.SqlCollation;
import org.apache.calcite.sql.SqlCollectionTypeNameSpec;
import org.apache.calcite.sql.SqlDataTypeSpec;
import org.apache.calcite.sql.SqlDelete;
import org.apache.calcite.sql.SqlDescribeSchema;
import org.apache.calcite.sql.SqlDescribeTable;
import org.apache.calcite.sql.SqlDynamicParam;
import org.apache.calcite.sql.SqlExplain;
import org.apache.calcite.sql.SqlExplainFormat;
import org.apache.calcite.sql.SqlExplainLevel;
import org.apache.calcite.sql.SqlFunction;
import org.apache.calcite.sql.SqlFunctionCategory;
import org.apache.calcite.sql.SqlHint;
import org.apache.calcite.sql.SqlIdentifier;
import org.apache.calcite.sql.SqlInsert;
import org.apache.calcite.sql.SqlInsertKeyword;
import org.apache.calcite.sql.SqlIntervalQualifier;
import org.apache.calcite.sql.SqlJdbcDataTypeName;
import org.apache.calcite.sql.SqlJdbcFunctionCall;
import org.apache.calcite.sql.SqlJoin;
import org.apache.calcite.sql.SqlJsonConstructorNullClause;
import org.apache.calcite.sql.SqlJsonEncoding;
import org.apache.calcite.sql.SqlJsonExistsErrorBehavior;
import org.apache.calcite.sql.SqlJsonEmptyOrError;
import org.apache.calcite.sql.SqlJsonQueryEmptyOrErrorBehavior;
import org.apache.calcite.sql.SqlJsonQueryWrapperBehavior;
import org.apache.calcite.sql.SqlJsonValueEmptyOrErrorBehavior;
import org.apache.calcite.sql.SqlJsonValueReturning;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlLiteral;
import org.apache.calcite.sql.SqlMatchRecognize;
import org.apache.calcite.sql.SqlMerge;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.SqlNodeList;
import org.apache.calcite.sql.SqlNumericLiteral;
import org.apache.calcite.sql.SqlOperator;
import org.apache.calcite.sql.SqlOrderBy;
import org.apache.calcite.sql.SqlPivot;
import org.apache.calcite.sql.SqlPostfixOperator;
import org.apache.calcite.sql.SqlPrefixOperator;
import org.apache.calcite.sql.SqlRowTypeNameSpec;
import org.apache.calcite.sql.SqlSampleSpec;
import org.apache.calcite.sql.SqlSelect;
import org.apache.calcite.sql.SqlSelectKeyword;
import org.apache.calcite.sql.SqlSetOption;
import org.apache.calcite.sql.SqlSnapshot;
import org.apache.calcite.sql.SqlTableRef;
import org.apache.calcite.sql.SqlTypeNameSpec;
import org.apache.calcite.sql.SqlUnnestOperator;
import org.apache.calcite.sql.SqlUnpivot;
import org.apache.calcite.sql.SqlUpdate;
import org.apache.calcite.sql.SqlUserDefinedTypeNameSpec;
import org.apache.calcite.sql.SqlUtil;
import org.apache.calcite.sql.SqlWindow;
import org.apache.calcite.sql.SqlWith;
import org.apache.calcite.sql.SqlWithItem;
import org.apache.calcite.sql.fun.SqlCase;
import org.apache.calcite.sql.fun.SqlInternalOperators;
import org.apache.calcite.sql.fun.SqlLibraryOperators;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
import org.apache.calcite.sql.fun.SqlTrimFunction;
import org.apache.calcite.sql.parser.Span;
import org.apache.calcite.sql.parser.SqlAbstractParserImpl;
import org.apache.calcite.sql.parser.SqlParseException;
import org.apache.calcite.sql.parser.SqlParser;
import org.apache.calcite.sql.parser.SqlParserImplFactory;
import org.apache.calcite.sql.parser.SqlParserPos;
import org.apache.calcite.sql.parser.SqlParserUtil;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.calcite.sql.validate.SqlConformance;
import org.apache.calcite.sql.validate.SqlConformanceEnum;
import org.apache.calcite.util.Glossary;
import org.apache.calcite.util.Pair;
import org.apache.calcite.util.SourceStringReader;
import org.apache.calcite.util.Util;
import org.apache.calcite.util.trace.CalciteTrace;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.slf4j.Logger;

import java.io.Reader;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Map;

import static org.apache.calcite.util.Static.RESOURCE;

/**
 * SQL parser, generated from Parser.jj by JavaCC.
 *
 * <p>
The public wrapper for this parser is {@link SqlParser}. */ public class DruidSqlParserImpl extends SqlAbstractParserImpl { private static final Logger LOGGER = CalciteTrace.getParserTracer(); // Can't use quoted literal because of a bug in how JavaCC translates // backslash-backslash. private static final char BACKSLASH = 0x5c; private static final char DOUBLE_QUOTE = 0x22; private static final String DQ = DOUBLE_QUOTE + ""; private static final String DQDQ = DQ + DQ; private static final SqlLiteral LITERAL_ZERO = SqlLiteral.createExactNumeric("0", SqlParserPos.ZERO); private static final SqlLiteral LITERAL_ONE = SqlLiteral.createExactNumeric("1", SqlParserPos.ZERO); private static final SqlLiteral LITERAL_MINUS_ONE = SqlLiteral.createExactNumeric("-1", SqlParserPos.ZERO); private static Metadata metadata; private Casing unquotedCasing; private Casing quotedCasing; private int identifierMaxLength; private SqlConformance conformance; /** * {@link SqlParserImplFactory} implementation for creating parser. 
*/ public static final SqlParserImplFactory FACTORY = new SqlParserImplFactory() { public SqlAbstractParserImpl getParser(Reader reader) { final DruidSqlParserImpl parser = new DruidSqlParserImpl(reader); if (reader instanceof SourceStringReader) { final String sql = ((SourceStringReader) reader).getSourceString(); parser.setOriginalSql(sql); } return parser; } }; public SqlParseException normalizeException(Throwable ex) { try { if (ex instanceof ParseException) { ex = cleanupParseException((ParseException) ex); } return convertException(ex); } catch (ParseException e) { throw new AssertionError(e); } } public Metadata getMetadata() { synchronized (DruidSqlParserImpl.class) { if (metadata == null) { metadata = new MetadataImpl( new DruidSqlParserImpl(new java.io.StringReader(""))); } return metadata; } } public void setTabSize(int tabSize) { jj_input_stream.setTabSize(tabSize); } public void switchTo(SqlAbstractParserImpl.LexicalState state) { final int stateOrdinal = Arrays.asList(DruidSqlParserImplTokenManager.lexStateNames) .indexOf(state.name()); token_source.SwitchTo(stateOrdinal); } public void setQuotedCasing(Casing quotedCasing) { this.quotedCasing = quotedCasing; } public void setUnquotedCasing(Casing unquotedCasing) { this.unquotedCasing = unquotedCasing; } public void setIdentifierMaxLength(int identifierMaxLength) { this.identifierMaxLength = identifierMaxLength; } public void setConformance(SqlConformance conformance) { this.conformance = conformance; } public SqlNode parseSqlExpressionEof() throws Exception { return SqlExpressionEof(); } public SqlNode parseSqlStmtEof() throws Exception { return SqlStmtEof(); } public SqlNodeList parseSqlStmtList() throws Exception { return SqlStmtList(); } public SqlNode parseArray() throws SqlParseException { switchTo(LexicalState.BQID); try { return ArrayLiteral(); } catch (ParseException ex) { throw normalizeException(ex); } catch (TokenMgrError ex) { throw normalizeException(ex); } } private SqlNode 
extend(SqlNode table, SqlNodeList extendList) { return SqlStdOperatorTable.EXTEND.createCall( Span.of(table, extendList).pos(), table, extendList); } /** Adds a warning that a token such as "HOURS" was used, * whereas the SQL standard only allows "HOUR". * *

Currently, we silently add an exception to a list of warnings. In * future, we may have better compliance checking, for example a strict * compliance mode that throws if any non-standard features are used. */ private TimeUnit warn(TimeUnit timeUnit) throws ParseException { final String token = getToken(0).image.toUpperCase(Locale.ROOT); warnings.add( SqlUtil.newContextException(getPos(), RESOURCE.nonStandardFeatureUsed(token))); return timeUnit; } } PARSER_END(DruidSqlParserImpl) /*************************************** * Utility Codes for Semantic Analysis * ***************************************/ /* For Debug */ JAVACODE void debug_message1() { LOGGER.info("{} , {}", getToken(0).image, getToken(1).image); } JAVACODE String unquotedIdentifier() { return SqlParserUtil.toCase(getToken(0).image, unquotedCasing); } /** * Allows parser to be extended with new types of table references. The * default implementation of this production is empty. */ SqlNode ExtendedTableRef() : { } { UnusedExtension() { return null; } } /** * Allows an OVER clause following a table expression as an extension to * standard SQL syntax. The default implementation of this production is empty. */ SqlNode TableOverOpt() : { } { { return null; } } /* * Parses dialect-specific keywords immediately following the SELECT keyword. */ void SqlSelectKeywords(List keywords) : {} { E() } /* * Parses dialect-specific keywords immediately following the INSERT keyword. */ void SqlInsertKeywords(List keywords) : {} { E() } /* * Parse Floor/Ceil function parameters */ SqlNode FloorCeilOptions(Span s, boolean floorFlag) : { SqlNode node; } { node = StandardFloorCeilOptions(s, floorFlag) { return node; } } /* // This file contains the heart of a parser for SQL SELECT statements. // code can be shared between various parsers (for example, a DDL parser and a // DML parser) but is not a standalone JavaCC file. You need to prepend a // parser declaration (such as that in Parser.jj). 
*/ /* Epsilon */ JAVACODE void E() {} /** @Deprecated */ JAVACODE List startList(Object o) { List list = new ArrayList(); list.add(o); return list; } /* * NOTE jvs 6-Feb-2004: The straightforward way to implement the SQL grammar is * to keep query expressions (SELECT, UNION, etc) separate from row expressions * (+, LIKE, etc). However, this is not possible with an LL(k) parser, because * both kinds of expressions allow parenthesization, so no fixed amount of left * context is ever good enough. A sub-query can be a leaf in a row expression, * and can include operators like UNION, so it's not even possible to use a * syntactic lookahead rule like "look past an indefinite number of parentheses * until you see SELECT, VALUES, or TABLE" (since at that point we still * don't know whether we're parsing a sub-query like ((select ...) + x) * vs. (select ... union select ...). * * The somewhat messy solution is to unify the two kinds of expression, * and to enforce syntax rules using parameterized context. This * is the purpose of the ExprContext parameter. It is passed to * most expression productions, which check the expressions encountered * against the context for correctness. When a query * element like SELECT is encountered, the production calls * checkQueryExpression, which will throw an exception if * a row expression was expected instead. When a row expression like * IN is encountered, the production calls checkNonQueryExpression * instead. It is very important to understand how this works * when modifying the grammar. * * The commingling of expressions results in some bogus ambiguities which are * resolved with LOOKAHEAD hints. The worst example is comma. SQL allows both * (WHERE x IN (1,2)) and (WHERE x IN (select ...)). This means when we parse * the right-hand-side of an IN, we have to allow any kind of expression inside * the parentheses. Now consider the expression "WHERE x IN(SELECT a FROM b * GROUP BY c,d)". 
When the parser gets to "c,d" it doesn't know whether the * comma indicates the end of the GROUP BY or the end of one item in an IN * list. Luckily, we know that select and comma-list are mutually exclusive * within IN, so we use maximal munch for the GROUP BY comma. However, this * usage of hints could easily mask unintended ambiguities resulting from * future changes to the grammar, making it very brittle. */ JAVACODE protected SqlParserPos getPos() { return new SqlParserPos( token.beginLine, token.beginColumn, token.endLine, token.endColumn); } /** Starts a span at the current position. */ JAVACODE Span span() { return Span.of(getPos()); } JAVACODE void checkQueryExpression(ExprContext exprContext) { switch (exprContext) { case ACCEPT_NON_QUERY: case ACCEPT_SUB_QUERY: case ACCEPT_CURSOR: throw SqlUtil.newContextException(getPos(), RESOURCE.illegalQueryExpression()); } } JAVACODE void checkNonQueryExpression(ExprContext exprContext) { switch (exprContext) { case ACCEPT_QUERY: throw SqlUtil.newContextException(getPos(), RESOURCE.illegalNonQueryExpression()); } } JAVACODE SqlNode checkNotJoin(SqlNode e) { if (e instanceof SqlJoin) { throw SqlUtil.newContextException(e.getParserPosition(), RESOURCE.illegalJoinExpression()); } return e; } /** * Converts a ParseException (local to this particular instantiation * of the parser) into a SqlParseException (common to all parsers). */ JAVACODE SqlParseException convertException(Throwable ex) { if (ex instanceof SqlParseException) { return (SqlParseException) ex; } SqlParserPos pos = null; int[][] expectedTokenSequences = null; String[] tokenImage = null; if (ex instanceof ParseException) { ParseException pex = (ParseException) ex; expectedTokenSequences = pex.expectedTokenSequences; tokenImage = pex.tokenImage; if (pex.currentToken != null) { final Token token = pex.currentToken.next; // Checks token.image.equals("1") to avoid recursive call. 
// The SqlAbstractParserImpl#MetadataImpl constructor uses constant "1" to // throw intentionally to collect the expected tokens. if (!token.image.equals("1") && getMetadata().isKeyword(token.image) && SqlParserUtil.allowsIdentifier(tokenImage, expectedTokenSequences)) { // If the next token is a keyword, reformat the error message as: // Incorrect syntax near the keyword '{keyword}' at line {line_number}, // column {column_number}. final String expecting = ex.getMessage() .substring(ex.getMessage().indexOf("Was expecting")); final String errorMsg = String.format("Incorrect syntax near the keyword '%s' " + "at line %d, column %d.\n%s", token.image, token.beginLine, token.beginColumn, expecting); // Replace the ParseException with explicit error message. ex = new ParseException(errorMsg); } pos = new SqlParserPos( token.beginLine, token.beginColumn, token.endLine, token.endColumn); } } else if (ex instanceof TokenMgrError) { expectedTokenSequences = null; tokenImage = null; // Example: // Lexical error at line 3, column 24. Encountered "#" after "a". final java.util.regex.Pattern pattern = java.util.regex.Pattern.compile( "(?s)Lexical error at line ([0-9]+), column ([0-9]+).*"); java.util.regex.Matcher matcher = pattern.matcher(ex.getMessage()); if (matcher.matches()) { int line = Integer.parseInt(matcher.group(1)); int column = Integer.parseInt(matcher.group(2)); pos = new SqlParserPos(line, column, line, column); } } else if (ex instanceof CalciteContextException) { // CalciteContextException is the standard wrapper for exceptions // produced by the validator, but in the parser, the standard is // SqlParseException; so, strip it away. In case you were wondering, // the CalciteContextException appears because the parser // occasionally calls into validator-style code such as // SqlSpecialOperator.reduceExpr. 
CalciteContextException ece = (CalciteContextException) ex; pos = new SqlParserPos( ece.getPosLine(), ece.getPosColumn(), ece.getEndPosLine(), ece.getEndPosColumn()); ex = ece.getCause(); } return new SqlParseException( ex.getMessage(), pos, expectedTokenSequences, tokenImage, ex); } /** * Removes or transforms misleading information from a parse exception. * * @param e dirty excn * * @return clean excn */ JAVACODE ParseException cleanupParseException(ParseException ex) { if (ex.expectedTokenSequences == null) { return ex; } int iIdentifier = Arrays.asList(ex.tokenImage).indexOf(""); // Find all sequences in the error which contain identifier. For // example, // {} // {A} // {B, C} // {D, } // {D, A} // {D, B} // // would yield // {} // {D} final List prefixList = new ArrayList(); for (int i = 0; i < ex.expectedTokenSequences.length; ++i) { int[] seq = ex.expectedTokenSequences[i]; int j = seq.length - 1; int i1 = seq[j]; if (i1 == iIdentifier) { int[] prefix = new int[j]; System.arraycopy(seq, 0, prefix, 0, j); prefixList.add(prefix); } } if (prefixList.isEmpty()) { return ex; } int[][] prefixes = (int[][]) prefixList.toArray(new int[prefixList.size()][]); // Since was one of the possible productions, // we know that the parser will also have included all // of the non-reserved keywords (which are treated as // identifiers in non-keyword contexts). So, now we need // to clean those out, since they're totally irrelevant. 
final List list = new ArrayList(); Metadata metadata = getMetadata(); for (int i = 0; i < ex.expectedTokenSequences.length; ++i) { int [] seq = ex.expectedTokenSequences[i]; String tokenImage = ex.tokenImage[seq[seq.length - 1]]; String token = SqlParserUtil.getTokenVal(tokenImage); if (token == null || !metadata.isNonReservedKeyword(token)) { list.add(seq); continue; } boolean match = matchesPrefix(seq, prefixes); if (!match) { list.add(seq); } } ex.expectedTokenSequences = (int [][]) list.toArray(new int [list.size()][]); return ex; } JAVACODE boolean matchesPrefix(int[] seq, int[][] prefixes) { nextPrefix: for (int[] prefix : prefixes) { if (seq.length == prefix.length + 1) { for (int k = 0; k < prefix.length; k++) { if (prefix[k] != seq[k]) { continue nextPrefix; } } return true; } } return false; } /***************************************** * Syntactical Descriptions * *****************************************/ SqlNode ExprOrJoinOrOrderedQuery(ExprContext exprContext) : { SqlNode e; final List list = new ArrayList(); } { // Lookhead to distinguish between "TABLE emp" (which will be // matched by ExplicitTable() via Query()) // and "TABLE fun(args)" (which will be matched by TableRef()) ( LOOKAHEAD(2) e = Query(exprContext) e = OrderByLimitOpt(e) { return e; } | e = TableRef1(ExprContext.ACCEPT_QUERY_OR_JOIN) ( e = JoinTable(e) )* { list.add(e); } ( AddSetOpQuery(list, exprContext) )* { return SqlParserUtil.toTree(list); } ) } /** * Parses either a row expression or a query expression with an optional * ORDER BY. * *

 * <p>Postgres syntax for limit:
 *
 * <blockquote><pre>
 *    [ LIMIT { count | ALL } ]
 *    [ OFFSET start ]
 * </pre></blockquote>
 *
 * <p>Trino syntax for limit:
 *
 * <blockquote><pre>
 *    [ OFFSET start ]
 *    [ LIMIT { count | ALL } ]
 * </pre></blockquote>
 *
 * <p>MySQL syntax for limit:
 *
 * <blockquote><pre>
 *    [ LIMIT { count | start, count } ]
 * </pre></blockquote>
 *
 * <p>SQL:2008 syntax for limit:
 *
 * <blockquote><pre>
 *    [ OFFSET start { ROW | ROWS } ]
 *    [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]
 * </pre></blockquote>
*/ SqlNode OrderedQueryOrExpr(ExprContext exprContext) : { SqlNode e; } { e = QueryOrExpr(exprContext) e = OrderByLimitOpt(e) { return e; } } /** Reads optional "ORDER BY", "LIMIT", "OFFSET", "FETCH" following a query, * {@code e}. If any of them are present, adds them to the query; * otherwise returns the query unchanged. * Throws if they are present and {@code e} is not a query. */ SqlNode OrderByLimitOpt(SqlNode e) : { final SqlNodeList orderBy; final Span s = Span.of(); SqlNode[] offsetFetch = {null, null}; } { ( // use the syntactic type of the expression we just parsed // to decide whether ORDER BY makes sense orderBy = OrderBy(e.isA(SqlKind.QUERY)) | { orderBy = null; } ) [ LimitClause(s, offsetFetch) [ OffsetClause(s, offsetFetch) ] | OffsetClause(s, offsetFetch) [ LimitClause(s, offsetFetch) { if (!this.conformance.isOffsetLimitAllowed()) { throw SqlUtil.newContextException(s.end(this), RESOURCE.offsetLimitNotAllowed()); } } | FetchClause(offsetFetch) ] | FetchClause(offsetFetch) ] { if (orderBy != null || offsetFetch[0] != null || offsetFetch[1] != null) { return new SqlOrderBy(getPos(), e, Util.first(orderBy, SqlNodeList.EMPTY), offsetFetch[0], offsetFetch[1]); } return e; } } /** * Parses an OFFSET clause in an ORDER BY expression. */ void OffsetClause(Span s, SqlNode[] offsetFetch) : { } { // ROW or ROWS is required in SQL:2008 but we make it optional // because it is not present in Postgres-style syntax. { s.add(this); } offsetFetch[0] = UnsignedNumericLiteralOrParam() [ | ] } /** * Parses a FETCH clause in an ORDER BY expression. */ void FetchClause(SqlNode[] offsetFetch) : { } { // SQL:2008-style syntax. "OFFSET ... FETCH ...". // If you specify both LIMIT and FETCH, FETCH wins. ( | ) offsetFetch[1] = UnsignedNumericLiteralOrParam() ( | ) } /** * Parses a LIMIT clause in an ORDER BY expression. */ void LimitClause(Span s, SqlNode[] offsetFetch) : { } { // Postgres-style syntax. "LIMIT ... OFFSET ..." { s.add(this); } ( // MySQL-style syntax. 
"LIMIT start, count" LOOKAHEAD(2) offsetFetch[0] = UnsignedNumericLiteralOrParam() offsetFetch[1] = UnsignedNumericLiteralOrParam() { if (!this.conformance.isLimitStartCountAllowed()) { throw SqlUtil.newContextException(s.end(this), RESOURCE.limitStartCountNotAllowed()); } } | offsetFetch[1] = UnsignedNumericLiteralOrParam() | ) } /** * Parses a leaf in a query expression (SELECT, VALUES or TABLE). */ SqlNode LeafQuery(ExprContext exprContext) : { SqlNode e; } { { // ensure a query is legal in this context checkQueryExpression(exprContext); } e = SqlSelect() { return e; } | e = TableConstructor() { return e; } | e = ExplicitTable(getPos()) { return e; } } /** * Parses a parenthesized query or single row expression. * Depending on {@code exprContext}, may also accept a join. */ SqlNode ParenthesizedExpression(ExprContext exprContext) : { SqlNode e; } { { // we've now seen left paren, so queries inside should // be allowed as sub-queries switch (exprContext) { case ACCEPT_SUB_QUERY: exprContext = ExprContext.ACCEPT_NONCURSOR; break; case ACCEPT_CURSOR: exprContext = ExprContext.ACCEPT_ALL; break; } } e = ExprOrJoinOrOrderedQuery(exprContext) { exprContext.throwIfNotCompatible(e); return e; } } /** * Parses a parenthesized query or comma-list of row expressions. * *

 * <p>REVIEW jvs 8-Feb-2004: There's a small hole in this production. It can be
 * used to construct something like
 *
 * <blockquote><pre>
 * WHERE x IN (select count(*) from t where c=d,5)
 * </pre></blockquote>
 *
 * <p>which should be illegal. The above is interpreted as equivalent to
 *
 * <blockquote><pre>
 * WHERE x IN ((select count(*) from t where c=d),5)
 * </pre></blockquote>
 *
which is a legal use of a sub-query. The only way to fix the hole is to * be able to remember whether a subexpression was parenthesized or not, which * means preserving parentheses in the SqlNode tree. This is probably * desirable anyway for use in purely syntactic parsing applications (e.g. SQL * pretty-printer). However, if this is done, it's important to also make * isA() on the paren node call down to its operand so that we can * always correctly discriminate a query from a row expression. */ SqlNodeList ParenthesizedQueryOrCommaList( ExprContext exprContext) : { SqlNode e; final List list = new ArrayList(); ExprContext firstExprContext = exprContext; final Span s; } { { // we've now seen left paren, so a query by itself should // be interpreted as a sub-query s = span(); switch (exprContext) { case ACCEPT_SUB_QUERY: firstExprContext = ExprContext.ACCEPT_NONCURSOR; break; case ACCEPT_CURSOR: firstExprContext = ExprContext.ACCEPT_ALL; break; } } e = OrderedQueryOrExpr(firstExprContext) { list.add(e); } ( { // a comma-list can't appear where only a query is expected checkNonQueryExpression(exprContext); } AddExpression(list, exprContext) )* { return new SqlNodeList(list, s.end(this)); } } /** As ParenthesizedQueryOrCommaList, but allows DEFAULT * in place of any of the expressions. For example, * {@code (x, DEFAULT, null, DEFAULT)}. 
*/ SqlNodeList ParenthesizedQueryOrCommaListWithDefault( ExprContext exprContext) : { SqlNode e; final List list = new ArrayList(); ExprContext firstExprContext = exprContext; final Span s; } { { // we've now seen left paren, so a query by itself should // be interpreted as a sub-query s = span(); switch (exprContext) { case ACCEPT_SUB_QUERY: firstExprContext = ExprContext.ACCEPT_NONCURSOR; break; case ACCEPT_CURSOR: firstExprContext = ExprContext.ACCEPT_ALL; break; } } ( e = OrderedQueryOrExpr(firstExprContext) { list.add(e); } | e = Default() { list.add(e); } ) ( { // a comma-list can't appear where only a query is expected checkNonQueryExpression(exprContext); } ( e = Expression(exprContext) { list.add(e); } | e = Default() { list.add(e); } ) )* { return new SqlNodeList(list, s.end(this)); } } /** * Parses function parameter lists. * If the list starts with DISTINCT or ALL, it is discarded. */ List UnquantifiedFunctionParameterList(ExprContext exprContext) : { final List args; } { args = FunctionParameterList(exprContext) { args.remove(0); // remove DISTINCT or ALL, if present return args; } } /** * Parses function parameter lists including DISTINCT keyword recognition, * DEFAULT, and named argument assignment. 
*/ List FunctionParameterList(ExprContext exprContext) : { final SqlLiteral qualifier; final List list = new ArrayList(); } { ( qualifier = AllOrDistinct() { list.add(qualifier); } | { list.add(null); } ) AddArg0(list, exprContext) ( { // a comma-list can't appear where only a query is expected checkNonQueryExpression(exprContext); } AddArg(list, exprContext) )* { return list; } } SqlLiteral AllOrDistinct() : { } { { return SqlSelectKeyword.DISTINCT.symbol(getPos()); } | { return SqlSelectKeyword.ALL.symbol(getPos()); } } void AddArg0(List list, ExprContext exprContext) : { final SqlIdentifier name; SqlNode e; final ExprContext firstExprContext; { // we've now seen left paren, so queries inside should // be allowed as sub-queries switch (exprContext) { case ACCEPT_SUB_QUERY: firstExprContext = ExprContext.ACCEPT_NONCURSOR; break; case ACCEPT_CURSOR: firstExprContext = ExprContext.ACCEPT_ALL; break; default: firstExprContext = exprContext; break; } } } { ( LOOKAHEAD(2) name = SimpleIdentifier() | { name = null; } ) ( e = Default() | LOOKAHEAD(3) e = TableParam() | e = PartitionedQueryOrQueryOrExpr(firstExprContext) ) { if (name != null) { e = SqlStdOperatorTable.ARGUMENT_ASSIGNMENT.createCall( Span.of(name, e).pos(), e, name); } list.add(e); } } void AddArg(List list, ExprContext exprContext) : { final SqlIdentifier name; SqlNode e; } { ( LOOKAHEAD(2) name = SimpleIdentifier() | { name = null; } ) ( e = Default() | e = Expression(exprContext) | e = TableParam() ) { if (name != null) { e = SqlStdOperatorTable.ARGUMENT_ASSIGNMENT.createCall( Span.of(name, e).pos(), e, name); } list.add(e); } } SqlNode Default() : {} { { return SqlStdOperatorTable.DEFAULT.createCall(getPos()); } } /** * Parses a query (SELECT, UNION, INTERSECT, EXCEPT, VALUES, TABLE) followed by * the end-of-file symbol. */ SqlNode SqlQueryEof() : { SqlNode query; } { query = OrderedQueryOrExpr(ExprContext.ACCEPT_QUERY) { return query; } } /** * Parses a list of SQL statements separated by semicolon. 
* The semicolon is required between statements, but is * optional at the end. */ SqlNodeList SqlStmtList() : { final List stmtList = new ArrayList(); SqlNode stmt; } { stmt = SqlStmt() { stmtList.add(stmt); } ( [ stmt = SqlStmt() { stmtList.add(stmt); } ] )* { return new SqlNodeList(stmtList, Span.of(stmtList).pos()); } } /** * Parses an SQL statement. */ SqlNode SqlStmt() : { SqlNode stmt; } { ( LOOKAHEAD(2) stmt = DruidSqlInsertEof() | LOOKAHEAD(2) stmt = DruidSqlExplain() | LOOKAHEAD(2) stmt = DruidSqlReplaceEof() | stmt = SqlSetOption(Span.of(), null) | stmt = SqlAlter() | stmt = OrderedQueryOrExpr(ExprContext.ACCEPT_QUERY) | stmt = SqlExplain() | stmt = SqlDescribe() | stmt = SqlInsert() | stmt = SqlDelete() | stmt = SqlUpdate() | stmt = SqlMerge() | stmt = SqlProcedureCall() ) { return stmt; } } /** * Parses an SQL statement followed by the end-of-file symbol. */ SqlNode SqlStmtEof() : { SqlNode stmt; } { stmt = SqlStmt() { return stmt; } } /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. 
*/ SqlGranularityLiteral PartitionGranularity() : { SqlNode e; Granularity granularity; String unparseString; } { ( { granularity = Granularities.HOUR; unparseString = "HOUR"; } | { granularity = Granularities.DAY; unparseString = "DAY"; } | { granularity = Granularities.MONTH; unparseString = "MONTH"; } | { granularity = Granularities.YEAR; unparseString = "YEAR"; } | { granularity = Granularities.ALL; unparseString = "ALL"; } [

FROM is mandatory in standard SQL, optional in dialects such as MySQL, * PostgreSQL. The parser allows SELECT without FROM, but the validator fails * if conformance is, say, STRICT_2003. */ SqlNode DruidFromClause() : { SqlNode e, e2; SqlLiteral joinType; } { e = DruidJoin() ( // Comma joins should only occur at top-level in the FROM clause. // Valid: // * FROM a, b // * FROM (a CROSS JOIN b), c // Not valid: // * FROM a CROSS JOIN (b, c) LOOKAHEAD(1) { joinType = JoinType.COMMA.symbol(getPos()); } e2 = DruidJoin() { e = new SqlJoin(joinType.getParserPosition(), e, SqlLiteral.createBoolean(false, joinType.getParserPosition()), joinType, e2, JoinConditionType.NONE.symbol(SqlParserPos.ZERO), null); } )* { return e; } } SqlNode DruidJoin() : { SqlNode e; } { e = DruidTableRef1(ExprContext.ACCEPT_QUERY_OR_JOIN) ( LOOKAHEAD(2) e = DruidJoinTable(e) )* { return e; } } /** Matches "LEFT JOIN t ON ...", "RIGHT JOIN t USING ...", "JOIN t". */ SqlNode DruidJoinTable(SqlNode e) : { SqlNode e2, condition; final SqlLiteral natural, joinType, on, using; SqlNodeList list; } { // LOOKAHEAD(3) is needed here rather than a LOOKAHEAD(2) because JavaCC // calculates minimum lookahead count incorrectly for choice that contains // zero size child. For instance, with the generated code, // "LOOKAHEAD(2, Natural(), JoinType())" // returns true immediately if it sees a single "" token. Where we // expect the lookahead succeeds after " ". // // For more information about the issue, // see https://github.com/javacc/javacc/issues/86 // // We allow CROSS JOIN (joinType = CROSS_JOIN) to have a join condition, // even though that is not valid SQL; the validator will catch it. 
LOOKAHEAD(3) natural = Natural() joinType = JoinType() e2 = DruidTableRef1(ExprContext.ACCEPT_QUERY_OR_JOIN) ( { on = JoinConditionType.ON.symbol(getPos()); } condition = Expression(ExprContext.ACCEPT_SUB_QUERY) { return new SqlJoin(joinType.getParserPosition(), e, natural, joinType, e2, on, condition); } | { using = JoinConditionType.USING.symbol(getPos()); } list = ParenthesizedSimpleIdentifierList() { return new SqlJoin(joinType.getParserPosition(), e, natural, joinType, e2, using, new SqlNodeList(list, Span.of(using).end(this))); } | { return new SqlJoin(joinType.getParserPosition(), e, natural, joinType, e2, JoinConditionType.NONE.symbol(joinType.getParserPosition()), null); } ) | { joinType = JoinType.CROSS.symbol(getPos()); } e2 = DruidTableRef2(true) { if (!this.conformance.isApplyAllowed()) { throw SqlUtil.newContextException(getPos(), RESOURCE.applyNotAllowed()); } return new SqlJoin(joinType.getParserPosition(), e, SqlLiteral.createBoolean(false, joinType.getParserPosition()), joinType, e2, JoinConditionType.NONE.symbol(SqlParserPos.ZERO), null); } | { joinType = JoinType.LEFT.symbol(getPos()); } e2 = DruidTableRef2(true) { if (!this.conformance.isApplyAllowed()) { throw SqlUtil.newContextException(getPos(), RESOURCE.applyNotAllowed()); } return new SqlJoin(joinType.getParserPosition(), e, SqlLiteral.createBoolean(false, joinType.getParserPosition()), joinType, e2, JoinConditionType.ON.symbol(SqlParserPos.ZERO), SqlLiteral.createBoolean(true, joinType.getParserPosition())); } } /** * Parses a table reference in a FROM clause, not lateral unless LATERAL * is explicitly specified. */ SqlNode DruidTableRef() : { final SqlNode e; } { e = DruidTableRef3(ExprContext.ACCEPT_QUERY, false) { return e; } } SqlNode DruidTableRef1(ExprContext exprContext) : { final SqlNode e; } { e = DruidTableRef3(exprContext, false) { return e; } } /** * Parses a table reference in a FROM clause. 
*/ SqlNode DruidTableRef2(boolean lateral) : { final SqlNode e; } { e = DruidTableRef3(ExprContext.ACCEPT_QUERY, lateral) { return e; } } SqlNode DruidTableRef3(ExprContext exprContext, boolean lateral) : { final SqlIdentifier tableName; SqlNode tableRef; List paramList; final SqlIdentifier alias; final Span s; SqlNodeList args; final SqlNodeList columnAliasList; SqlUnnestOperator unnestOp = SqlStdOperatorTable.UNNEST; SqlNodeList extendList = null; } { ( LOOKAHEAD(2) tableName = CompoundTableIdentifier() ( tableRef = TableHints(tableName) | { tableRef = tableName; } ) // BEGIN: Druid-specific code [ paramList = FunctionParameterList(ExprContext.ACCEPT_NONCURSOR) { tableRef = ParameterizeOperator.PARAM.createCall(tableRef, paramList); } ] // END: Druid-specific code tableRef = Over(tableRef) [ tableRef = Snapshot(tableRef) ] [ tableRef = MatchRecognize(tableRef) ] | LOOKAHEAD(2) [ { lateral = true; } ] tableRef = ParenthesizedExpression(exprContext) tableRef = Over(tableRef) tableRef = addLateral(tableRef, lateral) [ tableRef = MatchRecognize(tableRef) ] | { s = span(); } args = ParenthesizedQueryOrCommaList(ExprContext.ACCEPT_SUB_QUERY) [ { unnestOp = SqlStdOperatorTable.UNNEST_WITH_ORDINALITY; } ] { tableRef = unnestOp.createCall(s.end(this), (List) args); } | [ { lateral = true; } ] tableRef = TableFunctionCall() // BEGIN: Druid-specific code [ [ ] extendList = ExtendList() { tableRef = ExtendOperator.EXTEND.createCall( Span.of(tableRef, extendList).pos(), tableRef, extendList); } ] // END: Druid-specific code tableRef = addLateral(tableRef, lateral) | tableRef = ExtendedTableRef() ) [ LOOKAHEAD(2) tableRef = Pivot(tableRef) ] [ LOOKAHEAD(2) tableRef = Unpivot(tableRef) ] [ [ ] alias = SimpleIdentifier() ( columnAliasList = ParenthesizedSimpleIdentifierList() | { columnAliasList = null; } ) { // Standard SQL (and Postgres) allow applying "AS alias" to a JOIN, // e.g. "FROM (a CROSS JOIN b) AS c". 
The new alias obscures the // internal aliases, and columns cannot be referenced if they are // not unique. TODO: Support this behavior; see // [CALCITE-5168] Allow AS after parenthesized JOIN checkNotJoin(tableRef); if (columnAliasList == null) { tableRef = SqlStdOperatorTable.AS.createCall( Span.of(tableRef).end(this), tableRef, alias); } else { List idList = new ArrayList(); idList.add(tableRef); idList.add(alias); idList.addAll(columnAliasList.getList()); tableRef = SqlStdOperatorTable.AS.createCall( Span.of(tableRef).end(this), idList); } } ] [ tableRef = Tablesample(tableRef) ] { return tableRef; } } SqlNodeList ParenthesizedKeyValueOptionCommaList() : { final Span s; final List list = new ArrayList(); } { { s = span(); } AddKeyValueOption(list) ( AddKeyValueOption(list) )* { return new SqlNodeList(list, s.end(this)); } } /** * Parses an option with format key=val whose key is a simple identifier or string literal * and value is a string literal. */ void AddKeyValueOption(List list) : { final SqlNode key; final SqlNode value; } { ( key = SimpleIdentifier() | key = StringLiteral() ) value = StringLiteral() { list.add(key); list.add(value); } } /** Parses an option value (either a string or a numeric) and adds to a list. */ void AddOptionValue(List list) : { final SqlNode value; } { ( value = NumericLiteral() { list.add(value); } | value = StringLiteral() { list.add(value); } ) } /** * Parses a literal list separated by comma. The literal is either a string or a numeric. 
*/ SqlNodeList ParenthesizedLiteralOptionCommaList() : { final Span s; final List list = new ArrayList(); } { { s = span(); } AddOptionValue(list) ( AddOptionValue(list) )* { return new SqlNodeList(list, s.end(this)); } } void AddHint(List hints) : { final SqlIdentifier hintName; final SqlNodeList hintOptions; final SqlHint.HintOptionFormat optionFormat; } { hintName = SimpleIdentifier() ( LOOKAHEAD(5) hintOptions = ParenthesizedKeyValueOptionCommaList() { optionFormat = SqlHint.HintOptionFormat.KV_LIST; } | LOOKAHEAD(3) hintOptions = ParenthesizedSimpleIdentifierList() { optionFormat = SqlHint.HintOptionFormat.ID_LIST; } | LOOKAHEAD(3) hintOptions = ParenthesizedLiteralOptionCommaList() { optionFormat = SqlHint.HintOptionFormat.LITERAL_LIST; } | LOOKAHEAD(2) [ ] { hintOptions = SqlNodeList.EMPTY; optionFormat = SqlHint.HintOptionFormat.EMPTY; } ) { hints.add( new SqlHint(Span.of(hintOptions).end(this), hintName, hintOptions, optionFormat)); } } /** Parses hints following a table reference, * and returns the wrapped table reference. */ SqlNode TableHints(SqlIdentifier tableName) : { final List hints = new ArrayList(); } { AddHint(hints) ( AddHint(hints) )* { final SqlParserPos pos = Span.of(tableName).addAll(hints).end(this); final SqlNodeList hintList = new SqlNodeList(hints, pos); return new SqlTableRef(pos, tableName, hintList); } } /** * Parses a leaf SELECT expression without ORDER BY. 
*/ SqlSelect SqlSelect() : { final List keywords = new ArrayList(); final SqlLiteral keyword; final SqlNodeList keywordList; final List selectList = new ArrayList(); final SqlNode fromClause; final SqlNode where; final SqlNodeList groupBy; final SqlNode having; final SqlNodeList windowDecls; final SqlNode qualify; final List hints = new ArrayList(); final Span s; } { | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | ) } /* LITERALS */ TOKEN : { < UNSIGNED_INTEGER_LITERAL: (["0"-"9"])+ > | < APPROX_NUMERIC_LITERAL: ( | ) > | < DECIMAL_NUMERIC_LITERAL: (["0"-"9"])+(".")?(["0"-"9"])* | "."(["0"-"9"])+ > | < #EXPONENT: ["e","E"] (["+","-"])? (["0"-"9"])+ > | < #HEXDIGIT: ["0"-"9","a"-"f","A"-"F"] > | < #WHITESPACE: [ " ","\t","\n","\r","\f" ] > | /* To improve error reporting, we allow all kinds of characters, * not just hexits, in a binary string literal. */ < BINARY_STRING_LITERAL: ["x","X"] ( (~["'"]) | ("''"))* > } // All databases except BigQuery support standard single-quoted literals, // which use single-quote as the escape character. TOKEN : { < QUOTED_STRING: ( (~["'"]) | ("''"))* > | < PREFIXED_STRING_LITERAL: ("_" | "N") > | < UNICODE_STRING_LITERAL: "U" "&" > | < C_STYLE_ESCAPED_STRING_LITERAL: "E" ( (~["'", "\\"]) | ("\\" ~[]) | "''")* > | < #CHARSETNAME: (["a"-"z","A"-"Z","0"-"9"]) (["a"-"z","A"-"Z","0"-"9",":",".","-","_"])* > } // BigQuery supports single- and double-quoted literals with back-slash // as the escape character. 
TOKEN : { // BigQuery-style double-quoted string, escaped using backslash < BIG_QUERY_DOUBLE_QUOTED_STRING: ( (~["\\", "\""]) | ("\\" ~[]) )* > | // BigQuery-style single-quoted string, escaped using backslash < BIG_QUERY_QUOTED_STRING: ( (~["\\", "'"]) | ("\\" ~[]) )* > } TOKEN : { < UNICODE_QUOTED_ESCAPE_CHAR: (~["0"-"9","a"-"f","A"-"F","+","\""," ","\t","\n","\r","\f"]) > } /* SEPARATORS */ TOKEN : { < LPAREN: "("> | < RPAREN: ")"> | < LBRACE_D: "{" (" ")* ["d","D"] > | < LBRACE_T: "{" (" ")* ["t","T"] > | < LBRACE_TS: "{" (" ")* ["t","T"] ["s","S"] > | < LBRACE_FN: "{" (" ")* ["f","F"] ["n","N"] > | < LBRACE: "{" > | < RBRACE: "}" > | < LBRACKET: "[" > | < RBRACKET: "]" > | < SEMICOLON: ";" > | < DOT: "." > | < COMMA: "," > } /* OPERATORS */ TOKEN : { < EQ: "=" > | < GT: ">" > | < LT: "<" > | < HOOK: "?" > | < COLON: ":" > | < LE: "<=" > | < GE: ">=" > | < NE: "<>" > | < NE2: "!=" > | < PLUS: "+" > | < MINUS: "-" > | < STAR: "*" > | < SLASH: "/" > | < PERCENT_REMAINDER: "%" > | < CONCAT: "||" > | < NAMED_ARGUMENT_ASSIGNMENT: "=>" > | < DOUBLE_PERIOD: ".." > | < QUOTE: "'" > | < DOUBLE_QUOTE: "\"" > | < VERTICAL_BAR: "|" > | < CARET: "^" > | < DOLLAR: "$" > } /***************************************** * Lexical Descriptions * *****************************************/ TOKEN_MGR_DECLS : { final List lexicalStateStack = new ArrayList(); void pushState() { lexicalStateStack.add(curLexState); } void popState() { SwitchTo(lexicalStateStack.remove(lexicalStateStack.size() - 1)); } void beforeTableName() { if (curLexState == BQID) { pushState(); SwitchTo(BQHID); } } void afterTableName() { if (curLexState == BQHID) { popState(); } } } /* Lexical states: DEFAULT: Identifiers are quoted in brackets, e.g. [My Identifier] DQID: Identifiers are double-quoted, e.g. "My Identifier" BTID: Identifiers are enclosed in back-ticks, escaped using back-ticks, e.g. `My ``Quoted`` Identifier` BQID: Identifiers are enclosed in back-ticks, escaped using backslash, e.g. 
`My \`Quoted\` Identifier`, and with the potential to shift into BQHID in contexts where table names are expected, and thus allow hyphen-separated identifiers as part of table names BQHID: Identifiers are enclosed in back-ticks, escaped using backslash, e.g. `My \`Quoted\` Identifier` and unquoted identifiers may contain hyphens, e.g. foo-bar IN_SINGLE_LINE_COMMENT: IN_FORMAL_COMMENT: IN_MULTI_LINE_COMMENT: DEFAULT, DQID, BTID, BQID are the 4 'normal states'. Behavior is identical except for how quoted identifiers are recognized. The BQHID state exists only at the start of a table name (e.g. immediately after FROM or INSERT INTO). As soon as an identifier is seen, the state shifts back to BTID. After a comment has completed, the lexer returns to the previous state, one of the 'normal states'. */ /* WHITE SPACE */ SKIP : { " " | "\t" | "\n" | "\r" | "\f" } /* COMMENTS */ TOKEN : { < HINT_BEG: "/*+"> | < COMMENT_END: "*/" > } MORE : { <"/**" ~["/"]> { pushState(); } : IN_FORMAL_COMMENT } MORE : { "/*" { pushState(); } : IN_MULTI_LINE_COMMENT } SKIP : { } SPECIAL_TOKEN : { > { popState(); } } SPECIAL_TOKEN : { > { popState(); } } MORE : { < ~[] > } /* IDENTIFIERS */ TOKEN : { < BRACKET_QUOTED_IDENTIFIER: "[" ( (~["]","\n","\r"]) | ("]]") )+ "]" > } TOKEN : { < QUOTED_IDENTIFIER: "\"" ( (~["\"","\n","\r"]) | ("\"\"") )+ "\"" > } TOKEN : { < BACK_QUOTED_IDENTIFIER: "`" ( (~["`","\n","\r"]) | ("``") )+ "`" > } TOKEN : { // BigQuery-style backtick-quoted identifier, escaped using backslash < BIG_QUERY_BACK_QUOTED_IDENTIFIER: "`" ( (~["\\", "`"]) | ("\\" ~[]) )* "`" > } TOKEN : { // Per BigQuery: "Project IDs must contain 6-63 lowercase letters, digits, // or dashes. IDs must start with a letter and may not end with a dash." // We do not restrict length, or prevent identifiers from ending in a dash. < HYPHENATED_IDENTIFIER: (||"-")* > { popState(); } } TOKEN : { < IDENTIFIER: (|)* > } TOKEN : { < COLLATION_ID: (|)+ (||":"|"."|"-"|"_")* "$" (|"_")+ ("$" (||"_")+)? 
> | < UNICODE_QUOTED_IDENTIFIER: "U" "&" > | < #LETTER: [ "\u0024", "\u0041"-"\u005a", "\u005f", "\u0061"-"\u007a", "\u00c0"-"\u00d6", "\u00d8"-"\u00f6", "\u00f8"-"\u00ff", "\u0100"-"\u1fff", "\u3040"-"\u318f", "\u3300"-"\u337f", "\u3400"-"\u3d2d", "\u4e00"-"\u9fff", "\uf900"-"\ufaff" ] > | < #DIGIT: [ "\u0030"-"\u0039", "\u0660"-"\u0669", "\u06f0"-"\u06f9", "\u0966"-"\u096f", "\u09e6"-"\u09ef", "\u0a66"-"\u0a6f", "\u0ae6"-"\u0aef", "\u0b66"-"\u0b6f", "\u0be7"-"\u0bef", "\u0c66"-"\u0c6f", "\u0ce6"-"\u0cef", "\u0d66"-"\u0d6f", "\u0e50"-"\u0e59", "\u0ed0"-"\u0ed9", "\u1040"-"\u1049" ] > } /* Special token to throw a wrench in the works. It is never valid in SQL, and so when it occurs, it causes the parser to print which tokens would have been valid at that point. Used by SqlAdvisor. */ TOKEN : { < BEL: [ "\u0007" ] > } /** * Defines a production which can never be accepted by the parser. * In effect, it tells the parser, "If you got here, you've gone too far." * It is used as the default production for parser extension points; * derived parsers replace it with a real production when they want to * implement a particular extension point. */ void UnusedExtension() : { } { ( LOOKAHEAD({false}) ) }