// JavaCC code-generation options for the Druid SQL parser.
options {
// Generate a re-entrant (non-static) parser so multiple instances can coexist.
STATIC = false;
// SQL keywords match case-insensitively.
IGNORE_CASE = true;
// Accept Unicode characters in the input stream.
UNICODE_INPUT = true;
}
PARSER_BEGIN(DruidSqlParserImpl)
package org.apache.druid.sql.calcite.parser;
import java.util.List;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.SqlInsert;
import org.apache.calcite.sql.SqlNodeList;
import org.apache.calcite.sql.SqlBasicCall;
import org.apache.druid.java.util.common.granularity.Granularity;
import org.apache.druid.java.util.common.granularity.GranularityType;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.sql.calcite.parser.DruidSqlInsert;
import org.apache.druid.sql.calcite.parser.DruidSqlParserUtils;
import org.apache.druid.sql.calcite.external.ExtendOperator;
import org.apache.druid.sql.calcite.external.ParameterizeOperator;
import org.apache.druid.sql.calcite.parser.ExternalDestinationSqlIdentifier;
import java.util.HashMap;
import org.apache.calcite.avatica.util.Casing;
import org.apache.calcite.avatica.util.TimeUnit;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.runtime.CalciteContextException;
import org.apache.calcite.sql.JoinConditionType;
import org.apache.calcite.sql.JoinType;
import org.apache.calcite.sql.SqlAlter;
import org.apache.calcite.sql.SqlBasicTypeNameSpec;
import org.apache.calcite.sql.SqlBinaryOperator;
import org.apache.calcite.sql.SqlCall;
import org.apache.calcite.sql.SqlCharStringLiteral;
import org.apache.calcite.sql.SqlCollation;
import org.apache.calcite.sql.SqlCollectionTypeNameSpec;
import org.apache.calcite.sql.SqlDataTypeSpec;
import org.apache.calcite.sql.SqlDelete;
import org.apache.calcite.sql.SqlDescribeSchema;
import org.apache.calcite.sql.SqlDescribeTable;
import org.apache.calcite.sql.SqlDynamicParam;
import org.apache.calcite.sql.SqlExplain;
import org.apache.calcite.sql.SqlExplainFormat;
import org.apache.calcite.sql.SqlExplainLevel;
import org.apache.calcite.sql.SqlFunction;
import org.apache.calcite.sql.SqlFunctionCategory;
import org.apache.calcite.sql.SqlHint;
import org.apache.calcite.sql.SqlIdentifier;
import org.apache.calcite.sql.SqlInsert;
import org.apache.calcite.sql.SqlInsertKeyword;
import org.apache.calcite.sql.SqlIntervalQualifier;
import org.apache.calcite.sql.SqlJdbcDataTypeName;
import org.apache.calcite.sql.SqlJdbcFunctionCall;
import org.apache.calcite.sql.SqlJoin;
import org.apache.calcite.sql.SqlJsonConstructorNullClause;
import org.apache.calcite.sql.SqlJsonEncoding;
import org.apache.calcite.sql.SqlJsonExistsErrorBehavior;
import org.apache.calcite.sql.SqlJsonEmptyOrError;
import org.apache.calcite.sql.SqlJsonQueryEmptyOrErrorBehavior;
import org.apache.calcite.sql.SqlJsonQueryWrapperBehavior;
import org.apache.calcite.sql.SqlJsonValueEmptyOrErrorBehavior;
import org.apache.calcite.sql.SqlJsonValueReturning;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlLiteral;
import org.apache.calcite.sql.SqlMatchRecognize;
import org.apache.calcite.sql.SqlMerge;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.SqlNodeList;
import org.apache.calcite.sql.SqlNumericLiteral;
import org.apache.calcite.sql.SqlOperator;
import org.apache.calcite.sql.SqlOrderBy;
import org.apache.calcite.sql.SqlPivot;
import org.apache.calcite.sql.SqlPostfixOperator;
import org.apache.calcite.sql.SqlPrefixOperator;
import org.apache.calcite.sql.SqlRowTypeNameSpec;
import org.apache.calcite.sql.SqlSampleSpec;
import org.apache.calcite.sql.SqlSelect;
import org.apache.calcite.sql.SqlSelectKeyword;
import org.apache.calcite.sql.SqlSetOption;
import org.apache.calcite.sql.SqlSnapshot;
import org.apache.calcite.sql.SqlTableRef;
import org.apache.calcite.sql.SqlTypeNameSpec;
import org.apache.calcite.sql.SqlUnnestOperator;
import org.apache.calcite.sql.SqlUnpivot;
import org.apache.calcite.sql.SqlUpdate;
import org.apache.calcite.sql.SqlUserDefinedTypeNameSpec;
import org.apache.calcite.sql.SqlUtil;
import org.apache.calcite.sql.SqlWindow;
import org.apache.calcite.sql.SqlWith;
import org.apache.calcite.sql.SqlWithItem;
import org.apache.calcite.sql.fun.SqlCase;
import org.apache.calcite.sql.fun.SqlInternalOperators;
import org.apache.calcite.sql.fun.SqlLibraryOperators;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
import org.apache.calcite.sql.fun.SqlTrimFunction;
import org.apache.calcite.sql.parser.Span;
import org.apache.calcite.sql.parser.SqlAbstractParserImpl;
import org.apache.calcite.sql.parser.SqlParseException;
import org.apache.calcite.sql.parser.SqlParser;
import org.apache.calcite.sql.parser.SqlParserImplFactory;
import org.apache.calcite.sql.parser.SqlParserPos;
import org.apache.calcite.sql.parser.SqlParserUtil;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.calcite.sql.validate.SqlConformance;
import org.apache.calcite.sql.validate.SqlConformanceEnum;
import org.apache.calcite.util.Glossary;
import org.apache.calcite.util.Pair;
import org.apache.calcite.util.SourceStringReader;
import org.apache.calcite.util.Util;
import org.apache.calcite.util.trace.CalciteTrace;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.slf4j.Logger;
import java.io.Reader;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import static org.apache.calcite.util.Static.RESOURCE;
/**
 * SQL parser, generated from Parser.jj by JavaCC.
 *
 * The public wrapper for this parser is {@link SqlParser}.
 */
public class DruidSqlParserImpl extends SqlAbstractParserImpl
{
private static final Logger LOGGER = CalciteTrace.getParserTracer();
// Can't use quoted literal because of a bug in how JavaCC translates
// backslash-backslash.
private static final char BACKSLASH = 0x5c;
private static final char DOUBLE_QUOTE = 0x22;
private static final String DQ = DOUBLE_QUOTE + "";
private static final String DQDQ = DQ + DQ;
// Commonly used numeric literals, cached with a zero (unknown) position.
private static final SqlLiteral LITERAL_ZERO =
SqlLiteral.createExactNumeric("0", SqlParserPos.ZERO);
private static final SqlLiteral LITERAL_ONE =
SqlLiteral.createExactNumeric("1", SqlParserPos.ZERO);
private static final SqlLiteral LITERAL_MINUS_ONE =
SqlLiteral.createExactNumeric("-1", SqlParserPos.ZERO);
// Lazily-initialized, shared across all parser instances; guarded by
// the class lock in getMetadata().
private static Metadata metadata;
// Per-instance parser configuration, injected by the SqlParser wrapper.
private Casing unquotedCasing;
private Casing quotedCasing;
private int identifierMaxLength;
private SqlConformance conformance;
/**
 * {@link SqlParserImplFactory} implementation for creating parser.
 */
public static final SqlParserImplFactory FACTORY = new SqlParserImplFactory() {
public SqlAbstractParserImpl getParser(Reader reader) {
final DruidSqlParserImpl parser = new DruidSqlParserImpl(reader);
// Preserve the original SQL text when available, for error reporting.
if (reader instanceof SourceStringReader) {
final String sql =
((SourceStringReader) reader).getSourceString();
parser.setOriginalSql(sql);
}
return parser;
}
};
/** Converts any parser-layer throwable into a {@link SqlParseException}. */
public SqlParseException normalizeException(Throwable ex) {
try {
if (ex instanceof ParseException) {
ex = cleanupParseException((ParseException) ex);
}
return convertException(ex);
} catch (ParseException e) {
// cleanupParseException does not actually throw; treat as impossible.
throw new AssertionError(e);
}
}
/** Returns grammar metadata, building it once on first use (thread-safe). */
public Metadata getMetadata() {
synchronized (DruidSqlParserImpl.class) {
if (metadata == null) {
metadata = new MetadataImpl(
new DruidSqlParserImpl(new java.io.StringReader("")));
}
return metadata;
}
}
public void setTabSize(int tabSize) {
jj_input_stream.setTabSize(tabSize);
}
/** Switches the generated token manager to the named lexical state. */
public void switchTo(SqlAbstractParserImpl.LexicalState state) {
final int stateOrdinal =
Arrays.asList(DruidSqlParserImplTokenManager.lexStateNames)
.indexOf(state.name());
token_source.SwitchTo(stateOrdinal);
}
public void setQuotedCasing(Casing quotedCasing) {
this.quotedCasing = quotedCasing;
}
public void setUnquotedCasing(Casing unquotedCasing) {
this.unquotedCasing = unquotedCasing;
}
public void setIdentifierMaxLength(int identifierMaxLength) {
this.identifierMaxLength = identifierMaxLength;
}
public void setConformance(SqlConformance conformance) {
this.conformance = conformance;
}
public SqlNode parseSqlExpressionEof() throws Exception {
return SqlExpressionEof();
}
public SqlNode parseSqlStmtEof() throws Exception {
return SqlStmtEof();
}
public SqlNodeList parseSqlStmtList() throws Exception {
return SqlStmtList();
}
/** Parses an array literal using the back-quoted-identifier lexical state. */
public SqlNode parseArray() throws SqlParseException {
switchTo(LexicalState.BQID);
try {
return ArrayLiteral();
} catch (ParseException ex) {
throw normalizeException(ex);
} catch (TokenMgrError ex) {
throw normalizeException(ex);
}
}
/** Wraps a table reference and an extend-list into an EXTEND call. */
private SqlNode extend(SqlNode table, SqlNodeList extendList) {
return SqlStdOperatorTable.EXTEND.createCall(
Span.of(table, extendList).pos(), table, extendList);
}
/** Adds a warning that a token such as "HOURS" was used,
 * whereas the SQL standard only allows "HOUR".
 *
 * Currently, we silently add an exception to a list of warnings. In
 * future, we may have better compliance checking, for example a strict
 * compliance mode that throws if any non-standard features are used. */
private TimeUnit warn(TimeUnit timeUnit) throws ParseException {
final String token = getToken(0).image.toUpperCase(Locale.ROOT);
warnings.add(
SqlUtil.newContextException(getPos(),
RESOURCE.nonStandardFeatureUsed(token)));
return timeUnit;
}
}
PARSER_END(DruidSqlParserImpl)
/***************************************
* Utility Codes for Semantic Analysis *
***************************************/
/* For Debug */
JAVACODE
// Logs the current token and the next token; debugging aid only.
void debug_message1() {
LOGGER.info("{} , {}", getToken(0).image, getToken(1).image);
}
// Returns the current token's image with the configured unquoted-identifier
// casing applied.
JAVACODE String unquotedIdentifier() {
return SqlParserUtil.toCase(getToken(0).image, unquotedCasing);
}
/**
 * Allows parser to be extended with new types of table references. The
 * default implementation of this production is empty.
 */
SqlNode ExtendedTableRef() :
{
}
{
UnusedExtension()
{
// Default implementation matches nothing and produces no node.
return null;
}
}
/**
 * Allows an OVER clause following a table expression as an extension to
 * standard SQL syntax. The default implementation of this production is empty.
 */
SqlNode TableOverOpt() :
{
}
{
{
// Default implementation: no OVER clause is recognized.
return null;
}
}
/*
 * Parses dialect-specific keywords immediately following the SELECT keyword.
 * Druid defines none, so this matches the empty production E().
 */
void SqlSelectKeywords(List keywords) :
{}
{
E()
}
/*
 * Parses dialect-specific keywords immediately following the INSERT keyword.
 * Druid defines none, so this matches the empty production E().
 */
void SqlInsertKeywords(List keywords) :
{}
{
E()
}
/*
 * Parse Floor/Ceil function parameters.
 * Delegates to the standard Calcite implementation; floorFlag selects
 * FLOOR vs CEIL semantics.
 */
SqlNode FloorCeilOptions(Span s, boolean floorFlag) :
{
SqlNode node;
}
{
node = StandardFloorCeilOptions(s, floorFlag) {
return node;
}
}
/*
// This file contains the heart of a parser for SQL SELECT statements.
// Code can be shared between various parsers (for example, a DDL parser and a
// DML parser) but is not a standalone JavaCC file. You need to prepend a
// parser declaration (such as that in Parser.jj).
*/
/* Epsilon */
JAVACODE
// Matches nothing; used where a production must be syntactically non-empty.
void E() {}
/** Creates a new mutable list seeded with a single element.
 *
 * @deprecated retained for legacy call sites
 */
JAVACODE List startList(Object o)
{
final List result = new ArrayList();
result.add(o);
return result;
}
/*
* NOTE jvs 6-Feb-2004: The straightforward way to implement the SQL grammar is
* to keep query expressions (SELECT, UNION, etc) separate from row expressions
* (+, LIKE, etc). However, this is not possible with an LL(k) parser, because
* both kinds of expressions allow parenthesization, so no fixed amount of left
* context is ever good enough. A sub-query can be a leaf in a row expression,
* and can include operators like UNION, so it's not even possible to use a
* syntactic lookahead rule like "look past an indefinite number of parentheses
* until you see SELECT, VALUES, or TABLE" (since at that point we still
* don't know whether we're parsing a sub-query like ((select ...) + x)
* vs. (select ... union select ...).
*
* The somewhat messy solution is to unify the two kinds of expression,
* and to enforce syntax rules using parameterized context. This
* is the purpose of the ExprContext parameter. It is passed to
* most expression productions, which check the expressions encountered
* against the context for correctness. When a query
* element like SELECT is encountered, the production calls
* checkQueryExpression, which will throw an exception if
* a row expression was expected instead. When a row expression like
* IN is encountered, the production calls checkNonQueryExpression
* instead. It is very important to understand how this works
* when modifying the grammar.
*
* The commingling of expressions results in some bogus ambiguities which are
* resolved with LOOKAHEAD hints. The worst example is comma. SQL allows both
* (WHERE x IN (1,2)) and (WHERE x IN (select ...)). This means when we parse
* the right-hand-side of an IN, we have to allow any kind of expression inside
* the parentheses. Now consider the expression "WHERE x IN(SELECT a FROM b
* GROUP BY c,d)". When the parser gets to "c,d" it doesn't know whether the
* comma indicates the end of the GROUP BY or the end of one item in an IN
* list. Luckily, we know that select and comma-list are mutually exclusive
* within IN, so we use maximal munch for the GROUP BY comma. However, this
* usage of hints could easily mask unintended ambiguities resulting from
* future changes to the grammar, making it very brittle.
*/
// Returns the position (begin/end line and column) of the most recently
// consumed token.
JAVACODE protected SqlParserPos getPos()
{
return new SqlParserPos(
token.beginLine,
token.beginColumn,
token.endLine,
token.endColumn);
}
/** Starts a span at the current position. */
JAVACODE Span span()
{
return Span.of(getPos());
}
// Throws a context exception if a query expression (SELECT, UNION, ...) is
// illegal in the given expression context. Fall-through for accepting
// contexts is intentional.
JAVACODE void checkQueryExpression(ExprContext exprContext)
{
switch (exprContext) {
case ACCEPT_NON_QUERY:
case ACCEPT_SUB_QUERY:
case ACCEPT_CURSOR:
throw SqlUtil.newContextException(getPos(),
RESOURCE.illegalQueryExpression());
}
}
// Throws a context exception if a row (non-query) expression is illegal in
// the given expression context.
JAVACODE void checkNonQueryExpression(ExprContext exprContext)
{
switch (exprContext) {
case ACCEPT_QUERY:
throw SqlUtil.newContextException(getPos(),
RESOURCE.illegalNonQueryExpression());
}
}
// Rejects a join where a plain expression is required; returns the node
// unchanged otherwise.
JAVACODE SqlNode checkNotJoin(SqlNode e)
{
if (e instanceof SqlJoin) {
throw SqlUtil.newContextException(e.getParserPosition(),
RESOURCE.illegalJoinExpression());
}
return e;
}
/**
 * Converts a ParseException (local to this particular instantiation
 * of the parser) into a SqlParseException (common to all parsers).
 *
 * Position information is extracted from whichever exception type is
 * presented: the offending token of a ParseException, the line/column
 * parsed out of a TokenMgrError message, or the position fields of a
 * CalciteContextException.
 */
JAVACODE SqlParseException convertException(Throwable ex)
{
if (ex instanceof SqlParseException) {
return (SqlParseException) ex;
}
SqlParserPos pos = null;
int[][] expectedTokenSequences = null;
String[] tokenImage = null;
if (ex instanceof ParseException) {
ParseException pex = (ParseException) ex;
expectedTokenSequences = pex.expectedTokenSequences;
tokenImage = pex.tokenImage;
if (pex.currentToken != null) {
// currentToken is the last successfully consumed token; its successor
// is the token that caused the failure.
final Token token = pex.currentToken.next;
// Checks token.image.equals("1") to avoid recursive call.
// The SqlAbstractParserImpl#MetadataImpl constructor uses constant "1" to
// throw intentionally to collect the expected tokens.
if (!token.image.equals("1")
&& getMetadata().isKeyword(token.image)
&& SqlParserUtil.allowsIdentifier(tokenImage, expectedTokenSequences)) {
// If the next token is a keyword, reformat the error message as:
// Incorrect syntax near the keyword '{keyword}' at line {line_number},
// column {column_number}.
final String expecting = ex.getMessage()
.substring(ex.getMessage().indexOf("Was expecting"))
final String errorMsg = String.format("Incorrect syntax near the keyword '%s' "
+ "at line %d, column %d.\n%s",
token.image,
token.beginLine,
token.beginColumn,
expecting);
// Replace the ParseException with explicit error message.
ex = new ParseException(errorMsg);
}
pos = new SqlParserPos(
token.beginLine,
token.beginColumn,
token.endLine,
token.endColumn);
}
} else if (ex instanceof TokenMgrError) {
expectedTokenSequences = null;
tokenImage = null;
// Example:
// Lexical error at line 3, column 24. Encountered "#" after "a".
final java.util.regex.Pattern pattern = java.util.regex.Pattern.compile(
"(?s)Lexical error at line ([0-9]+), column ([0-9]+).*");
java.util.regex.Matcher matcher = pattern.matcher(ex.getMessage());
if (matcher.matches()) {
int line = Integer.parseInt(matcher.group(1));
int column = Integer.parseInt(matcher.group(2));
pos = new SqlParserPos(line, column, line, column);
}
} else if (ex instanceof CalciteContextException) {
// CalciteContextException is the standard wrapper for exceptions
// produced by the validator, but in the parser, the standard is
// SqlParseException; so, strip it away. In case you were wondering,
// the CalciteContextException appears because the parser
// occasionally calls into validator-style code such as
// SqlSpecialOperator.reduceExpr.
CalciteContextException ece =
(CalciteContextException) ex;
pos = new SqlParserPos(
ece.getPosLine(),
ece.getPosColumn(),
ece.getEndPosLine(),
ece.getEndPosColumn());
ex = ece.getCause();
}
return new SqlParseException(
ex.getMessage(), pos, expectedTokenSequences, tokenImage, ex);
}
/**
 * Removes or transforms misleading information from a parse exception.
 *
 * In particular, when an identifier was one of the expected tokens, all
 * expected sequences ending in a non-reserved keyword are pruned, since
 * non-reserved keywords are accepted as identifiers anyway and would only
 * clutter the error message.
 *
 * @param ex the original ("dirty") parse exception
 *
 * @return the cleaned-up parse exception (same instance, mutated)
 */
JAVACODE ParseException cleanupParseException(ParseException ex)
{
if (ex.expectedTokenSequences == null) {
return ex;
}
// NOTE(review): upstream Calcite searches for the identifier token's image
// "<IDENTIFIER>" here; the empty string below looks like stripped
// angle-bracket text in this copy -- verify against the upstream Parser.jj.
int iIdentifier = Arrays.asList(ex.tokenImage).indexOf("");
// Find all sequences in the error which contain identifier. For
// example,
// {}
// {A}
// {B, C}
// {D, }
// {D, A}
// {D, B}
//
// would yield
// {}
// {D}
// (NOTE(review): identifier token names in this comment also appear
// stripped of their angle brackets.)
final List prefixList = new ArrayList();
for (int i = 0; i < ex.expectedTokenSequences.length; ++i) {
int[] seq = ex.expectedTokenSequences[i];
int j = seq.length - 1;
int i1 = seq[j];
if (i1 == iIdentifier) {
// Keep everything before the trailing identifier token.
int[] prefix = new int[j];
System.arraycopy(seq, 0, prefix, 0, j);
prefixList.add(prefix);
}
}
if (prefixList.isEmpty()) {
return ex;
}
int[][] prefixes = (int[][])
prefixList.toArray(new int[prefixList.size()][]);
// Since was one of the possible productions,
// we know that the parser will also have included all
// of the non-reserved keywords (which are treated as
// identifiers in non-keyword contexts). So, now we need
// to clean those out, since they're totally irrelevant.
final List list = new ArrayList();
Metadata metadata = getMetadata();
for (int i = 0; i < ex.expectedTokenSequences.length; ++i) {
int [] seq = ex.expectedTokenSequences[i];
String tokenImage = ex.tokenImage[seq[seq.length - 1]];
String token = SqlParserUtil.getTokenVal(tokenImage);
if (token == null || !metadata.isNonReservedKeyword(token)) {
list.add(seq);
continue;
}
// Drop sequences that are just "identifier prefix + non-reserved keyword".
boolean match = matchesPrefix(seq, prefixes);
if (!match) {
list.add(seq);
}
}
ex.expectedTokenSequences =
(int [][]) list.toArray(new int [list.size()][]);
return ex;
}
// Returns whether seq consists of one of the given prefixes followed by
// exactly one additional token.
JAVACODE boolean matchesPrefix(int[] seq, int[][] prefixes)
{
for (int[] candidate : prefixes) {
if (seq.length != candidate.length + 1) {
continue;
}
boolean samePrefix = true;
for (int idx = 0; idx < candidate.length; idx++) {
if (candidate[idx] != seq[idx]) {
samePrefix = false;
break;
}
}
if (samePrefix) {
return true;
}
}
return false;
}
/*****************************************
* Syntactical Descriptions *
*****************************************/
// Parses either an ordered query, or a table reference possibly followed by
// joins and set operations (UNION etc.).
SqlNode ExprOrJoinOrOrderedQuery(ExprContext exprContext) :
{
SqlNode e;
final List list = new ArrayList();
}
{
// Lookahead to distinguish between "TABLE emp" (which will be
// matched by ExplicitTable() via Query())
// and "TABLE fun(args)" (which will be matched by TableRef())
(
LOOKAHEAD(2)
e = Query(exprContext)
e = OrderByLimitOpt(e)
{ return e; }
|
e = TableRef1(ExprContext.ACCEPT_QUERY_OR_JOIN)
( e = JoinTable(e) )*
{ list.add(e); }
( AddSetOpQuery(list, exprContext) )*
// toTree folds the alternating node/operator list into a single tree.
{ return SqlParserUtil.toTree(list); }
)
}
/**
 * Parses either a row expression or a query expression with an optional
 * ORDER BY.
 *
 * Postgres syntax for limit:
 *
 *   [ LIMIT { count | ALL } ]
 *   [ OFFSET start ]
 *
 * Trino syntax for limit:
 *
 *   [ OFFSET start ]
 *   [ LIMIT { count | ALL } ]
 *
 * MySQL syntax for limit:
 *
 *   [ LIMIT { count | start, count } ]
 *
 * SQL:2008 syntax for limit:
 *
 *   [ OFFSET start { ROW | ROWS } ]
 *   [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]
 */
SqlNode OrderedQueryOrExpr(ExprContext exprContext) :
{
SqlNode e;
}
{
e = QueryOrExpr(exprContext)
e = OrderByLimitOpt(e)
{ return e; }
}
/** Reads optional "ORDER BY", "LIMIT", "OFFSET", "FETCH" following a query,
 * {@code e}. If any of them are present, adds them to the query;
 * otherwise returns the query unchanged.
 * Throws if they are present and {@code e} is not a query. */
SqlNode OrderByLimitOpt(SqlNode e) :
{
final SqlNodeList orderBy;
final Span s = Span.of();
// offsetFetch[0] holds the OFFSET value, offsetFetch[1] the FETCH/LIMIT value.
SqlNode[] offsetFetch = {null, null};
}
{
(
// use the syntactic type of the expression we just parsed
// to decide whether ORDER BY makes sense
orderBy = OrderBy(e.isA(SqlKind.QUERY))
| { orderBy = null; }
)
[
LimitClause(s, offsetFetch)
[ OffsetClause(s, offsetFetch) ]
|
OffsetClause(s, offsetFetch)
[
LimitClause(s, offsetFetch) {
// "OFFSET ... LIMIT ..." ordering is only legal in some dialects.
if (!this.conformance.isOffsetLimitAllowed()) {
throw SqlUtil.newContextException(s.end(this),
RESOURCE.offsetLimitNotAllowed());
}
}
|
FetchClause(offsetFetch)
]
|
FetchClause(offsetFetch)
]
{
if (orderBy != null || offsetFetch[0] != null || offsetFetch[1] != null) {
return new SqlOrderBy(getPos(), e,
Util.first(orderBy, SqlNodeList.EMPTY),
offsetFetch[0], offsetFetch[1]);
}
return e;
}
}
/**
 * Parses an OFFSET clause in an ORDER BY expression.
 * Stores the parsed value into offsetFetch[0].
 *
 * NOTE(review): angle-bracket token references (e.g. the OFFSET keyword and
 * the optional ROW/ROWS tokens) appear stripped from this copy of the
 * grammar -- verify against the upstream Calcite Parser.jj.
 */
void OffsetClause(Span s, SqlNode[] offsetFetch) :
{
}
{
// ROW or ROWS is required in SQL:2008 but we make it optional
// because it is not present in Postgres-style syntax.
{ s.add(this); }
offsetFetch[0] = UnsignedNumericLiteralOrParam()
[ | ]
}
/**
 * Parses a FETCH clause in an ORDER BY expression.
 * Stores the parsed count into offsetFetch[1].
 *
 * NOTE(review): angle-bracket token references (FETCH, FIRST/NEXT, ROW/ROWS,
 * ONLY) appear stripped from this copy -- verify against upstream Parser.jj.
 */
void FetchClause(SqlNode[] offsetFetch) :
{
}
{
// SQL:2008-style syntax. "OFFSET ... FETCH ...".
// If you specify both LIMIT and FETCH, FETCH wins.
( | ) offsetFetch[1] = UnsignedNumericLiteralOrParam()
( | )
}
/**
 * Parses a LIMIT clause in an ORDER BY expression.
 * MySQL-style "LIMIT start, count" fills offsetFetch[0] and offsetFetch[1];
 * plain "LIMIT count" fills only offsetFetch[1].
 *
 * NOTE(review): angle-bracket token references (LIMIT, COMMA, ALL) appear
 * stripped from this copy -- verify against upstream Parser.jj.
 */
void LimitClause(Span s, SqlNode[] offsetFetch) :
{
}
{
// Postgres-style syntax. "LIMIT ... OFFSET ..."
{ s.add(this); }
(
// MySQL-style syntax. "LIMIT start, count"
LOOKAHEAD(2)
offsetFetch[0] = UnsignedNumericLiteralOrParam()
offsetFetch[1] = UnsignedNumericLiteralOrParam() {
if (!this.conformance.isLimitStartCountAllowed()) {
throw SqlUtil.newContextException(s.end(this),
RESOURCE.limitStartCountNotAllowed());
}
}
|
offsetFetch[1] = UnsignedNumericLiteralOrParam()
|
)
}
/**
 * Parses a leaf in a query expression (SELECT, VALUES or TABLE).
 */
SqlNode LeafQuery(ExprContext exprContext) :
{
SqlNode e;
}
{
{
// ensure a query is legal in this context
checkQueryExpression(exprContext);
}
e = SqlSelect() { return e; }
|
e = TableConstructor() { return e; }
|
e = ExplicitTable(getPos()) { return e; }
}
/**
 * Parses a parenthesized query or single row expression.
 * Depending on {@code exprContext}, may also accept a join.
 */
SqlNode ParenthesizedExpression(ExprContext exprContext) :
{
SqlNode e;
}
{
{
// we've now seen left paren, so queries inside should
// be allowed as sub-queries
switch (exprContext) {
case ACCEPT_SUB_QUERY:
exprContext = ExprContext.ACCEPT_NONCURSOR;
break;
case ACCEPT_CURSOR:
exprContext = ExprContext.ACCEPT_ALL;
break;
}
}
e = ExprOrJoinOrOrderedQuery(exprContext)
{
// Reject e.g. a bare join where the outer context forbids one.
exprContext.throwIfNotCompatible(e);
return e;
}
}
/**
 * Parses a parenthesized query or comma-list of row expressions.
 *
 * REVIEW jvs 8-Feb-2004: There's a small hole in this production. It can be
 * used to construct something like
 *
 *   WHERE x IN (select count(*) from t where c=d,5)
 *
 * which should be illegal. The above is interpreted as equivalent to
 *
 *   WHERE x IN ((select count(*) from t where c=d),5)
 *
 * which is a legal use of a sub-query. The only way to fix the hole is to
 * be able to remember whether a subexpression was parenthesized or not, which
 * means preserving parentheses in the SqlNode tree. This is probably
 * desirable anyway for use in purely syntactic parsing applications (e.g. SQL
 * pretty-printer). However, if this is done, it's important to also make
 * isA() on the paren node call down to its operand so that we can
 * always correctly discriminate a query from a row expression.
 */
SqlNodeList ParenthesizedQueryOrCommaList(
ExprContext exprContext) :
{
SqlNode e;
final List list = new ArrayList();
// The first element may be a sub-query even when the rest may not.
ExprContext firstExprContext = exprContext;
final Span s;
}
{
{
// we've now seen left paren, so a query by itself should
// be interpreted as a sub-query
s = span();
switch (exprContext) {
case ACCEPT_SUB_QUERY:
firstExprContext = ExprContext.ACCEPT_NONCURSOR;
break;
case ACCEPT_CURSOR:
firstExprContext = ExprContext.ACCEPT_ALL;
break;
}
}
e = OrderedQueryOrExpr(firstExprContext) { list.add(e); }
(
{
// a comma-list can't appear where only a query is expected
checkNonQueryExpression(exprContext);
}
AddExpression(list, exprContext)
)*
{
return new SqlNodeList(list, s.end(this));
}
}
/** As ParenthesizedQueryOrCommaList, but allows DEFAULT
 * in place of any of the expressions. For example,
 * {@code (x, DEFAULT, null, DEFAULT)}. */
SqlNodeList ParenthesizedQueryOrCommaListWithDefault(
ExprContext exprContext) :
{
SqlNode e;
final List list = new ArrayList();
// The first element may be a sub-query even when the rest may not.
ExprContext firstExprContext = exprContext;
final Span s;
}
{
{
// we've now seen left paren, so a query by itself should
// be interpreted as a sub-query
s = span();
switch (exprContext) {
case ACCEPT_SUB_QUERY:
firstExprContext = ExprContext.ACCEPT_NONCURSOR;
break;
case ACCEPT_CURSOR:
firstExprContext = ExprContext.ACCEPT_ALL;
break;
}
}
(
e = OrderedQueryOrExpr(firstExprContext) { list.add(e); }
|
e = Default() { list.add(e); }
)
(
{
// a comma-list can't appear where only a query is expected
checkNonQueryExpression(exprContext);
}
(
e = Expression(exprContext) { list.add(e); }
|
e = Default() { list.add(e); }
)
)*
{
return new SqlNodeList(list, s.end(this));
}
}
/**
 * Parses function parameter lists.
 * If the list starts with DISTINCT or ALL, it is discarded.
 */
List UnquantifiedFunctionParameterList(ExprContext exprContext) :
{
final List args;
}
{
args = FunctionParameterList(exprContext) {
// FunctionParameterList always puts the quantifier (DISTINCT/ALL literal,
// or null when absent) at index 0; drop that slot unconditionally.
args.remove(0); // remove DISTINCT or ALL, if present
return args;
}
}
/**
 * Parses function parameter lists including DISTINCT keyword recognition,
 * DEFAULT, and named argument assignment.
 *
 * The returned list has the quantifier literal (or null) at index 0,
 * followed by the argument expressions.
 */
List FunctionParameterList(ExprContext exprContext) :
{
final SqlLiteral qualifier;
final List list = new ArrayList();
}
{
(
qualifier = AllOrDistinct() { list.add(qualifier); }
|
{ list.add(null); }
)
AddArg0(list, exprContext)
(
{
// a comma-list can't appear where only a query is expected
checkNonQueryExpression(exprContext);
}
AddArg(list, exprContext)
)*
{
return list;
}
}
// Parses an ALL or DISTINCT quantifier and returns the matching keyword
// literal.
// NOTE(review): the DISTINCT and ALL token references appear stripped from
// this copy -- verify against upstream Parser.jj.
SqlLiteral AllOrDistinct() :
{
}
{
{ return SqlSelectKeyword.DISTINCT.symbol(getPos()); }
|
{ return SqlSelectKeyword.ALL.symbol(getPos()); }
}
// Parses the first argument of a function call (optionally named with "=>",
// optionally DEFAULT or a table parameter) and appends it to list.
void AddArg0(List list, ExprContext exprContext) :
{
final SqlIdentifier name;
SqlNode e;
final ExprContext firstExprContext;
{
// we've now seen left paren, so queries inside should
// be allowed as sub-queries
switch (exprContext) {
case ACCEPT_SUB_QUERY:
firstExprContext = ExprContext.ACCEPT_NONCURSOR;
break;
case ACCEPT_CURSOR:
firstExprContext = ExprContext.ACCEPT_ALL;
break;
default:
firstExprContext = exprContext;
break;
}
}
}
{
(
LOOKAHEAD(2) name = SimpleIdentifier()
| { name = null; }
)
(
e = Default()
|
LOOKAHEAD(3)
e = TableParam()
|
e = PartitionedQueryOrQueryOrExpr(firstExprContext)
)
{
// Wrap named arguments as "value => name" assignment calls.
if (name != null) {
e = SqlStdOperatorTable.ARGUMENT_ASSIGNMENT.createCall(
Span.of(name, e).pos(), e, name);
}
list.add(e);
}
}
// Parses a subsequent function-call argument (optionally named, DEFAULT, or
// a table parameter) and appends it to list.
void AddArg(List list, ExprContext exprContext) :
{
final SqlIdentifier name;
SqlNode e;
}
{
(
LOOKAHEAD(2) name = SimpleIdentifier()
| { name = null; }
)
(
e = Default()
|
e = Expression(exprContext)
|
e = TableParam()
)
{
// Wrap named arguments as "value => name" assignment calls.
if (name != null) {
e = SqlStdOperatorTable.ARGUMENT_ASSIGNMENT.createCall(
Span.of(name, e).pos(), e, name);
}
list.add(e);
}
}
// Parses the DEFAULT keyword as a call to the DEFAULT operator.
// NOTE(review): the DEFAULT token reference appears stripped from this
// copy -- verify against upstream Parser.jj.
SqlNode Default() : {}
{
{
return SqlStdOperatorTable.DEFAULT.createCall(getPos());
}
}
/**
 * Parses a query (SELECT, UNION, INTERSECT, EXCEPT, VALUES, TABLE) followed by
 * the end-of-file symbol.
 */
SqlNode SqlQueryEof() :
{
SqlNode query;
}
{
query = OrderedQueryOrExpr(ExprContext.ACCEPT_QUERY)
{ return query; }
}
/**
 * Parses a list of SQL statements separated by semicolon.
 * The semicolon is required between statements, but is
 * optional at the end.
 *
 * NOTE(review): the semicolon token reference appears stripped from the
 * repetition below -- verify against upstream Parser.jj.
 */
SqlNodeList SqlStmtList() :
{
final List stmtList = new ArrayList();
SqlNode stmt;
}
{
stmt = SqlStmt() {
stmtList.add(stmt);
}
(
[
stmt = SqlStmt() {
stmtList.add(stmt);
}
]
)*
{
return new SqlNodeList(stmtList, Span.of(stmtList).pos());
}
}
/**
 * Parses an SQL statement.
 *
 * Druid-specific statements (INSERT with PARTITIONED BY, EXPLAIN, REPLACE)
 * are tried first via bounded lookahead, then the standard Calcite
 * statement forms.
 */
SqlNode SqlStmt() :
{
SqlNode stmt;
}
{
(
LOOKAHEAD(2) stmt = DruidSqlInsertEof()
|
LOOKAHEAD(2) stmt = DruidSqlExplain()
|
LOOKAHEAD(2) stmt = DruidSqlReplaceEof()
|
stmt = SqlSetOption(Span.of(), null)
|
stmt = SqlAlter()
|
stmt = OrderedQueryOrExpr(ExprContext.ACCEPT_QUERY)
|
stmt = SqlExplain()
|
stmt = SqlDescribe()
|
stmt = SqlInsert()
|
stmt = SqlDelete()
|
stmt = SqlUpdate()
|
stmt = SqlMerge()
|
stmt = SqlProcedureCall()
)
{
return stmt;
}
}
/**
 * Parses an SQL statement followed by the end-of-file symbol.
 */
SqlNode SqlStmtEof() :
{
SqlNode stmt;
}
{
stmt = SqlStmt()
{
return stmt;
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// Parses the granularity argument of PARTITIONED BY: one of the keyword
// granularities (HOUR, DAY, MONTH, YEAR, ALL [TIME]) or an arbitrary
// granularity expression converted via DruidSqlParserUtils.
// NOTE(review): the keyword token references for each alternative appear
// stripped from this copy -- verify against the upstream Druid grammar.
SqlGranularityLiteral PartitionGranularity() :
{
SqlNode e;
Granularity granularity;
String unparseString;
}
{
(
{
granularity = Granularities.HOUR;
unparseString = "HOUR";
}
|
{
granularity = Granularities.DAY;
unparseString = "DAY";
}
|
{
granularity = Granularities.MONTH;
unparseString = "MONTH";
}
|
{
granularity = Granularities.YEAR;
unparseString = "YEAR";
}
|
{
granularity = Granularities.ALL;
unparseString = "ALL";
}
[
{
// "ALL TIME" unparses with the optional TIME suffix preserved.
unparseString += " TIME";
}
]
|
e = Expression(ExprContext.ACCEPT_SUB_QUERY)
{
// May throw if the expression is not a recognizable granularity.
granularity = DruidSqlParserUtils.convertSqlNodeToGranularity(e);
unparseString = e.toString();
}
)
{
return new SqlGranularityLiteral(granularity, unparseString, getPos());
}
}
// Parses the column list of a CLUSTERED BY clause as an ordered item list.
// NOTE(review): the CLUSTERED BY keyword and comma token references appear
// stripped from this copy -- verify against the upstream Druid grammar.
SqlNodeList ClusteredBy() :
{
final List list = new ArrayList();
final Span s;
}
{
{
s = span();
}
AddOrderItem(list)
(
// Bounded lookahead distinguishes a further list item from what follows
// the clause.
LOOKAHEAD(2) AddOrderItem(list)
)*
{
return new SqlNodeList(list, s.addAll(list).pos());
}
}
// Parses a Druid-specific type name into a user-defined type spec.
// NOTE(review): the token reference(s) preceding the first action appear
// stripped from this copy -- verify against the upstream Druid grammar.
SqlTypeNameSpec DruidType() :
{
String typeName;
}
{
{
// Strip the surrounding single quotes from the matched literal.
typeName = SqlParserUtil.trim(token.image, "'");
}
{
return new SqlUserDefinedTypeNameSpec(typeName, span().pos());
}
}
// Parses the supported file formats for export.
// The format is read as a simple identifier (e.g. CSV); validation of the
// actual format name happens downstream.
SqlIdentifier FileFormat() :
{
SqlNode format;
}
{
format = SimpleIdentifier()
{
return (SqlIdentifier) format;
}
}
// Parses an extern destination: a destination type (identifier, or the
// reserved keyword LOCAL handled specially) with optional properties.
// NOTE(review): the LOCAL keyword and parenthesis token references appear
// stripped from this copy -- verify against the upstream Druid grammar.
SqlIdentifier ExternalDestination() :
{
final Span s;
SqlIdentifier destinationType = null;
String destinationTypeString = null;
Map properties = new HashMap();
}
{
(
destinationType = SimpleIdentifier()
{
destinationTypeString = destinationType.toString();
}
|
{
// local is a reserved keyword in calcite. However, local is also a supported input source / destination and
// keeping the name is preferred for consistency in other places, and so that permission checks are applied
// correctly, so this is handled as a special case.
destinationTypeString = "local";
}
)
[ [ properties = ExternProperties() ] ]
{
// Span is taken at the end, i.e. the position of the last consumed token.
s = span();
return new ExternalDestinationSqlIdentifier(
destinationTypeString,
s.pos(),
properties
);
}
}
// Parses a list of key => value properties for an external destination into a Map of
// property name (identifier text) to string value. Later duplicate keys overwrite
// earlier ones via Map.put.
// NOTE(review): the assignment and comma separator tokens between entries appear to have
// been stripped from this copy — confirm against the upstream grammar.
Map ExternProperties() :
{
final Span s;
final Map properties = new HashMap();
SqlIdentifier identifier;
String value;
SqlNodeList commaList = SqlNodeList.EMPTY;
}
{
(
// first property entry
identifier = SimpleIdentifier() value = SimpleStringLiteral()
{
properties.put(identifier.toString(), value);
}
)
(
// subsequent property entries
identifier = SimpleIdentifier() value = SimpleStringLiteral()
{
properties.put(identifier.toString(), value);
}
)*
{
return properties;
}
}
// Trivial test production: parses a single simple identifier and returns it.
// Presumably used only to exercise the parser-generation pipeline.
SqlNode testRule():
{
final SqlNode e;
}
{
e = SimpleIdentifier() { return e; }
}/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Parses an INSERT statement. This function is copied from SqlInsert in core/src/main/codegen/templates/Parser.jj,
* with some changes to allow a custom error message if an OVERWRITE clause is present.
*/
// Using fully qualified name for Pair class, since Calcite also has a same class name being used in the Parser.jj
// Parses a complete Druid INSERT statement (through EOF), extending Calcite's SqlInsert
// with PARTITIONED BY, CLUSTERED BY, an optional export destination / file format, and a
// custom error when an OVERWRITE clause is present.
// NOTE(review): keyword terminal references (INSERT/UPSERT, INTO, AS, OVERWRITE,
// PARTITIONED BY, CLUSTERED BY, EOF) appear to have been stripped from this copy of the
// grammar; the empty alternatives/optionals below originally matched those tokens.
SqlNode DruidSqlInsertEof() :
{
SqlNode insertNode;
final List keywords = new ArrayList();
final SqlNodeList keywordList;
final SqlIdentifier destination;
SqlNode tableRef = null;
SqlNode source;
final SqlNodeList columnList;
final Span s;
final Pair p;
SqlGranularityLiteral partitionedBy = null;
SqlNodeList clusteredBy = null;
SqlIdentifier exportFileFormat = null;
}
{
(
// INSERT branch (token stripped) vs. UPSERT branch, which records the keyword
|
{ keywords.add(SqlInsertKeyword.UPSERT.symbol(getPos())); }
)
{ s = span(); }
SqlInsertKeywords(keywords) {
keywordList = new SqlNodeList(keywords, s.addAll(keywords).pos());
}
(
// Either an external destination (EXTERN-style) or a regular table target.
LOOKAHEAD(2)
destination = ExternalDestination()
|
destination = CompoundTableIdentifier()
( tableRef = TableHints(destination) | { tableRef = destination; } )
[ LOOKAHEAD(5) tableRef = ExtendTable(tableRef) ]
)
(
// Optional parenthesized column list; the right element of the pair carries an
// EXTEND-style schema which is folded into the table reference.
LOOKAHEAD(2)
p = ParenthesizedCompoundIdentifierList() {
if (p.right.size() > 0) {
tableRef = extend(tableRef, p.right);
}
if (p.left.size() > 0) {
columnList = p.left;
} else {
columnList = null;
}
}
| { columnList = null; }
)
[
exportFileFormat = FileFormat()
]
(
// OVERWRITE branch (token stripped): rejected explicitly with a friendly message.
{
throw org.apache.druid.sql.calcite.parser.DruidSqlParserUtils.problemParsing(
"An OVERWRITE clause is not allowed with INSERT statements. Use REPLACE statements if overwriting existing segments is required or remove the OVERWRITE clause."
);
}
|
source = OrderedQueryOrExpr(ExprContext.ACCEPT_QUERY)
)
// PARTITIONED BY is necessary, but is kept optional in the grammar. It is asserted that it is not missing in the
// IngestHandler#validate() so that we can return a custom error message.
[
partitionedBy = PartitionGranularity()
]
[
clusteredBy = ClusteredBy()
]
{
// CLUSTERED BY without PARTITIONED BY implies the clauses were written out of order.
if (clusteredBy != null && partitionedBy == null) {
throw org.apache.druid.sql.calcite.parser.DruidSqlParserUtils.problemParsing(
"CLUSTERED BY found before PARTITIONED BY, CLUSTERED BY must come after the PARTITIONED BY clause"
);
}
}
// EOF is also present in SqlStmtEof but EOF is a special case and a single EOF can be consumed multiple times.
// The reason for adding EOF here is to ensure that we create a DruidSqlInsert node after the syntax has been
// validated and throw SQL syntax errors before performing validations in the DruidSqlInsert which can overshadow the
// actual error message.
{
insertNode = new SqlInsert(s.end(source), keywordList, destination, source, columnList);
if (!(insertNode instanceof SqlInsert)) {
// This shouldn't be encountered, but done as a defensive practice. SqlInsert() always returns a node of type
// SqlInsert
return insertNode;
}
SqlInsert sqlInsert = (SqlInsert) insertNode;
return DruidSqlInsert.create(sqlInsert, partitionedBy, clusteredBy, exportFileFormat);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Parses an EXPLAIN PLAN statement. Allows for custom druid's statements as well.
* The main change from SqlExplain() rule is that the statements that can occur in front of the explain's can now be
* custom druid statements as well reflected in the DruidQueryOrSqlQueryOrDml() production rule
*
* Since this copies directly from SqlExplain(), this would need to be modified while updating Calcite to allow for
* any changes and improvements (e.g. adding another format apart from json or xml in which one can
* specify the explain plan output)
*/
// Parses EXPLAIN PLAN for Druid statements (including INSERT/REPLACE) — see the javadoc
// above. Mirrors Calcite's SqlExplain() but delegates to DruidQueryOrSqlQueryOrDml().
// NOTE(review): the EXPLAIN/PLAN keyword tokens and AS XML / AS JSON format tokens appear
// to have been stripped from this copy; the empty alternatives below selected the format.
SqlNode DruidSqlExplain() :
{
SqlNode stmt;
SqlExplainLevel detailLevel = SqlExplainLevel.EXPPLAN_ATTRIBUTES;
SqlExplain.Depth depth;
final SqlExplainFormat format;
}
{
[ detailLevel = ExplainDetailLevel() ]
depth = ExplainDepth()
(
// format selectors (tokens stripped); TEXT is the default final alternative
LOOKAHEAD(2)
{ format = SqlExplainFormat.XML; }
|
{ format = SqlExplainFormat.JSON; }
|
{ format = SqlExplainFormat.TEXT; }
)
stmt = DruidQueryOrSqlQueryOrDml() {
return new SqlExplain(getPos(),
stmt,
detailLevel.symbol(SqlParserPos.ZERO),
depth.symbol(SqlParserPos.ZERO),
format.symbol(SqlParserPos.ZERO),
nDynamicParams);
}
}
// Dispatches to one of: Druid INSERT, Druid REPLACE, or a plain Calcite query/DML
// statement. Used by DruidSqlExplain() so EXPLAIN works on Druid ingest statements too.
SqlNode DruidQueryOrSqlQueryOrDml() :
{
SqlNode stmt;
}
{
(
stmt = DruidSqlInsertEof()
|
stmt = DruidSqlReplaceEof()
|
stmt = SqlQueryOrDml()
)
{
return stmt;
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// Taken from syntax of SqlInsert statement from calcite parser, edited for replace syntax
// Parses a complete Druid REPLACE statement (through EOF): REPLACE INTO target, optional
// column list / export format, OVERWRITE time filter, source query, PARTITIONED BY and
// CLUSTERED BY. Builds a SqlInsert internally and wraps it as DruidSqlReplace.
// NOTE(review): keyword terminal references (REPLACE, INTO, OVERWRITE, PARTITIONED BY,
// CLUSTERED BY, EOF) appear to have been stripped from this copy of the grammar.
SqlNode DruidSqlReplaceEof() :
{
final SqlIdentifier destination;
SqlNode source;
SqlNodeList columnList = null;
final Span s;
SqlNode tableRef = null;
SqlInsert sqlInsert;
SqlGranularityLiteral partitionedBy = null;
SqlNodeList clusteredBy = null;
final Pair p;
SqlNode replaceTimeQuery = null;
SqlIdentifier exportFileFormat = null;
}
{
{ s = span(); }
(
// External destination or regular table target, as in DruidSqlInsertEof.
LOOKAHEAD(2)
destination = ExternalDestination()
|
destination = CompoundTableIdentifier()
( tableRef = TableHints(destination) | { tableRef = destination; } )
[ LOOKAHEAD(5) tableRef = ExtendTable(tableRef) ]
)
(
LOOKAHEAD(2)
p = ParenthesizedCompoundIdentifierList() {
if (p.right.size() > 0) {
tableRef = extend(tableRef, p.right);
}
if (p.left.size() > 0) {
columnList = p.left;
} else {
columnList = null;
}
}
| { columnList = null; }
)
[
exportFileFormat = FileFormat()
]
[
// Outer optional presumably matched the OVERWRITE keyword (token stripped);
// inner optional parses the time filter (ALL or WHERE-style condition).
[
replaceTimeQuery = ReplaceTimeQuery()
]
]
source = OrderedQueryOrExpr(ExprContext.ACCEPT_QUERY)
// PARTITIONED BY is necessary, but is kept optional in the grammar. It is asserted that it is not missing in the
// IngestHandler#validate() so that we can return a custom error message.
[
partitionedBy = PartitionGranularity()
]
[
clusteredBy = ClusteredBy()
]
{
// CLUSTERED BY without PARTITIONED BY implies the clauses were written out of order.
if (clusteredBy != null && partitionedBy == null) {
throw org.apache.druid.sql.calcite.parser.DruidSqlParserUtils.problemParsing(
"CLUSTERED BY found before PARTITIONED BY, CLUSTERED BY must come after the PARTITIONED BY clause"
);
}
}
// EOF is also present in SqlStmtEof but EOF is a special case and a single EOF can be consumed multiple times.
// The reason for adding EOF here is to ensure that we create a DruidSqlReplace node after the syntax has been
// validated and throw SQL syntax errors before performing validations in the DruidSqlReplace which can overshadow the
// actual error message.
{
sqlInsert = new SqlInsert(s.end(source), SqlNodeList.EMPTY, destination, source, columnList);
return DruidSqlReplace.create(sqlInsert, partitionedBy, clusteredBy, exportFileFormat, replaceTimeQuery);
}
}
// Parses the time filter of REPLACE ... OVERWRITE: either the ALL keyword (encoded as a
// character literal "ALL") or a WHERE condition. Unsupported conditions are rejected
// later, keeping this production simple.
// NOTE(review): the ALL keyword token reference appears to have been stripped here.
SqlNode ReplaceTimeQuery() :
{
SqlNode replaceQuery;
}
{
(
{ replaceQuery = SqlLiteral.createCharString("ALL", getPos()); }
|
// We parse all types of conditions and throw an exception if it is not supported to keep the parsing simple
replaceQuery = Where()
)
{
return replaceQuery;
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
 * Druid note: this file is copied from core/src/main/codegen/templates/Parser.jj in Calcite 1.35.0, with changes
 * to add two elements of Druid syntax to the FROM clause:
 *
 * id [ (<args>) ]
 *
 * And
 *
 * TABLE(<fn>(<args>)) (<schema>)
*
* These changes were originally in https://github.com/apache/druid/pull/13360 as a patch script (sql/edit-parser.py),
* then later moved to this copied-and-edited file in https://github.com/apache/druid/pull/13553.*
*
* This file prefixes the required production rules with 'Druid' so that the whole FROM production rule can be
* derived from this file itself. The production clause is injected in the grammar using the maven replace plugin in
* sql module's pom.
*/
/**
* Parses the FROM clause for a SELECT.
*
* FROM is mandatory in standard SQL, optional in dialects such as MySQL,
* PostgreSQL. The parser allows SELECT without FROM, but the validator fails
* if conformance is, say, STRICT_2003.
*/
// Parses the FROM clause: a join tree, optionally followed by comma-joins at top level
// only (see the inline comment). Each comma is folded into a SqlJoin of type COMMA.
// NOTE(review): the <COMMA> token reference after LOOKAHEAD(1) appears stripped.
SqlNode DruidFromClause() :
{
SqlNode e, e2;
SqlLiteral joinType;
}
{
e = DruidJoin()
(
// Comma joins should only occur at top-level in the FROM clause.
// Valid:
// * FROM a, b
// * FROM (a CROSS JOIN b), c
// Not valid:
// * FROM a CROSS JOIN (b, c)
LOOKAHEAD(1)
{ joinType = JoinType.COMMA.symbol(getPos()); }
e2 = DruidJoin() {
// Left-fold: (e, e2) becomes the new left operand for subsequent commas.
e = new SqlJoin(joinType.getParserPosition(),
e,
SqlLiteral.createBoolean(false, joinType.getParserPosition()),
joinType,
e2,
JoinConditionType.NONE.symbol(SqlParserPos.ZERO),
null);
}
)*
{ return e; }
}
// Parses a table reference followed by zero or more JOIN clauses, left-folding each
// joined table into the accumulated join tree.
SqlNode DruidJoin() :
{
SqlNode e;
}
{
e = DruidTableRef1(ExprContext.ACCEPT_QUERY_OR_JOIN)
(
LOOKAHEAD(2)
e = DruidJoinTable(e)
)*
{
return e;
}
}
/** Matches "LEFT JOIN t ON ...", "RIGHT JOIN t USING ...", "JOIN t". */
// Parses one join suffix for the already-parsed left operand {@code e}: NATURAL/typed
// JOIN with ON/USING/no condition, or CROSS/OUTER APPLY (tokens stripped in this copy).
// NOTE(review): the ON, USING, CROSS APPLY and OUTER APPLY keyword token references
// appear to have been stripped; the bare action alternatives below correspond to them.
SqlNode DruidJoinTable(SqlNode e) :
{
SqlNode e2, condition;
final SqlLiteral natural, joinType, on, using;
SqlNodeList list;
}
{
// LOOKAHEAD(3) is needed here rather than a LOOKAHEAD(2) because JavaCC
// calculates minimum lookahead count incorrectly for choice that contains
// zero size child. For instance, with the generated code,
// "LOOKAHEAD(2, Natural(), JoinType())"
// returns true immediately if it sees a single "" token. Where we
// expect the lookahead succeeds after " ".
//
// For more information about the issue,
// see https://github.com/javacc/javacc/issues/86
//
// We allow CROSS JOIN (joinType = CROSS_JOIN) to have a join condition,
// even though that is not valid SQL; the validator will catch it.
LOOKAHEAD(3)
natural = Natural()
joinType = JoinType()
e2 = DruidTableRef1(ExprContext.ACCEPT_QUERY_OR_JOIN)
(
// ON condition (ON token stripped)
{ on = JoinConditionType.ON.symbol(getPos()); }
condition = Expression(ExprContext.ACCEPT_SUB_QUERY) {
return new SqlJoin(joinType.getParserPosition(),
e,
natural,
joinType,
e2,
on,
condition);
}
|
// USING (column list) condition (USING token stripped)
{ using = JoinConditionType.USING.symbol(getPos()); }
list = ParenthesizedSimpleIdentifierList() {
return new SqlJoin(joinType.getParserPosition(),
e,
natural,
joinType,
e2,
using,
new SqlNodeList(list, Span.of(using).end(this)));
}
|
// no condition at all
{
return new SqlJoin(joinType.getParserPosition(),
e,
natural,
joinType,
e2,
JoinConditionType.NONE.symbol(joinType.getParserPosition()),
null);
}
)
|
// presumably CROSS APPLY (tokens stripped); modeled as CROSS join, gated on conformance
{ joinType = JoinType.CROSS.symbol(getPos()); }
e2 = DruidTableRef2(true) {
if (!this.conformance.isApplyAllowed()) {
throw SqlUtil.newContextException(getPos(), RESOURCE.applyNotAllowed());
}
return new SqlJoin(joinType.getParserPosition(),
e,
SqlLiteral.createBoolean(false, joinType.getParserPosition()),
joinType,
e2,
JoinConditionType.NONE.symbol(SqlParserPos.ZERO),
null);
}
|
// presumably OUTER APPLY (tokens stripped); modeled as LEFT join ON TRUE
{ joinType = JoinType.LEFT.symbol(getPos()); }
e2 = DruidTableRef2(true) {
if (!this.conformance.isApplyAllowed()) {
throw SqlUtil.newContextException(getPos(), RESOURCE.applyNotAllowed());
}
return new SqlJoin(joinType.getParserPosition(),
e,
SqlLiteral.createBoolean(false, joinType.getParserPosition()),
joinType,
e2,
JoinConditionType.ON.symbol(SqlParserPos.ZERO),
SqlLiteral.createBoolean(true, joinType.getParserPosition()));
}
}
/**
* Parses a table reference in a FROM clause, not lateral unless LATERAL
* is explicitly specified.
*/
// Convenience wrapper: table reference in query context, non-lateral by default.
SqlNode DruidTableRef() :
{
final SqlNode e;
}
{
e = DruidTableRef3(ExprContext.ACCEPT_QUERY, false) { return e; }
}
// Convenience wrapper: table reference with a caller-supplied expression context,
// non-lateral by default.
SqlNode DruidTableRef1(ExprContext exprContext) :
{
final SqlNode e;
}
{
e = DruidTableRef3(exprContext, false) { return e; }
}
/**
* Parses a table reference in a FROM clause.
*/
// Convenience wrapper: table reference in query context with explicit laterality,
// used by the APPLY-style join branches.
SqlNode DruidTableRef2(boolean lateral) :
{
final SqlNode e;
}
{
e = DruidTableRef3(ExprContext.ACCEPT_QUERY, lateral) { return e; }
}
// Core table-reference production: a table identifier (with hints, optional
// Druid-specific parameter list, OVER, SNAPSHOT, MATCH_RECOGNIZE), a parenthesized
// query/expression, UNNEST, a table-function call (with Druid EXTEND support), or an
// extended table ref; then optional PIVOT/UNPIVOT, AS-alias, and TABLESAMPLE.
// NOTE(review): keyword token references (LATERAL, UNNEST, WITH ORDINALITY, EXTEND, AS)
// appear to have been stripped from this copy; empty optionals below matched them.
SqlNode DruidTableRef3(ExprContext exprContext, boolean lateral) :
{
final SqlIdentifier tableName;
SqlNode tableRef;
List paramList;
final SqlIdentifier alias;
final Span s;
SqlNodeList args;
final SqlNodeList columnAliasList;
SqlUnnestOperator unnestOp = SqlStdOperatorTable.UNNEST;
SqlNodeList extendList = null;
}
{
(
LOOKAHEAD(2)
tableName = CompoundTableIdentifier()
( tableRef = TableHints(tableName) | { tableRef = tableName; } )
// BEGIN: Druid-specific code
[
// Druid extension: id(<args>) parameterizes the table reference.
paramList = FunctionParameterList(ExprContext.ACCEPT_NONCURSOR)
{
tableRef = ParameterizeOperator.PARAM.createCall(tableRef, paramList);
}
]
// END: Druid-specific code
tableRef = Over(tableRef)
[ tableRef = Snapshot(tableRef) ]
[ tableRef = MatchRecognize(tableRef) ]
|
LOOKAHEAD(2)
[ { lateral = true; } ]
tableRef = ParenthesizedExpression(exprContext)
tableRef = Over(tableRef)
tableRef = addLateral(tableRef, lateral)
[ tableRef = MatchRecognize(tableRef) ]
|
// presumably the UNNEST branch (token stripped)
{ s = span(); }
args = ParenthesizedQueryOrCommaList(ExprContext.ACCEPT_SUB_QUERY)
[
// presumably WITH ORDINALITY (tokens stripped)
{
unnestOp = SqlStdOperatorTable.UNNEST_WITH_ORDINALITY;
}
]
{
tableRef = unnestOp.createCall(s.end(this), (List) args);
}
|
[ { lateral = true; } ]
tableRef = TableFunctionCall()
// BEGIN: Druid-specific code
[
// Druid extension: TABLE(fn(...)) [EXTEND] (<schema>) attaches a column schema.
[ ]
extendList = ExtendList()
{
tableRef = ExtendOperator.EXTEND.createCall(
Span.of(tableRef, extendList).pos(), tableRef, extendList);
}
]
// END: Druid-specific code
tableRef = addLateral(tableRef, lateral)
|
tableRef = ExtendedTableRef()
)
[
LOOKAHEAD(2)
tableRef = Pivot(tableRef)
]
[
LOOKAHEAD(2)
tableRef = Unpivot(tableRef)
]
[
// optional [AS] alias (AS token stripped) with optional column alias list
[ ] alias = SimpleIdentifier()
(
columnAliasList = ParenthesizedSimpleIdentifierList()
| { columnAliasList = null; }
)
{
// Standard SQL (and Postgres) allow applying "AS alias" to a JOIN,
// e.g. "FROM (a CROSS JOIN b) AS c". The new alias obscures the
// internal aliases, and columns cannot be referenced if they are
// not unique. TODO: Support this behavior; see
// [CALCITE-5168] Allow AS after parenthesized JOIN
checkNotJoin(tableRef);
if (columnAliasList == null) {
tableRef = SqlStdOperatorTable.AS.createCall(
Span.of(tableRef).end(this), tableRef, alias);
} else {
List idList = new ArrayList();
idList.add(tableRef);
idList.add(alias);
idList.addAll(columnAliasList.getList());
tableRef = SqlStdOperatorTable.AS.createCall(
Span.of(tableRef).end(this), idList);
}
}
]
[ tableRef = Tablesample(tableRef) ]
{ return tableRef; }
}
// Parses a parenthesized, comma-separated list of key=value options into a flat
// SqlNodeList (key, value, key, value, ...).
// NOTE(review): the parenthesis and comma token references appear stripped here.
SqlNodeList ParenthesizedKeyValueOptionCommaList() :
{
final Span s;
final List list = new ArrayList();
}
{
{ s = span(); }
AddKeyValueOption(list)
(
AddKeyValueOption(list)
)*
{
return new SqlNodeList(list, s.end(this));
}
}
/**
* Parses an option with format key=val whose key is a simple identifier or string literal
* and value is a string literal.
*/
// Parses one key=val option (key: identifier or string literal; value: string literal)
// and appends both nodes to the list.
// NOTE(review): the '=' token reference between key and value appears stripped.
void AddKeyValueOption(List list) :
{
final SqlNode key;
final SqlNode value;
}
{
(
key = SimpleIdentifier()
|
key = StringLiteral()
)
value = StringLiteral() {
list.add(key);
list.add(value);
}
}
/** Parses an option value (either a string or a numeric) and adds to a list. */
// Parses a single option value — numeric or string literal — and appends it to the list.
void AddOptionValue(List list) :
{
final SqlNode value;
}
{
(
value = NumericLiteral() { list.add(value); }
|
value = StringLiteral() { list.add(value); }
)
}
/**
* Parses a literal list separated by comma. The literal is either a string or a numeric.
*/
// Parses a parenthesized comma-separated list of literal option values (string or
// numeric) into a SqlNodeList.
// NOTE(review): the parenthesis and comma token references appear stripped here.
SqlNodeList ParenthesizedLiteralOptionCommaList() :
{
final Span s;
final List list = new ArrayList();
}
{
{ s = span(); }
AddOptionValue(list) ( AddOptionValue(list) )*
{
return new SqlNodeList(list, s.end(this));
}
}
// Parses one hint — a name optionally followed by options in KV-list, identifier-list,
// literal-list, or empty form — and appends a SqlHint to the list.
// NOTE(review): the empty-parentheses token references in the EMPTY branch appear to have
// been stripped from this copy.
void AddHint(List hints) :
{
final SqlIdentifier hintName;
final SqlNodeList hintOptions;
final SqlHint.HintOptionFormat optionFormat;
}
{
hintName = SimpleIdentifier()
(
LOOKAHEAD(5)
hintOptions = ParenthesizedKeyValueOptionCommaList() {
optionFormat = SqlHint.HintOptionFormat.KV_LIST;
}
|
LOOKAHEAD(3)
hintOptions = ParenthesizedSimpleIdentifierList() {
optionFormat = SqlHint.HintOptionFormat.ID_LIST;
}
|
LOOKAHEAD(3)
hintOptions = ParenthesizedLiteralOptionCommaList() {
optionFormat = SqlHint.HintOptionFormat.LITERAL_LIST;
}
|
LOOKAHEAD(2)
[ ]
{
hintOptions = SqlNodeList.EMPTY;
optionFormat = SqlHint.HintOptionFormat.EMPTY;
}
)
{
hints.add(
new SqlHint(Span.of(hintOptions).end(this), hintName, hintOptions,
optionFormat));
}
}
/** Parses hints following a table reference,
* and returns the wrapped table reference. */
// Parses one or more hints following a table name and wraps the name and hint list into
// a SqlTableRef. (Hint-delimiter tokens appear stripped in this copy.)
SqlNode TableHints(SqlIdentifier tableName) :
{
final List hints = new ArrayList();
}
{
AddHint(hints) ( AddHint(hints) )* {
final SqlParserPos pos = Span.of(tableName).addAll(hints).end(this);
final SqlNodeList hintList = new SqlNodeList(hints, pos);
return new SqlTableRef(pos, tableName, hintList);
}
}
/**
* Parses a leaf SELECT expression without ORDER BY.
*/
// Parses a leaf SELECT (no ORDER BY): optional hints, SELECT keywords (STREAM,
// ALL/DISTINCT), select list, then either a full FROM/WHERE/GROUP BY/HAVING/WINDOW/
// QUALIFY tail or an empty tail (FROM-less SELECT; validator enforces conformance).
// NOTE(review): SELECT/STREAM/FROM keyword tokens and the comma between select items
// appear to have been stripped from this copy of the grammar.
SqlSelect SqlSelect() :
{
final List keywords = new ArrayList();
final SqlLiteral keyword;
final SqlNodeList keywordList;
final List selectList = new ArrayList();
final SqlNode fromClause;
final SqlNode where;
final SqlNodeList groupBy;
final SqlNode having;
final SqlNodeList windowDecls;
final SqlNode qualify;
final List hints = new ArrayList();
final Span s;
}
{
{ s = span(); }
[ AddHint(hints) ( AddHint(hints) )* ]
SqlSelectKeywords(keywords)
(
// STREAM keyword (token stripped)
{
keywords.add(SqlSelectKeyword.STREAM.symbol(getPos()));
}
)?
(
keyword = AllOrDistinct() { keywords.add(keyword); }
)?
{
keywordList = new SqlNodeList(keywords, s.addAll(keywords).pos());
}
AddSelectItem(selectList)
( AddSelectItem(selectList) )*
(
fromClause = FromClause()
( where = Where() | { where = null; } )
( groupBy = GroupBy() | { groupBy = null; } )
( having = Having() | { having = null; } )
( windowDecls = Window() | { windowDecls = null; } )
( qualify = Qualify() | { qualify = null; } )
|
// E() matches nothing: SELECT without FROM.
E() {
fromClause = null;
where = null;
groupBy = null;
having = null;
windowDecls = null;
qualify = null;
}
)
{
return new SqlSelect(s.end(this), keywordList,
new SqlNodeList(selectList, Span.of(selectList).pos()),
fromClause, where, groupBy, having, windowDecls, qualify,
null, null, null, new SqlNodeList(hints, getPos()));
}
}
/*
* Abstract production:
*
* void SqlSelectKeywords(List keywords)
*
* Parses dialect-specific keywords immediately following the SELECT keyword.
*/
/**
* Parses an EXPLAIN PLAN statement.
*/
// Parses a standard EXPLAIN PLAN statement over a Calcite query/DML statement.
// NOTE(review): EXPLAIN/PLAN keyword tokens and the AS XML / AS JSON / AS DOT format
// tokens appear stripped in this copy; the bare action alternatives select the format.
SqlNode SqlExplain() :
{
SqlNode stmt;
SqlExplainLevel detailLevel = SqlExplainLevel.EXPPLAN_ATTRIBUTES;
SqlExplain.Depth depth;
final SqlExplainFormat format;
}
{
[ detailLevel = ExplainDetailLevel() ]
depth = ExplainDepth()
(
LOOKAHEAD(2)
{ format = SqlExplainFormat.XML; }
|
LOOKAHEAD(2)
{ format = SqlExplainFormat.JSON; }
|
{ format = SqlExplainFormat.DOT; }
|
// default when no format clause is given
{ format = SqlExplainFormat.TEXT; }
)
stmt = SqlQueryOrDml() {
return new SqlExplain(getPos(),
stmt,
detailLevel.symbol(SqlParserPos.ZERO),
depth.symbol(SqlParserPos.ZERO),
format.symbol(SqlParserPos.ZERO),
nDynamicParams);
}
}
/** Parses a query (SELECT or VALUES)
* or DML statement (INSERT, UPDATE, DELETE, MERGE). */
// Parses either a query (SELECT/VALUES, possibly ordered) or a DML statement
// (INSERT, DELETE, UPDATE, MERGE) and returns the parsed node.
SqlNode SqlQueryOrDml() :
{
SqlNode stmt;
}
{
(
stmt = OrderedQueryOrExpr(ExprContext.ACCEPT_QUERY)
|
stmt = SqlInsert()
|
stmt = SqlDelete()
|
stmt = SqlUpdate()
|
stmt = SqlMerge()
) { return stmt; }
}
/**
* Parses WITH TYPE | WITH IMPLEMENTATION | WITHOUT IMPLEMENTATION modifier for
* EXPLAIN PLAN.
*/
// Parses the EXPLAIN depth modifier (WITH TYPE / WITH IMPLEMENTATION / WITHOUT
// IMPLEMENTATION) into a Depth value; defaults to PHYSICAL when absent.
// NOTE(review): the keyword token references appear stripped; the bare action
// alternatives below correspond to the modifiers named in the javadoc above.
SqlExplain.Depth ExplainDepth() :
{
}
{
(
LOOKAHEAD(2)
{
return SqlExplain.Depth.TYPE;
}
|
{
return SqlExplain.Depth.PHYSICAL;
}
|
{
return SqlExplain.Depth.LOGICAL;
}
|
// default: no modifier present
{
return SqlExplain.Depth.PHYSICAL;
}
)
}
/**
* Parses INCLUDING ALL ATTRIBUTES modifier for EXPLAIN PLAN.
*/
// Parses the EXPLAIN attribute-level modifier (EXCLUDING/INCLUDING [ALL] ATTRIBUTES)
// into an SqlExplainLevel; defaults to EXPPLAN_ATTRIBUTES.
// NOTE(review): the keyword token references appear stripped from the alternatives.
SqlExplainLevel ExplainDetailLevel() :
{
SqlExplainLevel level = SqlExplainLevel.EXPPLAN_ATTRIBUTES;
}
{
(
// presumably EXCLUDING ATTRIBUTES
{
level = SqlExplainLevel.NO_ATTRIBUTES;
}
|
// presumably INCLUDING [ALL] ATTRIBUTES
[ { level = SqlExplainLevel.ALL_ATTRIBUTES; } ]
{
}
)
{
return level;
}
}
/**
* Parses a DESCRIBE statement.
*/
// Parses DESCRIBE: schema/database/catalog form, table [column] form, or statement form
// (which is turned into an EXPLAIN).
// NOTE(review): the DESCRIBE keyword and the lookahead token references (TABLE, compound
// identifier prefixes, STATEMENT, etc.) appear stripped from this copy of the grammar.
SqlNode SqlDescribe() :
{
final Span s;
final SqlIdentifier table;
final SqlIdentifier column;
final SqlIdentifier id;
final SqlNode stmt;
}
{
{ s = span(); }
(
// DESCRIBE SCHEMA|DATABASE|CATALOG (keyword tokens stripped)
LOOKAHEAD(2) ( | | )
id = CompoundIdentifier() {
// DESCRIBE DATABASE and DESCRIBE CATALOG currently do the same as
// DESCRIBE SCHEMA but should be different. See
// [CALCITE-1221] Implement DESCRIBE DATABASE, CATALOG, STATEMENT
return new SqlDescribeSchema(s.end(id), id);
}
|
// Use syntactic lookahead to determine whether a table name is coming.
// We do not allow SimpleIdentifier() because that includes .
LOOKAHEAD(
|
|
|
|
|
| )
()?
table = CompoundIdentifier()
( column = SimpleIdentifier() | { column = null; } )
{
return new SqlDescribeTable(s.add(table).addIf(column).pos(),
table, column);
}
|
(LOOKAHEAD(1) )?
stmt = SqlQueryOrDml() {
// DESCRIBE STATEMENT currently does the same as EXPLAIN. See
// [CALCITE-1221] Implement DESCRIBE DATABASE, CATALOG, STATEMENT
final SqlExplainLevel detailLevel = SqlExplainLevel.EXPPLAN_ATTRIBUTES;
final SqlExplain.Depth depth = SqlExplain.Depth.PHYSICAL;
final SqlExplainFormat format = SqlExplainFormat.TEXT;
return new SqlExplain(s.end(stmt),
stmt,
detailLevel.symbol(SqlParserPos.ZERO),
depth.symbol(SqlParserPos.ZERO),
format.symbol(SqlParserPos.ZERO),
nDynamicParams);
}
)
}
/**
* Parses a CALL statement.
*/
// Parses CALL <routine>(...) and wraps the routine invocation in the standard
// PROCEDURE_CALL operator. (The CALL keyword token appears stripped in this copy.)
SqlNode SqlProcedureCall() :
{
final Span s;
SqlNode routineCall;
}
{
{
s = span();
}
routineCall = NamedRoutineCall(
SqlFunctionCategory.USER_DEFINED_PROCEDURE,
ExprContext.ACCEPT_SUB_QUERY)
{
return SqlStdOperatorTable.PROCEDURE_CALL.createCall(
s.end(routineCall), routineCall);
}
}
// Parses a named routine invocation: compound name plus an optional parenthesized,
// comma-separated argument list; builds the call via createCall with the given category.
// NOTE(review): the parenthesis and comma token references appear stripped here.
SqlNode NamedRoutineCall(
SqlFunctionCategory routineType,
ExprContext exprContext) :
{
final SqlIdentifier name;
final List list = new ArrayList();
final Span s;
}
{
name = CompoundIdentifier() {
s = span();
}
[
AddArg0(list, exprContext)
(
{
// a comma-list can't appear where only a query is expected
checkNonQueryExpression(exprContext);
}
AddArg(list, exprContext)
)*
]
{
return createCall(name, s.end(this), routineType, null, list);
}
}
/**
* Table parameter of a table function.
* The input table with set semantics may be partitioned/ordered on one or more columns.
*/
// Parses a table parameter of a table function: an explicit TABLE reference with
// optional PARTITION BY and ORDER BY lists, wrapped as a set-semantics table if either
// list is present. (PARTITION BY keyword tokens appear stripped in this copy.)
SqlNode TableParam() :
{
final Span s;
final SqlNodeList partitionList;
final SqlNodeList orderList;
SqlNode tableRef;
}
{
{ s = span(); }
tableRef = ExplicitTable(getPos())
(
partitionList = SimpleIdentifierOrList()
| { partitionList = SqlNodeList.EMPTY; }
)
(
orderList = OrderByOfSetSemanticsTable()
| { orderList = SqlNodeList.EMPTY; }
)
{ return CreateSetSemanticsTableIfNeeded(s, tableRef, partitionList, orderList); }
}
// Parses a query/expression and then applies any trailing PARTITION BY / ORDER BY
// set-semantics decoration.
SqlNode PartitionedQueryOrQueryOrExpr(ExprContext exprContext) :
{
SqlNode e;
}
{
e = OrderedQueryOrExpr(exprContext)
e = PartitionedByAndOrderBy(e)
{ return e; }
}
// Parses optional PARTITION BY and ORDER BY lists after an expression and wraps the
// expression as a set-semantics table only when at least one list is present.
// (PARTITION BY keyword tokens appear stripped in this copy.)
SqlNode PartitionedByAndOrderBy(SqlNode e) :
{
final Span s;
final SqlNodeList partitionList;
final SqlNodeList orderList;
}
{
{ s = span(); }
(
partitionList = SimpleIdentifierOrList()
| { partitionList = SqlNodeList.EMPTY; }
)
(
orderList = OrderByOfSetSemanticsTable()
| { orderList = SqlNodeList.EMPTY; }
)
{ return CreateSetSemanticsTableIfNeeded(s, e, partitionList, orderList); }
}
// Parses the ORDER BY list of a set-semantics table: either a parenthesized
// comma-separated list of order items or a single bare order item.
// NOTE(review): ORDER BY keyword, parenthesis, and comma token references appear
// stripped from this copy of the grammar.
SqlNodeList OrderByOfSetSemanticsTable() :
{
final List list = new ArrayList();
final Span s;
}
{
{ s = span(); }
(
LOOKAHEAD(2)
AddOrderItem(list)
(
// NOTE jvs 6-Feb-2004: See comments at top of file for why
// hint is necessary here.
LOOKAHEAD(2) AddOrderItem(list)
)*
{
return new SqlNodeList(list, s.addAll(list).pos());
}
|
// single order item, no parentheses
AddOrderItem(list)
{
return new SqlNodeList(list, s.addAll(list).pos());
}
)
}
// Pure helper production (consumes no tokens): wraps e in a SET_SEMANTICS_TABLE call
// when a partition or order list was supplied, else returns e unchanged.
SqlNode CreateSetSemanticsTableIfNeeded(
final Span s,
final SqlNode e,
final SqlNodeList partitionList,
final SqlNodeList orderList) :
{
}
{
{
if (partitionList.isEmpty() && orderList.isEmpty()) {
return e;
} else {
return SqlStdOperatorTable.SET_SEMANTICS_TABLE.createCall(
s.pos(), e, partitionList, orderList);
}
}
}
/**
* Parses an INSERT statement.
*/
// Parses a standard Calcite INSERT/UPSERT statement: keywords, target table (with
// hints/EXTEND), optional column list, and the source query.
// NOTE(review): the INSERT/UPSERT and INTO keyword token references appear to have been
// stripped from this copy of the grammar.
SqlNode SqlInsert() :
{
final List keywords = new ArrayList();
final SqlNodeList keywordList;
final SqlIdentifier tableName;
SqlNode tableRef;
SqlNode source;
final SqlNodeList columnList;
final Span s;
final Pair p;
}
{
(
// INSERT branch (token stripped) vs. UPSERT branch, which records the keyword
|
{ keywords.add(SqlInsertKeyword.UPSERT.symbol(getPos())); }
)
{ s = span(); }
SqlInsertKeywords(keywords) {
keywordList = new SqlNodeList(keywords, s.addAll(keywords).pos());
}
tableName = CompoundTableIdentifier()
( tableRef = TableHints(tableName) | { tableRef = tableName; } )
[ LOOKAHEAD(5) tableRef = ExtendTable(tableRef) ]
(
LOOKAHEAD(2)
p = ParenthesizedCompoundIdentifierList() {
if (p.right.size() > 0) {
tableRef = extend(tableRef, p.right);
}
if (p.left.size() > 0) {
columnList = p.left;
} else {
columnList = null;
}
}
| { columnList = null; }
)
source = OrderedQueryOrExpr(ExprContext.ACCEPT_QUERY) {
return new SqlInsert(s.end(source), keywordList, tableRef, source,
columnList);
}
}
/*
* Abstract production:
*
* void SqlInsertKeywords(List keywords)
*
* Parses dialect-specific keywords immediately following the INSERT keyword.
*/
/**
* Parses a DELETE statement.
*/
// Parses DELETE FROM table [AS alias] [WHERE ...].
// NOTE(review): DELETE/FROM/AS keyword token references appear stripped from this copy.
SqlNode SqlDelete() :
{
final SqlIdentifier tableName;
SqlNode tableRef;
final SqlIdentifier alias;
final SqlNode where;
final Span s;
}
{
{
s = span();
}
tableName = CompoundTableIdentifier()
( tableRef = TableHints(tableName) | { tableRef = tableName; } )
[ tableRef = ExtendTable(tableRef) ]
( [ ] alias = SimpleIdentifier() | { alias = null; } )
( where = Where() | { where = null; } )
{
return new SqlDelete(s.add(tableRef).addIf(alias).addIf(where).pos(),
tableRef, where, null, alias);
}
}
/**
* Parses an UPDATE statement.
*/
// Parses UPDATE table [AS alias] SET col = expr [, col = expr]* [WHERE ...], collecting
// target columns and source expressions into parallel lists.
// NOTE(review): UPDATE/AS/SET/EQ/COMMA keyword token references appear stripped from
// this copy of the grammar.
SqlNode SqlUpdate() :
{
final SqlIdentifier tableName;
SqlNode tableRef;
final SqlIdentifier alias;
final SqlNode where;
final SqlNodeList sourceExpressionList;
final SqlNodeList targetColumnList;
SqlIdentifier id;
final Span s;
}
{
{
s = span();
targetColumnList = new SqlNodeList(s.pos());
sourceExpressionList = new SqlNodeList(s.pos());
}
tableName = CompoundTableIdentifier()
( tableRef = TableHints(tableName) | { tableRef = tableName; } )
[ tableRef = ExtendTable(tableRef) ]
( [ ] alias = SimpleIdentifier() | { alias = null; } )
id = SimpleIdentifier() {
targetColumnList.add(id);
}
// TODO: support DEFAULT also
AddExpression(sourceExpressionList, ExprContext.ACCEPT_SUB_QUERY)
(
id = SimpleIdentifier() { targetColumnList.add(id); }
AddExpression(sourceExpressionList, ExprContext.ACCEPT_SUB_QUERY)
)*
( where = Where() | { where = null; } )
{
final SqlParserPos pos = s.addAll(targetColumnList)
.addAll(sourceExpressionList).addIf(where).pos();
return new SqlUpdate(pos, tableRef, targetColumnList,
sourceExpressionList, where, null, alias);
}
}
/**
* Parses a MERGE statement.
*/
// Parses MERGE INTO target [AS alias] USING source ON condition with WHEN MATCHED and/or
// WHEN NOT MATCHED clauses; at least one of the two clauses must be present.
// NOTE(review): MERGE/INTO/AS/USING/ON keyword token references appear stripped here.
SqlNode SqlMerge() :
{
final SqlIdentifier tableName;
SqlNode tableRef;
final SqlIdentifier alias;
final SqlNode sourceTableRef;
final SqlNode condition;
final SqlUpdate updateCall;
final SqlInsert insertCall;
final Span s;
}
{
{ s = span(); } tableName = CompoundTableIdentifier()
( tableRef = TableHints(tableName) | { tableRef = tableName; } )
[ tableRef = ExtendTable(tableRef) ]
( [ ] alias = SimpleIdentifier() | { alias = null; } )
sourceTableRef = TableRef()
condition = Expression(ExprContext.ACCEPT_SUB_QUERY)
(
LOOKAHEAD(2)
updateCall = WhenMatchedClause(tableRef, alias)
( insertCall = WhenNotMatchedClause(tableRef) | { insertCall = null; } )
|
{ updateCall = null; }
insertCall = WhenNotMatchedClause(tableRef)
)
{
final SqlParserPos pos = s.addIf(updateCall).addIf(insertCall).pos();
return new SqlMerge(pos, tableRef, condition, sourceTableRef,
updateCall, insertCall, null, alias);
}
}
SqlUpdate WhenMatchedClause(SqlNode table, SqlIdentifier alias) :
{
SqlIdentifier id;
final Span s;
final SqlNodeList updateColumnList = new SqlNodeList(SqlParserPos.ZERO);
final SqlNodeList updateExprList = new SqlNodeList(SqlParserPos.ZERO);
}
{
{ s = span(); }