// com.bigdata.rdf.sail.sparql.ast / sparql.jjt
/**
Copyright (C) SYSTAP, LLC DBA Blazegraph 2006-2016. All rights reserved.
Contact:
SYSTAP, LLC DBA Blazegraph
2501 Calvert ST NW #106
Washington, DC 20008
licenses@blazegraph.com
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* @author: Bryan Thompson
* @openrdf
*
* "make javacc" to rebuild. Be sure to first remove the sparql.jj file
* so it will generate a new one.
*
* Updated 6/7/14 by Mike Personick. Updated to capture the delta between the
* Sesame 2.6.10/2.7.12 sparql.jjt files.
*/
/*
* Copyright Aduna (http://www.aduna-software.com/) (c) 1997-2007.
*
* Licensed under the Aduna BSD-style license.
*/
/*
* SPARQL JJTree input file for JavaCC, a Java Compiler Compiler. JavaCC can be
* downloaded from https://javacc.dev.java.net/
*
* author: Arjohn Kampman
*/
options {
MULTI=true;
VISITOR=true;
VISITOR_EXCEPTION="VisitorException";
STATIC=false;
UNICODE_INPUT=true;
JAVA_UNICODE_ESCAPE=true; // pre-process unicode escapes, see section A.2 of spec
}
PARSER_BEGIN(SyntaxTreeBuilder)
package com.bigdata.rdf.sail.sparql.ast;
import java.io.StringReader;
import org.openrdf.model.URI;
import org.openrdf.model.vocabulary.RDF;
import org.openrdf.model.vocabulary.XMLSchema;
import org.openrdf.query.algebra.Compare.CompareOp;
import org.openrdf.query.algebra.MathExpr.MathOp;
import org.openrdf.rio.RDFParser.DatatypeHandling;
public class SyntaxTreeBuilder {
/**
* Parses the supplied SPARQL query and builds a syntax tree from it.
*
* @param query A SPARQL query string.
* @return The root of the syntax tree.
* @throws TokenMgrError If the query was syntactically incorrect.
* @throws ParseException If the query was syntactically incorrect.
*/
public static ASTQueryContainer parseQuery(final String query)
throws TokenMgrError, ParseException
{
final SyntaxTreeBuilder stb = new SyntaxTreeBuilder( new StringReader(query) );
// Set size of tab to 1 to force tokenmanager to report correct column
// index for substring splitting of service graph pattern.
stb.jj_input_stream.setTabSize(1);
final ASTQueryContainer container = stb.QueryContainer();
container.setSourceString(query);
return container;
}
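/*
 * Illustrative usage sketch, not part of the grammar: client code is assumed to
 * call this entry point roughly as below. The query string is an arbitrary
 * example, and dump(String) is the node-tree debugging helper JJTree generates
 * on SimpleNode.
 *
 *   final ASTQueryContainer ast =
 *       SyntaxTreeBuilder.parseQuery("SELECT * WHERE { ?s ?p ?o }");
 *   ast.dump(""); // print the parsed syntax tree to System.out
 */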
/**
* Parses the supplied SPARQL update sequence and builds a syntax tree from it.
*
* @param sequence A SPARQL update sequence string.
* @return The root of the syntax tree.
* @throws TokenMgrError If the update sequence was syntactically incorrect.
* @throws ParseException If the update sequence was syntactically incorrect.
*/
public static ASTUpdateSequence parseUpdateSequence(final String sequence)
throws TokenMgrError, ParseException
{
final SyntaxTreeBuilder stb = new SyntaxTreeBuilder( new StringReader(sequence) );
final ASTUpdateSequence seq = stb.UpdateSequence();
seq.setSourceString(sequence);
return seq;
}
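/*
 * Usage sketch for the update entry point, again with an arbitrary example
 * string; lexical and syntax errors surface as TokenMgrError / ParseException.
 *
 *   final ASTUpdateSequence seq = SyntaxTreeBuilder.parseUpdateSequence(
 *       "INSERT DATA { <urn:s> <urn:p> <urn:o> }");
 *   seq.dump("");
 */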
/**
* Trims n characters from the start and end of the supplied string, e.g. stripping the surrounding angle brackets from an IRI token image when n is 1.
*/
private static String _trimString(String s, int n) {
if (s.length() >= 2 * n) {
s = s.substring(n, s.length() - n);
}
return s;
}
private String readToMatchingBrace() {
StringBuilder sb = new StringBuilder();
Token tok;
int nesting = 1;
boolean previousTokenDtSep = false;
while (true) {
tok = getToken(1);
if (!previousTokenDtSep && ! tok.image.equals("^^") && !tok.image.startsWith("@")) {
sb.append(" ");
}
if (tok.kind == LBRACE) nesting++;
if (tok.kind == RBRACE) {
nesting--;
if (nesting == 0) break;
}
// To provide correct line numbers (see https://jira.blazegraph.com/browse/BLZG-1397)
// we need to keep line breaks and single-line comments. So we also append the special
// tokens (comments and newline characters) per https://javacc.java.net/doc/tokenmanager.html
if (tok.specialToken != null) {
// The above statement determines that there is a special token
Token tmp_t = tok.specialToken;
while (tmp_t.specialToken != null)
tmp_t = tmp_t.specialToken;
// The above line walks back the special token chain until it
// reaches the first special token after the previous regular
// token.
while (tmp_t != null) {
sb.append(tmp_t.image);
tmp_t = tmp_t.next;
}
// The above loop now walks the special token chain in the forward
// direction printing them in the process.
}
sb.append(tok.image);
previousTokenDtSep = tok.image.equals("^^");
tok = getNextToken();
}
return sb.toString();
}
}
PARSER_END(SyntaxTreeBuilder)
//
// tokens
//
TOKEN :
{
// Whitespace characters
<#WS_CHAR: " " | "\t" | "\r" | "\f">
}
SKIP :
{
<WS: <WS_CHAR>>
}
SPECIAL_TOKEN :
{
<NEWLINE: ("\n" | "\r" | "\r\n")>
| <SINGLE_LINE_COMMENT: "#" (~["\n","\r"])*>
}
// Special characters
TOKEN :
{
<LPAREN: "(">
| <RPAREN: ")">
| <LBRACE: "{">
| <RBRACE: "}">
| <LBRACK: "[">
| <RBRACK: "]">
| <SEMICOLON: ";">
| <COMMA: ",">
| <DOT: ".">
| <EQ: "=">
| <NE: "!=">
| <GT: ">">
| <LT: "<">
| <LE: "<=">
| <GE: ">=">
| <NOT: "!">
| <OR: "||">
| <AND: "&&">
| <PLUS: "+">
| <MINUS: "-">
| <STAR: "*">
| <QUESTION: "?">
| <SLASH: "/">
| <PIPE: "|">
| <INVERSE: "^">
| <DT_PREFIX: "^^">
| <NIL: <LPAREN> (<WS_CHAR>|<NEWLINE>)* <RPAREN>>
| <ANON: <LBRACK> (<WS_CHAR>|<NEWLINE>)* <RBRACK>>
}
TOKEN :
{
// Note: case-sensitive!
<IS_A: "a">
}
TOKEN [IGNORE_CASE] :
{
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
}
TOKEN [IGNORE_CASE] : // Bigdata extension specific tokens
{
|
|
| >
|
|
|
|
| >">
// RDFParserOptions (for LOAD)
|
|
|
|
|
|
|
// Managing Truth Maintenance
|
|
|
|
}
TOKEN [IGNORE_CASE] : // SPARQL 1.1 Update-specific tokens
{
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
}
TOKEN:
{
<Q_IRI_REF: "<" (~["<", ">", "\"", "{", "}", "|", "^", "`", "\\", "\u0000"-"\u0020"])* ">">
| <PNAME_NS: (<PN_PREFIX>)? ":">
| <PNAME_LN: (<PN_PREFIX>)? ":" <PN_LOCAL> > // Note: PN_LOCAL not optional, case handled by PNAME_NS
| <BLANK_NODE_LABEL: "_:" (<PN_CHARS_U> | <NUM>) ((<PN_CHARS>|".")* <PN_CHARS>)? >
| <VAR1: "?" <VARNAME>>
| <VAR2: "$" <VARNAME>>
| <LANGTAG: "@" (<ALPHA>)+ ("-" (<ALPHA>|<NUM>)+)*>
| <INTEGER: (<NUM>)+>
| <INTEGER_POSITIVE: "+" <INTEGER>>
| <INTEGER_NEGATIVE: "-" <INTEGER>>
| <DECIMAL: (<DECIMAL1>|<DECIMAL2>)>
| <#DECIMAL1: (<NUM>)+ "." (<NUM>)*>
| <#DECIMAL2: "." (<NUM>)+>
| <DECIMAL_POSITIVE: "+" <DECIMAL>>
| <DECIMAL_NEGATIVE: "-" <DECIMAL>>
| <DOUBLE: (<DOUBLE1>|<DOUBLE2>|<DOUBLE3>)>
| <#DOUBLE1: (<NUM>)+ "." (<NUM>)* <EXPONENT>>
| <#DOUBLE2: "." (<NUM>)+ <EXPONENT>>
| <#DOUBLE3: (<NUM>)+ <EXPONENT>>
| <#EXPONENT: ["e","E"] (["+","-"])? (<NUM>)+>
| <DOUBLE_POSITIVE: "+" <DOUBLE>>
| <DOUBLE_NEGATIVE: "-" <DOUBLE>>
| <STRING_LITERAL1: "'" (<SAFE_CHAR1>|<ECHAR>)* "'">
| <STRING_LITERAL2: "\"" (<SAFE_CHAR2>|<ECHAR>)* "\"">
| <STRING_LITERAL_LONG1: "'''" ( ("'" | "''")? (<SAFE_CHAR_LONG1>|<ECHAR>) )* "'''">
| <STRING_LITERAL_LONG2: "\"\"\"" ( ("\"" | "\"\"")? (<SAFE_CHAR_LONG2>|<ECHAR>) )* "\"\"\"">
| <#SAFE_CHAR1: (~["'", "\\", "\r", "\n"])>
| <#SAFE_CHAR2: (~["\"", "\\", "\r", "\n"])>
| <#SAFE_CHAR_LONG1: (~["'","\\"])>
| <#SAFE_CHAR_LONG2: (~["\"","\\"])>
| <#ECHAR: "\\" ["t", "b", "n", "r", "f", "\\", "\"", "'"]>
| <#HEX: ["0"-"9"] | ["A"-"F"] | ["a"-"f"]>
| <#ALPHA: ["a"-"z","A"-"Z"]>
| <#NUM: ["0"-"9"]>
}
TOKEN:
{
<#PN_CHARS_BASE:
<ALPHA> | ["\u00C0"-"\u00D6"] | ["\u00D8"-"\u00F6"] | ["\u00F8"-"\u02FF"] | ["\u0370"-"\u037D"]
| ["\u037F"-"\u1FFF"] | ["\u200C"-"\u200D"] | ["\u2070"-"\u218F"] | ["\u2C00"-"\u2FEF"]
| ["\u3001"-"\uD7FF"] | ["\uF900"-"\uFDCF"] | ["\uFDF0"-"\uFFFD"]
// | ["\u10000"-"\uEFFFF"] FIXME: JavaCC/Java can't handle this?
>
| <#PN_CHARS_U: <PN_CHARS_BASE> | "_">
| <#VAR_CHAR: <PN_CHARS_U> | <NUM> | "\u00B7" | ["\u0300"-"\u036F"] | ["\u203F"-"\u2040"]>
| <#PN_CHARS: <PN_CHARS_U> | "-" | <NUM> | "\u00B7" | ["\u0300"-"\u036F"] | ["\u203F"-"\u2040"]>
| <#PN_PREFIX: <PN_CHARS_BASE> ( (<PN_CHARS> | ".")* <PN_CHARS>)?>
| <#PN_LOCAL: (<PN_CHARS_U> | ":" | <NUM> | <PLX>) ( (<PN_CHARS> | "." | ":" | <PLX>)* (<PN_CHARS> | ":" | <PLX>) )?>
| <#PLX: <PERCENT> | <PN_LOCAL_ESC>>
| <#PERCENT: "%" <HEX> <HEX>>
| <#PN_LOCAL_ESC: "\\" [ "_", "~", ".", "-", "!", "$", "&", "\"", "(", ")", "*", "+", ",", ";", "=", "/", "?", "#", "@", "%" ]>
| <#VARNAME: (<PN_CHARS_U> | <NUM>) (<VAR_CHAR>)*>
}
//
// grammar
//
ASTUpdateSequence UpdateSequence():
{}
{
UpdateContainer() [ <SEMICOLON> UpdateSequence() ]
{ return jjtThis; }
}
ASTUpdateContainer UpdateContainer():
{}
{
Prolog() [ Update() ]
{ return jjtThis; }
}
ASTQueryContainer QueryContainer():
{}
{
Prolog() Query() <EOF>
{ return jjtThis; }
}
void Prolog() #void :
{}
{
( PrefixDecl() | BaseDecl() )*
}
void BaseDecl() :
{ Token t; }
{
<BASE> t = <Q_IRI_REF> {jjtThis.setIRI(_trimString(t.image, 1));}
}
void PrefixDecl() :
{ Token prefix; }
{
<PREFIX> prefix = <PNAME_NS> IRI()
{
// Remove trailing colon from prefix
String prefixStr = prefix.image;
prefixStr = prefixStr.substring(0, prefixStr.length() - 1);
jjtThis.setPrefix(prefixStr);
}
}
void Query() #void :
{}
{
(SelectQuery() | ConstructQuery() | DescribeQuery() | AskQuery())
}
void SelectQuery() :
{}
{
Select()
( DatasetClause() )*
( NamedSubquery() )*
WhereClause()
SolutionModifier()
[BindingsClause()]
}
void SubSelect() #SelectQuery : // subselect does not accept dataset clauses
{}
{
Select()
WhereClause()
SolutionModifier()
[BindingsClause()]
}
void Select() :
{}
{
<SELECT>
[
<DISTINCT> {jjtThis.setDistinct(true);}
|
<REDUCED> {jjtThis.setReduced(true);}
]
(
<STAR> { jjtThis.setWildcard(true); }
|
( ProjectionElem() )+
)
}
void ProjectionElem() :
{}
{
Var()
|
<LPAREN> Expression() <AS> Var() <RPAREN>
}
void ConstructQuery() :
{}
{
Construct()
( DatasetClause() )*
( NamedSubquery() )*
WhereClause()
SolutionModifier()
[BindingsClause()]
}
void Construct() :
{}
{