/*
* Generated by: CongoCC Parser Generator. Token.java
*/
package org.projectnessie.nessie.cli.grammar;

import org.projectnessie.nessie.cli.grammar.ast.*;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;


public class Token implements CharSequence, Node.TerminalNode {

    public enum TokenType implements Node.NodeType {
        EOF, USE, DROP, LIST, SHOW, HELP, EXIT, ALTER, MERGE, ASSIGN, CREATE, REVERT,
        CONNECT, SEMICOLON, EQUAL, AT, IF, IN, OF, ON, TO, AND, DRY, LOG, NOT, SET,
        FROM, INTO, VIEW, WITH, ALLOW, FORCE, LIMIT, STATE, TABLE, USING, COMMIT,
        EXISTS, FILTER, NORMAL, REMOVE, CONTENT, DELETES, LICENSE, BEHAVIOR, CONTENTS,
        STARTING, BEHAVIORS, NAMESPACE, TIMESTAMP, REFERENCE, REFERENCES, CONTAINING,
        TRUE, FALSE, BRANCH, TAG, WHITESPACE, STRING_LITERAL, IDENTIFIER, URI, POSITIVE_INT,
        SINGLE_LINE_DASH_COMMENT, SINGLE_LINE_COMMENT, MULTI_LINE_COMMENT, DUMMY,
        INVALID;

        TokenType() {
        }

        TokenType(String literalString) {
            this.literalString = literalString;
        }

        private String literalString;

        public String getLiteralString() {
            return literalString;
        }

        public boolean isUndefined() {
            return this == DUMMY;
        }

        public boolean isInvalid() {
            return this == INVALID;
        }

        public boolean isEOF() {
            return this == EOF;
        }

    }

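    // The lexer that produced this token, its type, its character offsets in
    // the input, whether it was parsed, and its parent node in the AST.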
    private NessieCliLexer tokenSource;
    private TokenType type = TokenType.DUMMY;
    private int beginOffset;
    private int endOffset;
    private boolean unparsed;
    private Node parent;

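    /**
    * Shorten this token by the given number of characters, without ever
    * moving the end offset before the begin offset.
    */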
    public void truncate(int amount) {
        int newEndOffset = Math.max(getBeginOffset(), getEndOffset() - amount);
        setEndOffset(newEndOffset);
    }

    /**
    * It would be extremely rare that an application
    * programmer would use this method. It needs to
    * be public because it is part of the org.projectnessie.nessie.cli.grammar.Node interface.
    */
    public void setBeginOffset(int beginOffset) {
        this.beginOffset = beginOffset;
    }

    /**
    * It would be extremely rare that an application
    * programmer would use this method. It needs to
    * be public because it is part of the org.projectnessie.nessie.cli.grammar.Node interface.
    */
    public void setEndOffset(int endOffset) {
        this.endOffset = endOffset;
    }

    /**
    * @return the NessieCliLexer object that handles
    * location info for the tokens.
    */
    public NessieCliLexer getTokenSource() {
        return this.tokenSource;
    }

    /**
    * It should be exceedingly rare that an application
    * programmer needs to use this method.
    */
    public void setTokenSource(TokenSource tokenSource) {
        this.tokenSource = (NessieCliLexer) tokenSource;
    }

    public boolean isInvalid() {
        return getType().isInvalid();
    }

    /**
    * Return the TokenType of this Token object
    */
    @Override
    public TokenType getType() {
        return type;
    }

    protected void setType(TokenType type) {
        this.type = type;
    }

    /**
    * @return whether this Token represents actual input or whether it was inserted somehow
    */
    public boolean isVirtual() {
        return type == TokenType.EOF;
    }

    /**
    * @return Did we skip this token in parsing?
    */
    public boolean isSkipped() {
        return false;
    }

    public int getBeginOffset() {
        return beginOffset;
    }

    public int getEndOffset() {
        return endOffset;
    }

    /**
    * @return the next _cached_ regular (i.e. parsed) token
    * or null
    */
    @Override
    public final Token getNext() {
        return getNextParsedToken();
    }

    /**
    * @return the previous regular (i.e. parsed) token
    * or null
    */
    public final Token getPrevious() {
        Token result = previousCachedToken();
        while (result != null && result.isUnparsed()) {
            result = result.previousCachedToken();
        }
        return result;
    }

    /**
    * @return the next regular (i.e. parsed) token
    */
    private Token getNextParsedToken() {
        Token result = nextCachedToken();
        while (result != null && result.isUnparsed()) {
            result = result.nextCachedToken();
        }
        return result;
    }

    /**
    * @return the next token of any sort (parsed or unparsed or invalid)
    */
    public Token nextCachedToken() {
        if (getType() == TokenType.EOF) return null;
        NessieCliLexer tokenSource = getTokenSource();
        return tokenSource != null ? (Token) tokenSource.nextCachedToken(getEndOffset()) : null;
    }

    public Token previousCachedToken() {
        if (getTokenSource() == null) return null;
        return (Token) getTokenSource().previousCachedToken(getBeginOffset());
    }

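    /**
    * Package-private alias for {@link #previousCachedToken()}.
    */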
    Token getPreviousToken() {
        return previousCachedToken();
    }

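    /**
    * @return a new token of the given type spanning the same offsets as this
    * one; the new token is cached in the token source.
    */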
    public Token replaceType(TokenType type) {
        Token result = newToken(type, getTokenSource(), getBeginOffset(), getEndOffset());
        getTokenSource().cacheToken(result);
        return result;
    }

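    /**
    * @return the text this token spans in the input, the empty string for EOF,
    * or null if no token source or location information is available.
    */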
    public String getSource() {
        if (type == TokenType.EOF) return "";
        NessieCliLexer ts = getTokenSource();
        int beginOffset = getBeginOffset();
        int endOffset = getEndOffset();
        return ts == null || (beginOffset <= 0 && endOffset <= 0) ? null : ts.getText(beginOffset, endOffset);
    }

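    /**
    * Protected no-argument constructor for subclasses; the token's state is
    * meant to be filled in via the setters.
    */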
    protected Token() {
    }

    public Token(TokenType type, NessieCliLexer tokenSource, int beginOffset, int endOffset) {
        this.type = type;
        this.tokenSource = tokenSource;
        this.beginOffset = beginOffset;
        this.endOffset = endOffset;
    }

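    /**
    * @return whether this token is flagged as unparsed, i.e. produced by the
    * lexer but not part of the regular parsed token stream.
    */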
    public boolean isUnparsed() {
        return unparsed;
    }

    public void setUnparsed(boolean unparsed) {
        this.unparsed = unparsed;
    }

    /**
    * @return An iterator of the tokens preceding this one.
    */
    public Iterator<Token> precedingTokens() {
        return new Iterator<Token>() {
            Token currentPoint = Token.this;

            public boolean hasNext() {
                return currentPoint.previousCachedToken() != null;
            }

            public Token next() {
                Token previous = currentPoint.previousCachedToken();
                if (previous == null) throw new java.util.NoSuchElementException("No previous token!");
                return currentPoint = previous;
            }

        };
    }

    /**
    * @return a list of the unparsed tokens preceding this one in the order they appear in the input
    */
    public List<Token> precedingUnparsedTokens() {
        List<Token> result = new ArrayList<>();
        Token t = this.previousCachedToken();
        while (t != null && t.isUnparsed()) {
            result.add(t);
            t = t.previousCachedToken();
        }
        Collections.reverse(result);
        return result;
    }

    /**
    * @return An iterator of the (cached) tokens that follow this one.
    */
    public Iterator<Token> followingTokens() {
        return new java.util.Iterator<Token>() {
            Token currentPoint = Token.this;

            public boolean hasNext() {
                return currentPoint.nextCachedToken() != null;
            }

            public Token next() {
                Token next = currentPoint.nextCachedToken();
                if (next == null) throw new java.util.NoSuchElementException("No next token!");
                return currentPoint = next;
            }

        };
    }

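    /**
    * Copy the token source and begin/end offsets from another token.
    */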
    public void copyLocationInfo(Token from) {
        setTokenSource(from.getTokenSource());
        setBeginOffset(from.getBeginOffset());
        setEndOffset(from.getEndOffset());
    }

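    /**
    * Set this token's location to span from the start token's begin offset to
    * the end token's end offset.
    */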
    public void copyLocationInfo(Token start, Token end) {
        setTokenSource(start.getTokenSource());
        if (tokenSource == null) setTokenSource(end.getTokenSource());
        setBeginOffset(start.getBeginOffset());
        setEndOffset(end.getEndOffset());
    }

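    /**
    * Create a new token of the given type with zero begin and end offsets.
    */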
    public static Token newToken(TokenType type, NessieCliLexer tokenSource) {
        Token result = newToken(type, tokenSource, 0, 0);
        return result;
    }

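    /**
    * Create a new token of the given type; note that the image argument is
    * ignored by this implementation.
    */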
    public static Token newToken(TokenType type, String image, NessieCliLexer tokenSource) {
        Token newToken = newToken(type, tokenSource);
        return newToken;
    }

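    /**
    * Factory method that instantiates the Token subclass (Keyword, Command,
    * literal, comment, etc.) associated with the given token type.
    */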
    public static Token newToken(TokenType type, NessieCliLexer tokenSource, int beginOffset, int endOffset) {
        switch(type) {
            case NAMESPACE : 
                return new Keyword(TokenType.NAMESPACE, tokenSource, beginOffset, endOffset);
            case CREATE : 
                return new Command(TokenType.CREATE, tokenSource, beginOffset, endOffset);
            case REVERT : 
                return new Command(TokenType.REVERT, tokenSource, beginOffset, endOffset);
            case DRY : 
                return new Keyword(TokenType.DRY, tokenSource, beginOffset, endOffset);
            case FROM : 
                return new Keyword(TokenType.FROM, tokenSource, beginOffset, endOffset);
            case VIEW : 
                return new Keyword(TokenType.VIEW, tokenSource, beginOffset, endOffset);
            case TABLE : 
                return new Keyword(TokenType.TABLE, tokenSource, beginOffset, endOffset);
            case SEMICOLON : 
                return new Semicolon(TokenType.SEMICOLON, tokenSource, beginOffset, endOffset);
            case WHITESPACE : 
                return new Whitespace(TokenType.WHITESPACE, tokenSource, beginOffset, endOffset);
            case TIMESTAMP : 
                return new Keyword(TokenType.TIMESTAMP, tokenSource, beginOffset, endOffset);
            case CONTAINING : 
                return new Keyword(TokenType.CONTAINING, tokenSource, beginOffset, endOffset);
            case IF : 
                return new Keyword(TokenType.IF, tokenSource, beginOffset, endOffset);
            case LICENSE : 
                return new Keyword(TokenType.LICENSE, tokenSource, beginOffset, endOffset);
            case ALLOW : 
                return new Keyword(TokenType.ALLOW, tokenSource, beginOffset, endOffset);
            case BEHAVIORS : 
                return new Keyword(TokenType.BEHAVIORS, tokenSource, beginOffset, endOffset);
            case POSITIVE_INT : 
                return new PositiveIntLiteral(TokenType.POSITIVE_INT, tokenSource, beginOffset, endOffset);
            case IN : 
                return new Keyword(TokenType.IN, tokenSource, beginOffset, endOffset);
            case LOG : 
                return new Keyword(TokenType.LOG, tokenSource, beginOffset, endOffset);
            case TRUE : 
                return new BooleanLiteral(TokenType.TRUE, tokenSource, beginOffset, endOffset);
            case USING : 
                return new Keyword(TokenType.USING, tokenSource, beginOffset, endOffset);
            case NOT : 
                return new Keyword(TokenType.NOT, tokenSource, beginOffset, endOffset);
            case AT : 
                return new Keyword(TokenType.AT, tokenSource, beginOffset, endOffset);
            case FORCE : 
                return new Keyword(TokenType.FORCE, tokenSource, beginOffset, endOffset);
            case AND : 
                return new Keyword(TokenType.AND, tokenSource, beginOffset, endOffset);
            case LIST : 
                return new Command(TokenType.LIST, tokenSource, beginOffset, endOffset);
            case DELETES : 
                return new Keyword(TokenType.DELETES, tokenSource, beginOffset, endOffset);
            case EXIT : 
                return new Command(TokenType.EXIT, tokenSource, beginOffset, endOffset);
            case ALTER : 
                return new Command(TokenType.ALTER, tokenSource, beginOffset, endOffset);
            case INTO : 
                return new Keyword(TokenType.INTO, tokenSource, beginOffset, endOffset);
            case SINGLE_LINE_DASH_COMMENT : 
                return new SingleLineDashComment(TokenType.SINGLE_LINE_DASH_COMMENT, tokenSource, beginOffset, endOffset);
            case SET : 
                return new Keyword(TokenType.SET, tokenSource, beginOffset, endOffset);
            case MERGE : 
                return new Command(TokenType.MERGE, tokenSource, beginOffset, endOffset);
            case STATE : 
                return new Keyword(TokenType.STATE, tokenSource, beginOffset, endOffset);
            case CONNECT : 
                return new Command(TokenType.CONNECT, tokenSource, beginOffset, endOffset);
            case LIMIT : 
                return new Keyword(TokenType.LIMIT, tokenSource, beginOffset, endOffset);
            case ASSIGN : 
                return new Command(TokenType.ASSIGN, tokenSource, beginOffset, endOffset);
            case URI : 
                return new UriLiteral(TokenType.URI, tokenSource, beginOffset, endOffset);
            case DROP : 
                return new Command(TokenType.DROP, tokenSource, beginOffset, endOffset);
            case CONTENTS : 
                return new Keyword(TokenType.CONTENTS, tokenSource, beginOffset, endOffset);
            case SINGLE_LINE_COMMENT : 
                return new SingleLineComment(TokenType.SINGLE_LINE_COMMENT, tokenSource, beginOffset, endOffset);
            case OF : 
                return new Keyword(TokenType.OF, tokenSource, beginOffset, endOffset);
            case REFERENCE : 
                return new Keyword(TokenType.REFERENCE, tokenSource, beginOffset, endOffset);
            case REMOVE : 
                return new Keyword(TokenType.REMOVE, tokenSource, beginOffset, endOffset);
            case SHOW : 
                return new Command(TokenType.SHOW, tokenSource, beginOffset, endOffset);
            case CONTENT : 
                return new Keyword(TokenType.CONTENT, tokenSource, beginOffset, endOffset);
            case IDENTIFIER : 
                return new Ident(TokenType.IDENTIFIER, tokenSource, beginOffset, endOffset);
            case ON : 
                return new Keyword(TokenType.ON, tokenSource, beginOffset, endOffset);
            case HELP : 
                return new Command(TokenType.HELP, tokenSource, beginOffset, endOffset);
            case BRANCH : 
                return new Branch(TokenType.BRANCH, tokenSource, beginOffset, endOffset);
            case STARTING : 
                return new Keyword(TokenType.STARTING, tokenSource, beginOffset, endOffset);
            case FILTER : 
                return new Keyword(TokenType.FILTER, tokenSource, beginOffset, endOffset);
            case MULTI_LINE_COMMENT : 
                return new MultiLineComment(TokenType.MULTI_LINE_COMMENT, tokenSource, beginOffset, endOffset);
            case EXISTS : 
                return new Keyword(TokenType.EXISTS, tokenSource, beginOffset, endOffset);
            case COMMIT : 
                return new Keyword(TokenType.COMMIT, tokenSource, beginOffset, endOffset);
            case USE : 
                return new Command(TokenType.USE, tokenSource, beginOffset, endOffset);
            case EQUAL : 
                return new Equal(TokenType.EQUAL, tokenSource, beginOffset, endOffset);
            case BEHAVIOR : 
                return new Keyword(TokenType.BEHAVIOR, tokenSource, beginOffset, endOffset);
            case WITH : 
                return new Keyword(TokenType.WITH, tokenSource, beginOffset, endOffset);
            case FALSE : 
                return new BooleanLiteral(TokenType.FALSE, tokenSource, beginOffset, endOffset);
            case STRING_LITERAL : 
                return new StringLiteral(TokenType.STRING_LITERAL, tokenSource, beginOffset, endOffset);
            case TO : 
                return new Keyword(TokenType.TO, tokenSource, beginOffset, endOffset);
            case TAG : 
                return new Tag(TokenType.TAG, tokenSource, beginOffset, endOffset);
            case NORMAL : 
                return new Keyword(TokenType.NORMAL, tokenSource, beginOffset, endOffset);
            case REFERENCES : 
                return new Keyword(TokenType.REFERENCES, tokenSource, beginOffset, endOffset);
            case INVALID : 
                return new InvalidToken(tokenSource, beginOffset, endOffset);
            default : 
                return new Token(type, tokenSource, beginOffset, endOffset);
        }
    }

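    /**
    * @return a human-readable location string in the form
    * inputSource:line:column.
    */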
    public String getLocation() {
        return getInputSource() + ":" + getBeginLine() + ":" + getBeginColumn();
    }

    public Node getParent() {
        return parent;
    }

    public void setParent(Node parent) {
        this.parent = parent;
    }

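    // CharSequence implementation: a view over the span of characters this
    // token covers in the underlying source text.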
    public boolean isEmpty() {
        return length() == 0;
    }

    public int length() {
        return endOffset - beginOffset;
    }

    public CharSequence subSequence(int start, int end) {
        return getTokenSource().subSequence(beginOffset + start, beginOffset + end);
    }

    public char charAt(int offset) {
        return getTokenSource().charAt(beginOffset + offset);
    }

    /**
    * @deprecated Use toString() instead
    */
    @Deprecated
    public String getImage() {
        return toString();
    }

    @Override
    public String toString() {
        String result = getSource();
        if (result == null) {
            result = getType().getLiteralString();
        }
        return result;
    }

}