rationals.converters.analyzers.DefaultLexer (org.semanticweb.hermit)
HermiT is a reasoner for ontologies written using the Web
Ontology Language (OWL). Given an OWL file, HermiT can determine whether or
not the ontology is consistent, identify subsumption relationships between
classes, and much more.
This is the Maven build of HermiT and is designed for people who wish to use
HermiT from within the OWL API. It is now versioned in the main HermiT
version repository, although not officially supported by the HermiT
developers.
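For illustration, here is a minimal sketch of that usage, assuming the standard OWL API entry points (OWLManager, OWLReasoner) together with HermiT's ReasonerFactory; the class name and ontology file name below are placeholders, not part of this package.

import java.io.File;
import org.semanticweb.HermiT.ReasonerFactory;
import org.semanticweb.owlapi.apibinding.OWLManager;
import org.semanticweb.owlapi.model.OWLOntology;
import org.semanticweb.owlapi.model.OWLOntologyManager;
import org.semanticweb.owlapi.reasoner.OWLReasoner;

public class ConsistencyCheck {
    public static void main(String[] args) throws Exception {
        // Load an ontology from a local file (the file name is only an example).
        OWLOntologyManager manager = OWLManager.createOWLOntologyManager();
        OWLOntology ontology = manager.loadOntologyFromOntologyDocument(new File("example.owl"));
        // Create a HermiT reasoner over the ontology and query it.
        OWLReasoner reasoner = new ReasonerFactory().createReasoner(ontology);
        System.out.println("Consistent: " + reasoner.isConsistent());
    }
}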
The version number of this package is a composite of the HermiT version and
a value representing releases of this packaged version. So, 1.3.7.1 is the
first release of the mavenized version of HermiT based on the 1.3.7 release
of HermiT.
This package includes the Jautomata library
(http://jautomata.sourceforge.net/), and builds with it directly. This
library appears to be no longer under active development, and so a "fork"
seems appropriate. No development is intended or anticipated on this code
base.
package rationals.converters.analyzers;
import java.io.IOException;
import java.io.Reader;
import java.io.StreamTokenizer;
import java.io.StringReader;
import rationals.converters.ConverterException;
/**
* Default lexical analyzer for regular expressions.
* This lexer parses a regular expression and treats each letter
* as a label.
*
* @author yroos
* @version $Id: DefaultLexer.java 2 2006-08-24 14:41:48Z oqube $
*/
public class DefaultLexer implements Lexer {
private StreamTokenizer tokenizer;
private boolean end;
private String image;
private int value;
private int current;
/**
* Construct a lexical analyzer to parse the given string.
*
* @param in the String to parse.
*/
public DefaultLexer(String in) {
this(new StreamTokenizer(new StringReader(in)));
}
/**
* Construct a lexical analyzer to parse the characters from the given
* Reader object.
*
* @param rd the character stream to parse.
*/
public DefaultLexer(Reader rd) {
this(new StreamTokenizer(rd));
}
/**
* Construct a lexical analyzer that reads its input from the given
* StreamTokenizer object.
* Note that the tokenizer is reset and given new attributes.
*
* @param st the tokenizer to read characters from.
*/
public DefaultLexer(StreamTokenizer st) {
tokenizer = st;
tokenizer.resetSyntax();
tokenizer.eolIsSignificant(false);
tokenizer.lowerCaseMode(false);
tokenizer.slashSlashComments(true);
tokenizer.quoteChar('\"');
// group runs of digits into a single word token
tokenizer.wordChars('0', '9');
// skip control characters and spaces (codes 0-32)
tokenizer.whitespaceChars(0, 32);
end = false;
}
public void read() throws ConverterException {
if (end) {
current = END;
return;
}
int tk;
try {
tk = tokenizer.nextToken();
} catch (IOException e) {
throw new ConverterException("I/O error while reading input");
}
if (tk == StreamTokenizer.TT_EOF) {
end = true;
value = 0;
image = "";
current = END;
return;
}
if (tk == StreamTokenizer.TT_WORD) {
image = tokenizer.sval;
// digit sequences: 0 maps to EMPTY, 1 to EPSILON, any other integer to INT
if (image.charAt(0) >= '0' && image.charAt(0) <= '9') {
try {
value = Integer.parseInt(tokenizer.sval);
image = "";
if (value == 0)
current = EMPTY;
else {
if (value == 1)
current = EPSILON;
else
current = INT;
}
return;
} catch (Exception e) {
current = UNKNOWN;
return;
}
}
value = 0;
current = LABEL;
return;
}
image = "";
value = 0;
switch (tk) {
case '+':
current = UNION;
return;
case '*':
current = STAR;
return;
case '^':
current = ITERATION;
return;
case '|':
current = SHUFFLE;
return;
case '#':
current = MIX;
return;
case '(':
current = OPEN;
return;
case ')':
current = CLOSE;
return;
case '{':
current = OBRACE;
return;
case '}':
current = CBRACE;
return;
default:
current = LABEL;
image = "" + new Character((char) tk);
}
}
public int lineNumber() {
return tokenizer.lineno();
}
public Object label() {
return image;
}
public int value() {
return value;
}
public int current() {
return current;
}
}
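As a rough usage sketch (not part of the original source), the lexer can be driven directly as shown below. The token constants such as END and LABEL are assumed to be defined on the Lexer interface, which this class implements but which is not shown here; the demo class name is hypothetical.

package rationals.converters.analyzers;

import rationals.converters.ConverterException;

public class DefaultLexerDemo {
    public static void main(String[] args) throws ConverterException {
        // Tokenize a small regular expression: letters become labels,
        // '(' / ')' / '*' / '+' become OPEN / CLOSE / STAR / UNION.
        DefaultLexer lexer = new DefaultLexer("(ab)*+c");
        lexer.read();
        while (lexer.current() != Lexer.END) {
            System.out.println(lexer.current() + " -> '" + lexer.label() + "'");
            lexer.read();
        }
    }
}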