package antlr;

/* ANTLR Translator Generator
 * Project led by Terence Parr at http://www.cs.usfca.edu
 * Software rights: http://www.antlr.org/license.html
 */

import java.io.*;
import java.util.Hashtable;
import java.util.Enumeration;

import antlr.collections.impl.Vector;

/** Static implementation of the TokenManager, used for importVocab option */
class ImportVocabTokenManager extends SimpleTokenManager implements Cloneable {
    private String filename;
    protected Grammar grammar;

    // FIXME: it would be nice if the path to the original grammar file was
    // also searched.
    ImportVocabTokenManager(Grammar grammar, String filename_, String name_, Tool tool_) {
        // initialize
        super(name_, tool_);

        this.grammar = grammar;
        this.filename = filename_;

        // Figure out exactly where the file lives. Check $PWD first,
        // and then search in -o <output_dir>.
        //
        File grammarFile = new File(filename);

        if (!grammarFile.exists()) {
            grammarFile = new File(antlrTool.getOutputDirectory(), filename);

            if (!grammarFile.exists()) {
                antlrTool.fatalError("Cannot find importVocab file '" + filename + "'");
            }
        }

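        // Mark the imported vocabulary as read-only; per isReadOnly() below,
        // regenerating it would only reproduce the input file.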
        setReadOnly(true);

        // Read a file with lines of the form ID=number
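        // (string literals from the grammar appear quoted, e.g. "begin"=5,
        // and are handled by define(String, int) below)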
        try {
            Reader fileIn = new BufferedReader(new FileReader(grammarFile));
            ANTLRTokdefLexer tokdefLexer = new ANTLRTokdefLexer(fileIn);
            ANTLRTokdefParser tokdefParser = new ANTLRTokdefParser(tokdefLexer);
            tokdefParser.setTool(antlrTool);
            tokdefParser.setFilename(filename);
            tokdefParser.file(this);
        } catch (FileNotFoundException fnf) {
            antlrTool.fatalError("Cannot find importVocab file '" + filename + "'");
        } catch (RecognitionException ex) {
            antlrTool.fatalError("Error parsing importVocab file '" + filename + "': " + ex.toString());
        } catch (TokenStreamException ex) {
            antlrTool.fatalError("Error reading importVocab file '" + filename + "'");
        }
    }

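    /** Clone this token manager; the copy refers to the same grammar and
     *  vocabulary file name as the original. */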
    public Object clone() {
        ImportVocabTokenManager tm;
        tm = (ImportVocabTokenManager)super.clone();
        tm.filename = this.filename;
        tm.grammar = this.grammar;
        return tm;
    }

    /** define a token. */
    public void define(TokenSymbol ts) {
        super.define(ts);
    }

    /** define a token.  Intended for use only when reading the importVocab file. */
    public void define(String s, int ttype) {
        TokenSymbol ts = null;
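        // A leading double quote means the token text is a string literal
        // from the grammar rather than a named token.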
        if (s.startsWith("\"")) {
            ts = new StringLiteralSymbol(s);
        } else {
            ts = new TokenSymbol(s);
        }
        ts.setTokenType(ttype);
        super.define(ts);
        maxToken = (ttype + 1) > maxToken ? (ttype + 1) : maxToken; // record maximum token type
    }

    /** importVocab token manager is read-only if output would be same as input */
    public boolean isReadOnly() {
        return readOnly;
    }

    /** Get the next unused token type. */
    public int nextTokenType() {
        return super.nextTokenType();
    }
}