// $ANTLR : "tokdef.g" -> "ANTLRTokdefParser.java"$
package persistence.antlr;

import persistence.antlr.TokenBuffer;
import persistence.antlr.TokenStreamException;
import persistence.antlr.TokenStreamIOException;
import persistence.antlr.ANTLRException;
import persistence.antlr.LLkParser;
import persistence.antlr.Token;
import persistence.antlr.TokenStream;
import persistence.antlr.RecognitionException;
import persistence.antlr.NoViableAltException;
import persistence.antlr.MismatchedTokenException;
import persistence.antlr.SemanticException;
import persistence.antlr.ParserSharedInputState;
import persistence.antlr.collections.impl.BitSet;

/** Simple lexer/parser for reading token definition files
in support of the import/export vocab option for grammars.
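<p>
For illustration, a vocab file accepted by this parser looks like the
sketch below (derived from the alternatives in line(), not from real
tool output):
<pre>
MyVocab
BEGIN=4
"int"=5
KW_INT="int"=6
SEMI("a semicolon")=7
</pre>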
*/
public class ANTLRTokdefParser extends persistence.antlr.LLkParser
        implements ANTLRTokdefParserTokenTypes {

    // This chunk of error reporting code was provided by Brian Smith.

    private persistence.antlr.Tool antlrTool;

    /** To avoid breaking existing subclasses, the persistence.antlr.Tool
     * instance is not required as a constructor argument. Instead, the
     * tool should register itself via {@link #setTool(persistence.antlr.Tool)}.
     * @throws IllegalStateException if a tool has already been registered
     * @since 2.7.2
     */
    public void setTool(persistence.antlr.Tool tool) {
        if (antlrTool == null) {
            antlrTool = tool;
        } else {
            throw new IllegalStateException(
                    "persistence.antlr.Tool already registered");
        }
    }

    /** @since 2.7.2 */
    protected persistence.antlr.Tool getTool() {
        return antlrTool;
    }

    /** Delegates the error message to the tool if one was registered via
     * {@link #setTool(persistence.antlr.Tool)}.
     * @since 2.7.2
     */
    public void reportError(String s) {
        if (getTool() != null) {
            getTool().error(s, getFilename(), -1, -1);
        } else {
            super.reportError(s);
        }
    }

    /** Delegates the error message to the tool if one was registered via
     * {@link #setTool(persistence.antlr.Tool)}.
     * @since 2.7.2
     */
    public void reportError(RecognitionException e) {
        if (getTool() != null) {
            getTool().error(e.getErrorMessage(), e.getFilename(),
                    e.getLine(), e.getColumn());
        } else {
            super.reportError(e);
        }
    }

    /** Delegates the warning message to the tool if one was registered via
     * {@link #setTool(persistence.antlr.Tool)}.
     * @since 2.7.2
     */
    public void reportWarning(String s) {
        if (getTool() != null) {
            getTool().warning(s, getFilename(), -1, -1);
        } else {
            super.reportWarning(s);
        }
    }

    protected ANTLRTokdefParser(TokenBuffer tokenBuf, int k) {
        super(tokenBuf, k);
        tokenNames = _tokenNames;
    }

    public ANTLRTokdefParser(TokenBuffer tokenBuf) {
        this(tokenBuf, 3);
    }

    protected ANTLRTokdefParser(TokenStream lexer, int k) {
        super(lexer, k);
        tokenNames = _tokenNames;
    }

    public ANTLRTokdefParser(TokenStream lexer) {
        this(lexer, 3);
    }

    public ANTLRTokdefParser(ParserSharedInputState state) {
        super(state, 3);
        tokenNames = _tokenNames;
    }
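
    // Typical driver, shown as an illustrative sketch: it assumes the
    // companion ANTLRTokdefLexer generated from the same tokdef.g, plus
    // an ImportVocabTokenManager (tm) and a Tool (tool) supplied by the
    // caller.
    //
    //   ANTLRTokdefLexer lexer = new ANTLRTokdefLexer(
    //           new java.io.FileReader("MyVocabTokenTypes.txt"));
    //   ANTLRTokdefParser parser = new ANTLRTokdefParser(lexer);
    //   parser.setTool(tool);          // route diagnostics through the tool
    //   parser.setFilename("MyVocabTokenTypes.txt");
    //   parser.file(tm);               // populate the token manager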

    public final void file(ImportVocabTokenManager tm)
            throws RecognitionException, TokenStreamException {

        Token name = null;

        try { // for error handling
            // vocab name header, then zero or more definition lines
            name = LT(1);
            match(ID);
            {
                _loop225: do {
                    if ((LA(1) == ID || LA(1) == STRING)) {
                        line(tm);
                    } else {
                        break _loop225;
                    }
                } while (true);
            }
        } catch (RecognitionException ex) {
            reportError(ex);
            consume();
            consumeUntil(_tokenSet_0);
        }
    }

    public final void line(ImportVocabTokenManager tm)
            throws RecognitionException, TokenStreamException {

        Token s1 = null;
        Token lab = null;
        Token s2 = null;
        Token id = null;
        Token para = null;
        Token id2 = null;
        Token i = null;
        Token t = null;
        Token s = null;

        try { // for error handling
            {
                if ((LA(1) == STRING)) {
                    // "literal"=INT
                    s1 = LT(1);
                    match(STRING);
                    s = s1;
                } else if ((LA(1) == ID) && (LA(2) == ASSIGN)
                        && (LA(3) == STRING)) {
                    // LABEL="literal"=INT
                    lab = LT(1);
                    match(ID);
                    t = lab;
                    match(ASSIGN);
                    s2 = LT(1);
                    match(STRING);
                    s = s2;
                } else if ((LA(1) == ID) && (LA(2) == LPAREN)) {
                    // ID("paraphrase")=INT
                    id = LT(1);
                    match(ID);
                    t = id;
                    match(LPAREN);
                    para = LT(1);
                    match(STRING);
                    match(RPAREN);
                } else if ((LA(1) == ID) && (LA(2) == ASSIGN)
                        && (LA(3) == INT)) {
                    // ID=INT
                    id2 = LT(1);
                    match(ID);
                    t = id2;
                } else {
                    throw new NoViableAltException(LT(1), getFilename());
                }
            }
            // every alternative ends with =INT
            match(ASSIGN);
            i = LT(1);
            match(INT);

            int value = Integer.parseInt(i.getText());
            // if a literal was found, define it as a string literal
            if (s != null) {
                tm.define(s.getText(), value);
                // if labeled, attach the label and map it to the same symbol
                if (t != null) {
                    StringLiteralSymbol sl = (StringLiteralSymbol) tm
                            .getTokenSymbol(s.getText());
                    sl.setLabel(t.getText());
                    tm.mapToTokenSymbol(t.getText(), sl);
                }
            }
            // otherwise define a plain token (not a literal)
            else if (t != null) {
                tm.define(t.getText(), value);
                if (para != null) {
                    TokenSymbol ts = tm.getTokenSymbol(t.getText());
                    ts.setParaphrase(para.getText());
                }
            }

        } catch (RecognitionException ex) {
            reportError(ex);
            consume();
            consumeUntil(_tokenSet_1);
        }
    }
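
    // Worked example: the input line KW_INT="int"=6 takes the second
    // alternative above, so line() defines the literal "int" with token
    // type 6, attaches the label KW_INT to the resulting
    // StringLiteralSymbol, and maps the label to that same symbol, so
    // both names resolve to type 6.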

    public static final String[] _tokenNames = { "<0>", "EOF", "<2>",
            "NULL_TREE_LOOKAHEAD", "ID", "STRING", "ASSIGN", "LPAREN",
            "RPAREN", "INT", "WS", "SL_COMMENT", "ML_COMMENT", "ESC",
            "DIGIT", "XDIGIT" };
    private static final long[] mk_tokenSet_0() {
        long[] data = { 2L, 0L };
        return data;
    }

    public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());

    private static final long[] mk_tokenSet_1() {
        long[] data = { 50L, 0L };
        return data;
    }

    public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1());

}