Source Code Cross Referenced for Parser.java » IDE-Netbeans » cnd » antlr (Java Source Code / Java Documentation)



package antlr;

/* ANTLR Translator Generator
 * Project led by Terence Parr at http://www.cs.usfca.edu
 * Software rights: http://www.antlr.org/license.html
 */

import antlr.collections.AST;
import antlr.collections.impl.BitSet;
import antlr.debug.*;

import java.util.HashMap;
import java.util.Hashtable;
import java.util.Map;

public abstract class Parser extends MatchExceptionState {
    public static final int MEMO_RULE_FAILED = -2;
    public static final int MEMO_RULE_UNKNOWN = -1;
    public static final int INITIAL_FOLLOW_STACK_SIZE = 100;

    protected ParserSharedInputState inputState;

    /** Nesting level of registered handlers */
    // protected int exceptionLevel = 0;

    /** Table of token type to token names */
    protected String[] tokenNames;

    /** AST return value for a rule is squirreled away here */
    protected AST returnAST;

    /** AST support code; parser delegates to this object.
     *  This is set during parser construction by default
     *  to either "new ASTFactory()" or a ctor that
     *  has a token type to class map for hetero nodes.
     */
    protected ASTFactory astFactory = null;

    /** Constructed if any AST types are specified in tokens{..}.
     *  Maps an Integer token type to a Class object.
     */
    protected Hashtable tokenTypeToASTClassMap = null;

    private boolean ignoreInvalidDebugCalls = false;

    /** Used to keep track of indent depth for traceIn/Out */
    protected int traceDepth = 0;

    /** An array[size num rules] of Map<Integer,Integer> that tracks
     *  the stop token index for each rule.  ruleMemo[ruleIndex] is
     *  the memoization table for ruleIndex.  For key ruleStartIndex, you
     *  get back the stop token for the associated rule or MEMO_RULE_FAILED.
     */
    protected Map[] ruleMemo;

    /** Set to true upon any error; reset upon first valid token match */
    // functionality moved to matchError from the MatchExceptionState
    //protected boolean failed = false;

    public Parser() {
        this(new ParserSharedInputState());
    }

    public Parser(ParserSharedInputState state) {
        inputState = state;
    }

    /** If the user specifies a tokens{} section with heterogeneous
     *  AST node types, then ANTLR generates code to fill
     *  this mapping.
     */
    public Hashtable getTokenTypeToASTClassMap() {
        return tokenTypeToASTClassMap;
    }

    public void addMessageListener(MessageListener l) {
        if (!ignoreInvalidDebugCalls)
            throw new IllegalArgumentException(
                    "addMessageListener() is only valid if parser built for debugging");
    }

    public void addParserListener(ParserListener l) {
        if (!ignoreInvalidDebugCalls)
            throw new IllegalArgumentException(
                    "addParserListener() is only valid if parser built for debugging");
    }

    public void addParserMatchListener(ParserMatchListener l) {
        if (!ignoreInvalidDebugCalls)
            throw new IllegalArgumentException(
                    "addParserMatchListener() is only valid if parser built for debugging");
    }

    public void addParserTokenListener(ParserTokenListener l) {
        if (!ignoreInvalidDebugCalls)
            throw new IllegalArgumentException(
                    "addParserTokenListener() is only valid if parser built for debugging");
    }

    public void addSemanticPredicateListener(SemanticPredicateListener l) {
        if (!ignoreInvalidDebugCalls)
            throw new IllegalArgumentException(
                    "addSemanticPredicateListener() is only valid if parser built for debugging");
    }

    public void addSyntacticPredicateListener(SyntacticPredicateListener l) {
        if (!ignoreInvalidDebugCalls)
            throw new IllegalArgumentException(
                    "addSyntacticPredicateListener() is only valid if parser built for debugging");
    }

    public void addTraceListener(TraceListener l) {
        if (!ignoreInvalidDebugCalls)
            throw new IllegalArgumentException(
                    "addTraceListener() is only valid if parser built for debugging");
    }

    /** Get another token object from the token stream */
    public abstract void consume();

    /** Consume tokens until one matches the given token */
    public void consumeUntil(int tokenType) {
        int LA1 = LA(1);
        while (LA1 != Token.EOF_TYPE && LA1 != tokenType) {
            consume();
            LA1 = LA(1);
        }
    }

    /** Consume tokens until one matches the given token set */
    public void consumeUntil(BitSet set) {
        int LA1 = LA(1);
        while (LA1 != Token.EOF_TYPE && !set.member(LA1)) {
            consume();
            LA1 = LA(1);
        }
    }

    protected void defaultDebuggingSetup(TokenStream lexer, TokenBuffer tokBuf) {
        // by default, do nothing -- we're not debugging
    }

    /** Get the AST return value squirreled away in the parser */
    public AST getAST() {
        return returnAST;
    }

    public ASTFactory getASTFactory() {
        return astFactory;
    }

    public String getFilename() {
        return inputState.filename;
    }

    public ParserSharedInputState getInputState() {
        return inputState;
    }

    public void setInputState(ParserSharedInputState state) {
        inputState = state;
    }

    public String getTokenName(int num) {
        return tokenNames[num];
    }

    public String[] getTokenNames() {
        return tokenNames;
    }

    public boolean isDebugMode() {
        return false;
    }

    /** Return the token type of the ith token of lookahead where i=1
     * is the current token being examined by the parser (i.e., it
     * has not been matched yet).
     */
    public abstract int LA(int i);

    /** Return the ith token of lookahead */
    public abstract Token LT(int i);

    // Forwarded to TokenBuffer
    public int mark() {
        return inputState.input.mark();
    }

    /** Make sure the current lookahead symbol matches token type <tt>t</tt>.
     * Throw an exception upon mismatch, which is caught by either the
     * error handler or by the syntactic predicate.
     */
    public void match(int t) throws MismatchedTokenException {
        assert (matchError == false);
        if (LA(1) != t) {
            matchError = true;
            throw new MismatchedTokenException(tokenNames, LT(1), t,
                    false, getFilename());
        } else {
            // mark token as consumed -- fetch next token deferred until LA/LT
            matchError = false;
            consume();
        }
    }

    /** Make sure the current lookahead symbol matches the given set.
     * Throw an exception upon mismatch, which is caught by either the
     * error handler or by the syntactic predicate.
     */
    public void match(BitSet b) throws MismatchedTokenException {
        assert (matchError == false);
        if (!b.member(LA(1))) {
            matchError = true;
            throw new MismatchedTokenException(tokenNames, LT(1), b,
                    false, getFilename());
        } else {
            // mark token as consumed -- fetch next token deferred until LA/LT
            matchError = false;
            consume();
        }
    }

    public void matchNot(int t) throws MismatchedTokenException {
        assert (matchError == false);
        if (LA(1) == t) {
            // Throws inverted-sense exception
            matchError = true;
            throw new MismatchedTokenException(tokenNames, LT(1), t,
                    true, getFilename());
        } else {
            // mark token as consumed -- fetch next token deferred until LA/LT
            matchError = false;
            consume();
        }
    }
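
    /* Illustrative sketch of the intended calling pattern for the match()
     * family (hypothetical names: ID, SEMI and FOLLOW_SET stand in for a
     * grammar's token types and follow set).  match() pairs naturally with
     * reportError() and recover() defined below:
     *
     *     try {
     *         match(ID);               // throws and sets matchError on mismatch
     *         match(SEMI);
     *     } catch (MismatchedTokenException e) {
     *         reportError(e);          // overridable reporting hook
     *         recover(e, FOLLOW_SET);  // consume(), then consumeUntil(FOLLOW_SET)
     *     }
     */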

    /** @deprecated as of 2.7.2. This method calls System.exit() and writes
     *  directly to stderr, which is usually not appropriate when
     *  a parser is embedded into a larger application. Since the method is
     *  <code>static</code>, it cannot be overridden to avoid these problems.
     *  ANTLR no longer uses this method internally or in generated code.
     */
    public static void panic() {
        System.err.println("Parser: panic");
        System.exit(1);
    }

    public void removeMessageListener(MessageListener l) {
        if (!ignoreInvalidDebugCalls)
            throw new RuntimeException(
                    "removeMessageListener() is only valid if parser built for debugging");
    }

    public void removeParserListener(ParserListener l) {
        if (!ignoreInvalidDebugCalls)
            throw new RuntimeException(
                    "removeParserListener() is only valid if parser built for debugging");
    }

    public void removeParserMatchListener(ParserMatchListener l) {
        if (!ignoreInvalidDebugCalls)
            throw new RuntimeException(
                    "removeParserMatchListener() is only valid if parser built for debugging");
    }

    public void removeParserTokenListener(ParserTokenListener l) {
        if (!ignoreInvalidDebugCalls)
            throw new RuntimeException(
                    "removeParserTokenListener() is only valid if parser built for debugging");
    }

    public void removeSemanticPredicateListener(SemanticPredicateListener l) {
        if (!ignoreInvalidDebugCalls)
            throw new IllegalArgumentException(
                    "removeSemanticPredicateListener() is only valid if parser built for debugging");
    }

    public void removeSyntacticPredicateListener(SyntacticPredicateListener l) {
        if (!ignoreInvalidDebugCalls)
            throw new IllegalArgumentException(
                    "removeSyntacticPredicateListener() is only valid if parser built for debugging");
    }

    public void removeTraceListener(TraceListener l) {
        if (!ignoreInvalidDebugCalls)
            throw new RuntimeException(
                    "removeTraceListener() is only valid if parser built for debugging");
    }

    /** Parser error-reporting function can be overridden in subclass */
    public void reportError(RecognitionException ex) {
        System.err.println(ex);
    }

    /** Parser error-reporting function can be overridden in subclass */
    public void reportError(String s) {
        if (getFilename() == null) {
            System.err.println("error: " + s);
        } else {
            System.err.println(getFilename() + ": error: " + s);
        }
    }

    /** Parser warning-reporting function can be overridden in subclass */
    public void reportWarning(String s) {
        if (getFilename() == null) {
            System.err.println("warning: " + s);
        } else {
            System.err.println(getFilename() + ": warning: " + s);
        }
    }

    public void recover(RecognitionException ex, BitSet tokenSet) {
        consume();
        consumeUntil(tokenSet);
    }

    public void rewind(int pos) {
        inputState.input.rewind(pos);
    }

    /** Specify an object with support code (shared by
     *  Parser and TreeParser).  Normally, the programmer
     *  does not play with this, using setASTNodeType instead.
     */
    public void setASTFactory(ASTFactory f) {
        astFactory = f;
    }

    public void setASTNodeClass(String cl) {
        astFactory.setASTNodeClass(cl);
    }

    /** Specify the type of node to create during tree building; use setASTNodeClass now
     *  to be consistent with the Token Object Type accessor.
     *  @deprecated since 2.7.1
     */
    public void setASTNodeType(String nodeType) {
        setASTNodeClass(nodeType);
    }

    public void setDebugMode(boolean debugMode) {
        if (!ignoreInvalidDebugCalls)
            throw new RuntimeException(
                    "setDebugMode() only valid if parser built for debugging");
    }

    public void setFilename(String f) {
        inputState.filename = f;
    }

    public void setIgnoreInvalidDebugCalls(boolean value) {
        ignoreInvalidDebugCalls = value;
    }

    /** Set or change the input token buffer */
    public void setTokenBuffer(TokenBuffer t) {
        inputState.input = t;
    }

    public void traceIndent() {
        for (int i = 0; i < traceDepth; i++)
            System.out.print(" ");
    }

    public void traceIn(String rname) {
        traceDepth += 1;
        traceIndent();
        System.out.println("> "
                + rname
                + "; LA(1)=="
                + LT(1).getText()
                + ((inputState.guessing > 0) ? " [guessing="
                        + inputState.guessing + "]" : ""));
    }

    public void traceOut(String rname) {
        traceIndent();
        System.out.println("< "
                + rname
                + "; LA(1)=="
                + LT(1).getText()
                + ((inputState.guessing > 0) ? " [guessing="
                        + inputState.guessing + "]" : ""));
        traceDepth -= 1;
    }

    /** Given a rule number and a start token index number, return
     *  MEMO_RULE_UNKNOWN if the rule has not parsed input starting from
     *  the start index.  If this rule has parsed input starting from the
     *  start index before, then return where the rule stopped parsing.
     *  It returns the index of the last token matched by the rule.
     *
     *  For now we use a hashtable and just the slow Object-based one.
     *  Later, we can make a special one for ints and also one that
     *  tosses out data after we commit past input position i.
     */
    public int getRuleMemoization(int ruleIndex, int ruleStartIndex) {
        if (ruleMemo[ruleIndex] == null) {
            ruleMemo[ruleIndex] = new HashMap();
        }
        Integer stopIndexI = (Integer) ruleMemo[ruleIndex]
                .get(new Integer(ruleStartIndex));
        if (stopIndexI == null) {
            return MEMO_RULE_UNKNOWN;
        }
        return stopIndexI.intValue();
    }

    /** Has this rule already parsed input at the current index in the
     *  input stream?  Return the stop token index or MEMO_RULE_UNKNOWN.
     *  If we attempted but failed to parse properly before, return
     *  MEMO_RULE_FAILED.
     *
     *  This method has a side effect: if we have seen this input for
     *  this rule and successfully parsed before, then seek ahead to
     *  1 past the stop token matched for this rule last time.
     */
    public boolean alreadyParsedRule(int ruleIndex) {
        //System.out.println("alreadyParsedRule("+ruleIndex+","+inputState.input.index()+")");
        int stopIndex = getRuleMemoization(ruleIndex, inputState.input.index());
        if (stopIndex == MEMO_RULE_UNKNOWN) {
            //System.out.println("rule unknown");
            return false;
        }
        if (stopIndex == MEMO_RULE_FAILED) {
            //System.out.println("rule "+ruleIndex+" will never succeed");
            matchError = true;
        } else {
            /*System.out.println("seen rule "+ruleIndex+" before; skipping ahead to "+
                inputState.input.get(stopIndex+1)+"@"+(stopIndex+1)+" failed="+matchError);
             */
            matchError = false;
            inputState.input.seek(stopIndex + 1); // jump to one past stop token
        }
        return true;
    }

    /** Record whether or not this rule parsed the input at this position
     *  successfully.  Use a standard Java hashtable for now.
     */
    public void memoize(int ruleIndex, int ruleStartIndex) {
        int stopTokenIndex = matchError ? MEMO_RULE_FAILED
                : inputState.input.index() - 1;
        //System.out.println("memoize("+ruleIndex+", "+ruleStartIndex+"); failed="+matchError+" stop="+stopTokenIndex);
        if (ruleMemo[ruleIndex] != null) {
            ruleMemo[ruleIndex].put(new Integer(ruleStartIndex),
                    new Integer(stopTokenIndex));
        }
    }
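
    /* Illustrative sketch of how the memoization hooks above are meant to be
     * used by a rule method in a subclass (RULE_EXPR is a hypothetical rule
     * index; the rule body itself is elided):
     *
     *     public void expr() throws RecognitionException {
     *         int start = inputState.input.index();   // token index at rule entry
     *         if (alreadyParsedRule(RULE_EXPR)) {
     *             return;  // prior success (input repositioned) or prior failure (matchError set)
     *         }
     *         // ... match the rule body here ...
     *         memoize(RULE_EXPR, start);  // records the stop index, or MEMO_RULE_FAILED if matchError is set
     *     }
     */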
}
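
The three abstract methods above (consume(), LA() and LT()) are the contract a concrete parser must fill in. As a rough, hypothetical sketch (not taken from this codebase), a minimal subclass can forward them to the TokenBuffer held in the shared input state, which is how ANTLR's stock LLkParser is wired. The sketch assumes this fork's TokenBuffer exposes consume(), LA(int) and LT(int) without checked exceptions, consistent with the undeclared throws on the abstract methods; the ID token type is a placeholder.

// Hypothetical file antlr/MiniParser.java: a minimal sketch, not part of the library.
package antlr;

public class MiniParser extends Parser {

    // Placeholder token type; a generated token vocabulary would define the real values.
    private static final int ID = 4;

    public MiniParser(TokenBuffer tokBuf) {
        super(new ParserSharedInputState());
        setTokenBuffer(tokBuf);              // inputState.input = tokBuf
    }

    public void consume() {
        inputState.input.consume();          // advance past the current lookahead token
    }

    public int LA(int i) {
        return inputState.input.LA(i);       // type of the i-th lookahead token (assumed API)
    }

    public Token LT(int i) {
        return inputState.input.LT(i);       // the i-th lookahead Token object (assumed API)
    }

    /** Example start rule: a single identifier followed by end of input. */
    public void idOnly() throws MismatchedTokenException {
        match(ID);                           // throws (and sets matchError) on mismatch
        match(Token.EOF_TYPE);
    }
}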