Source Code Cross Referenced for MakeGrammar.java in » Parser » Rats-Parser-Generators » antlr » Java Source Code / Java Documentation



001:        package antlr;
002:
003:        /* ANTLR Translator Generator
004:         * Project led by Terence Parr at http://www.jGuru.com
005:         * Software rights: http://www.antlr.org/RIGHTS.html
006:         *
007:         * $Id: MakeGrammar.java,v 1.1 2004/01/21 19:18:32 rgrimm Exp $
008:         */
009:
010:        import antlr.collections.Stack;
011:        import antlr.collections.impl.LList;
012:        import antlr.collections.impl.Vector;
013:
014:        public class MakeGrammar extends DefineGrammarSymbols {
015:
016:            protected Stack blocks = new LList(); // track subrules--Stack<BlockContext>
017:            protected RuleRefElement lastRuleRef;
018:
019:            protected RuleEndElement ruleEnd; // used if not nested
020:            protected RuleBlock ruleBlock; // points to block of current rule.
021:            protected int nested = 0; // nesting inside a subrule
022:            protected boolean grammarError = false;
023:
024:            ExceptionSpec currentExceptionSpec = null;
025:
026:            public MakeGrammar(Tool tool_, String[] args_, LLkAnalyzer analyzer_) {
027:                super (tool_, args_, analyzer_);
028:            }
029:
030:            /** Abort the processing of a grammar (due to syntax errors) */
031:            public void abortGrammar() {
032:                String s = "unknown grammar";
033:                if (grammar != null) {
034:                    s = grammar.getClassName();
035:                }
036:                tool.error("aborting grammar '" + s + "' due to errors");
037:                super .abortGrammar();
038:            }
039:
040:            protected void addElementToCurrentAlt(AlternativeElement e) {
041:                e.enclosingRuleName = ruleBlock.ruleName;
042:                context().addAlternativeElement(e);
043:            }
044:
045:            public void beginAlt(boolean doAutoGen_) {
046:                super .beginAlt(doAutoGen_);
047:                Alternative alt = new Alternative();
048:                alt.setAutoGen(doAutoGen_);
049:                context().block.addAlternative(alt);
050:            }
051:
052:            public void beginChildList() {
053:                super .beginChildList();
054:                context().block.addAlternative(new Alternative());
055:            }
056:
057:            /** Add an exception group to a rule (currently a no-op) */
058:            public void beginExceptionGroup() {
059:                super .beginExceptionGroup();
060:                if (!(context().block instanceof  RuleBlock)) {
061:                    tool
062:                            .panic("beginExceptionGroup called outside of rule block");
063:                }
064:            }
065:
066:            /** Add an exception spec to an exception group or rule block */
067:            public void beginExceptionSpec(Token label) {
068:                // Hack the label string a bit to remove leading/trailing space.
069:                if (label != null) {
070:                    label.setText(StringUtils.stripFront(StringUtils.stripBack(
071:                            label.getText(), " \n\r\t"), " \n\r\t"));
072:                }
073:                super .beginExceptionSpec(label);
074:                // Don't check for currentExceptionSpec!=null because syntax errors
075:                // may leave it set to something.
076:                currentExceptionSpec = new ExceptionSpec(label);
077:            }
078:
079:            public void beginSubRule(Token label, Token start, boolean not) {
080:                super .beginSubRule(label, start, not);
081:                // we don't know what kind of subrule it is yet.
082:                // push a dummy one that will allow us to collect the
083:                // alternatives.  Later, we'll switch to real object.
084:                blocks.push(new BlockContext());
085:                context().block = new AlternativeBlock(grammar, start, not);
086:                context().altNum = 0; // reset alternative number
087:                nested++;
088:                // create a final node to which the last element of each
089:                // alternative will point.
090:                context().blockEnd = new BlockEndElement(grammar);
091:                // make sure end node points to start of block
092:                context().blockEnd.block = context().block;
093:                labelElement(context().block, label);
094:            }
095:
096:            public void beginTree(Token tok) throws SemanticException {
097:                if (!(grammar instanceof  TreeWalkerGrammar)) {
098:                    tool.error("Trees only allowed in TreeParser", grammar
099:                            .getFilename(), tok.getLine(), tok.getColumn());
100:                    throw new SemanticException(
101:                            "Trees only allowed in TreeParser");
102:                }
103:                super .beginTree(tok);
104:                blocks.push(new TreeBlockContext());
105:                context().block = new TreeElement(grammar, tok);
106:                context().altNum = 0; // reset alternative number
107:            }
108:
109:            public BlockContext context() {
110:                if (blocks.height() == 0) {
111:                    return null;
112:                } else {
113:                    return (BlockContext) blocks.top();
114:                }
115:            }
116:
117:            /** Used to build nextToken() for the lexer.
118:             * This builds a rule which has every "public" rule in the given Vector of
119:             * rules as an alternative.  Each rule ref generates a Token object.
120:             * @param g  The Grammar that is being processed
121:             * @param lexRules A vector of lexer rules that will be used to create an alternate block.
122:             * @param rname The name of the resulting rule.
123:             */
124:            public static RuleBlock createNextTokenRule(Grammar g,
125:                    Vector lexRules, String rname) {
126:                // create actual rule data structure
127:                RuleBlock rb = new RuleBlock(g, rname);
128:                rb.setDefaultErrorHandler(g.getDefaultErrorHandler());
129:                RuleEndElement ruleEnd = new RuleEndElement(g);
130:                rb.setEndElement(ruleEnd);
131:                ruleEnd.block = rb;
132:                // Add an alternative for each element of the rules vector.
133:                for (int i = 0; i < lexRules.size(); i++) {
134:                    RuleSymbol r = (RuleSymbol) lexRules.elementAt(i);
135:                    if (!r.isDefined()) {
136:                        g.antlrTool.error("Lexer rule " + r.id.substring(1)
137:                                + " is not defined");
138:                    } else {
139:                        if (r.access.equals("public")) {
140:                            Alternative alt = new Alternative(); // create alt we'll add to ref rule
141:                            RuleBlock targetRuleBlock = r.getBlock();
142:                            Vector targetRuleAlts = targetRuleBlock
143:                                    .getAlternatives();
144:                            // collect a sem pred if only one alt and it's at the start;
145:                            // simple, but faster to implement until real hoisting
146:                            if (targetRuleAlts != null
147:                                    && targetRuleAlts.size() == 1) {
148:                                Alternative onlyAlt = (Alternative) targetRuleAlts
149:                                        .elementAt(0);
150:                                if (onlyAlt.semPred != null) {
151:                                    // ok, has sem pred, make this rule ref alt have a pred
152:                                    alt.semPred = onlyAlt.semPred;
153:                                    // REMOVE predicate from target rule???  NOPE, another
154:                                    // rule other than nextToken() might invoke it.
155:                                }
156:                            }
157:
158:                            // create a rule ref to lexer rule
159:                            // the Token is a RULE_REF not a TOKEN_REF since the
160:                            // conversion to mRulename has already taken place
161:                            RuleRefElement rr = new RuleRefElement(g,
162:                                    new CommonToken(ANTLRTokenTypes.RULE_REF, r
163:                                            .getId()),
164:                                    GrammarElement.AUTO_GEN_NONE);
165:                            rr.setLabel("theRetToken");
166:                            rr.enclosingRuleName = "nextToken";
167:                            rr.next = ruleEnd;
168:                            alt.addElement(rr); // add rule ref to alt
169:                            alt.setAutoGen(true); // keep text of elements
170:                            rb.addAlternative(alt); // add alt to rule block
171:                            r.addReference(rr); // track ref to this rule in rule blk
172:                        }
173:                    }
174:                }
175:
176:                rb.setAutoGen(true); // keep text of elements
177:                rb.prepareForAnalysis();
178:                //System.out.println(rb);
179:                return rb;
180:            }
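
A minimal standalone sketch of the idea behind createNextTokenRule(), using simplified stand-in types rather than the antlr RuleBlock/Alternative/RuleRefElement classes above: nextToken() ends up as one rule whose alternatives are nothing but rule references, one per public lexer rule.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    // Stand-in types for illustration only; not the antlr classes.
    class NextTokenSketch {
        static class RuleRef { final String target; RuleRef(String t) { target = t; } }
        static class Alt { final List<Object> elements = new ArrayList<Object>(); }
        static class Rule {
            final String name;
            final List<Alt> alts = new ArrayList<Alt>();
            Rule(String n) { name = n; }
        }

        // One alternative per public lexer rule; each alternative is a single rule reference.
        static Rule createNextTokenRule(List<String> publicLexerRules) {
            Rule nextToken = new Rule("nextToken");
            for (String r : publicLexerRules) {
                Alt alt = new Alt();
                alt.elements.add(new RuleRef(r));
                nextToken.alts.add(alt);
            }
            return nextToken;
        }

        public static void main(String[] args) {
            Rule nt = createNextTokenRule(Arrays.asList("ID", "INT", "WS"));
            System.out.println(nt.name + " has " + nt.alts.size() + " alternatives");
        }
    }
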
181:
182:            /** Return a block as if the user had typed: "( rule )?" */
183:            private AlternativeBlock createOptionalRuleRef(String rule,
184:                    Token start) {
185:                // Make the subrule
186:                AlternativeBlock blk = new AlternativeBlock(grammar, start,
187:                        false);
188:
189:                // Make sure rule is defined
190:                String mrule = CodeGenerator.encodeLexerRuleName(rule); // can only be a lexer rule!
191:                if (!grammar.isDefined(mrule)) {
192:                    grammar.define(new RuleSymbol(mrule));
193:                }
194:
195:                // Make the rule ref element
196:                // RK: fixme probably easier to abuse start token..
197:                Token t = new CommonToken(ANTLRTokenTypes.TOKEN_REF, rule);
198:                t.setLine(start.getLine());
199:                t.setColumn(start.getColumn());
200:                RuleRefElement rref = new RuleRefElement(grammar, t,
201:                        GrammarElement.AUTO_GEN_NONE);
202:
203:                rref.enclosingRuleName = ruleBlock.ruleName;
204:
205:                // Make the end of block element
206:                BlockEndElement end = new BlockEndElement(grammar);
207:                end.block = blk; // end block points back to start of blk
208:
209:                // Make an alternative, putting the rule ref into it
210:                Alternative alt = new Alternative(rref);
211:                alt.addElement(end); // last element in alt points to end of block
212:
213:                // Add the alternative to this block
214:                blk.addAlternative(alt);
215:
216:                // create an empty (optional) alt and add to blk
217:                Alternative optAlt = new Alternative();
218:                optAlt.addElement(end); // points immediately to end of block
219:
220:                blk.addAlternative(optAlt);
221:
222:                blk.prepareForAnalysis();
223:                return blk;
224:            }
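
The shape this helper builds can be sketched on its own: a block with one alternative that references the rule and a second, empty alternative, which is exactly what makes the reference optional. The types below are illustrative stand-ins, not the antlr classes; optionalSubRule() further down relies on the same trick by appending an empty alternative.

    import java.util.ArrayList;
    import java.util.List;

    class OptionalBlockSketch {
        static class Alt { final List<String> elements = new ArrayList<String>(); }
        static class Block { final List<Alt> alts = new ArrayList<Alt>(); }

        // "( rule )?" == a block with a rule-reference alternative plus an empty one.
        static Block optionalRuleRef(String rule) {
            Block blk = new Block();
            Alt call = new Alt();
            call.elements.add("ref:" + rule);
            blk.alts.add(call);
            blk.alts.add(new Alt()); // empty alternative: matching nothing is allowed
            return blk;
        }

        public static void main(String[] args) {
            System.out.println(optionalRuleRef("WS").alts.size() + " alternatives"); // prints: 2 alternatives
        }
    }
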
225:
226:            public void defineRuleName(Token r, String access,
227:                    boolean ruleAutoGen, String docComment)
228:                    throws SemanticException {
229:                //		if ( Character.isUpperCase(r.getText().charAt(0)) ) {
230:                if (r.type == ANTLRTokenTypes.TOKEN_REF) {
231:                    if (!(grammar instanceof  LexerGrammar)) {
232:                        tool.error("Lexical rule " + r.getText()
233:                                + " defined outside of lexer", grammar
234:                                .getFilename(), r.getLine(), r.getColumn());
235:                        r.setText(r.getText().toLowerCase());
236:                    }
237:                } else {
238:                    if (grammar instanceof  LexerGrammar) {
239:                        tool.error("Lexical rule names must be upper case, '"
240:                                + r.getText() + "' is not", grammar
241:                                .getFilename(), r.getLine(), r.getColumn());
242:                        r.setText(r.getText().toUpperCase());
243:                    }
244:                }
245:
246:                super .defineRuleName(r, access, ruleAutoGen, docComment);
247:                String id = r.getText();
248:                //		if ( Character.isUpperCase(id.charAt(0)) ) { // lexer rule?
249:                if (r.type == ANTLRTokenTypes.TOKEN_REF) { // lexer rule?
250:                    id = CodeGenerator.encodeLexerRuleName(id);
251:                }
252:                RuleSymbol rs = (RuleSymbol) grammar.getSymbol(id);
253:                RuleBlock rb = new RuleBlock(grammar, r.getText(), r.getLine(),
254:                        ruleAutoGen);
255:
256:                // Lexer rules do not generate default error handling
257:                rb.setDefaultErrorHandler(grammar.getDefaultErrorHandler());
258:
259:                ruleBlock = rb;
260:                blocks.push(new BlockContext()); // enter new context
261:                context().block = rb;
262:                rs.setBlock(rb);
263:                ruleEnd = new RuleEndElement(grammar);
264:                rb.setEndElement(ruleEnd);
265:                nested = 0;
266:            }
267:
268:            public void endAlt() {
269:                super .endAlt();
270:                if (nested == 0) { // all rule-level alts link to ruleEnd node
271:                    addElementToCurrentAlt(ruleEnd);
272:                } else {
273:                    addElementToCurrentAlt(context().blockEnd);
274:                }
275:                context().altNum++;
276:            }
277:
278:            public void endChildList() {
279:                super .endChildList();
280:                // create a final node to which the last element of the single
281:                // alternative will point.  Done for compatibility with analyzer.
282:                // Does NOT point to any block like alternative blocks because the
283:                // TreeElement is not a block.  This is used only as a placeholder.
284:                BlockEndElement be = new BlockEndElement(grammar);
285:                be.block = context().block;
286:                addElementToCurrentAlt(be);
287:            }
288:
289:            public void endExceptionGroup() {
290:                super .endExceptionGroup();
291:            }
292:
293:            public void endExceptionSpec() {
294:                super .endExceptionSpec();
295:                if (currentExceptionSpec == null) {
296:                    tool
297:                            .panic("exception processing internal error -- no active exception spec");
298:                }
299:                if (context().block instanceof  RuleBlock) {
300:                    // Named rule
301:                    ((RuleBlock) context().block)
302:                            .addExceptionSpec(currentExceptionSpec);
303:                } else {
304:                    // It must be a plain-old alternative block
305:                    if (context().currentAlt().exceptionSpec != null) {
306:                        tool
307:                                .error(
308:                                        "Alternative already has an exception specification",
309:                                        grammar.getFilename(), context().block
310:                                                .getLine(), context().block
311:                                                .getColumn());
312:                    } else {
313:                        context().currentAlt().exceptionSpec = currentExceptionSpec;
314:                    }
315:                }
316:                currentExceptionSpec = null;
317:            }
318:
319:            /** Called at the end of processing a grammar */
320:            public void endGrammar() {
321:                if (grammarError) {
322:                    abortGrammar();
323:                } else {
324:                    super .endGrammar();
325:                }
326:            }
327:
328:            public void endRule(String rule) {
329:                super .endRule(rule);
330:                BlockContext ctx = (BlockContext) blocks.pop(); // remove scope
331:                // record the start of this block in the ending node
332:                ruleEnd.block = ctx.block;
333:                ruleEnd.block.prepareForAnalysis();
334:                //System.out.println(ctx.block);
335:            }
336:
337:            public void endSubRule() {
338:                super .endSubRule();
339:                nested--;
340:                // remove subrule context from scope stack
341:                BlockContext ctx = (BlockContext) blocks.pop();
342:                AlternativeBlock block = ctx.block;
343:
344:                // If the subrule is marked with ~, check that it is
345:                // a valid candidate for analysis
346:                if (block.not && !(block instanceof  SynPredBlock)
347:                        && !(block instanceof  ZeroOrMoreBlock)
348:                        && !(block instanceof  OneOrMoreBlock)) {
349:                    if (!analyzer.subruleCanBeInverted(block,
350:                            grammar instanceof  LexerGrammar)) {
351:                        String newline = System.getProperty("line.separator");
352:                        tool
353:                                .error(
354:                                        "This subrule cannot be inverted.  Only subrules of the form:"
355:                                                + newline
356:                                                + "    (T1|T2|T3...) or"
357:                                                + newline
358:                                                + "    ('c1'|'c2'|'c3'...)"
359:                                                + newline
360:                                                + "may be inverted (ranges are also allowed).",
361:                                        grammar.getFilename(), block.getLine(),
362:                                        block.getColumn());
363:                    }
364:                }
365:
366:                // add the subrule as element if not a syn pred
367:                if (block instanceof  SynPredBlock) {
368:                    // record a reference to the recently-recognized syn pred in the
369:                    // enclosing block.
370:                    SynPredBlock synpred = (SynPredBlock) block;
371:                    context().block.hasASynPred = true;
372:                    context().currentAlt().synPred = synpred;
373:                    grammar.hasSyntacticPredicate = true;
374:                    synpred.removeTrackingOfRuleRefs(grammar);
375:                } else {
376:                    addElementToCurrentAlt(block);
377:                }
378:                ctx.blockEnd.block.prepareForAnalysis();
379:            }
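
The restriction reported above exists because inverting a subrule amounts to a set complement over the token or character vocabulary, which is only well defined when every alternative contributes a single atom. A small standalone illustration, with java.util.BitSet standing in for antlr's own bitset and an arbitrary vocabulary size:

    import java.util.BitSet;

    class InvertSketch {
        // '~(T1|T2|T3)' == everything in the vocabulary except T1, T2 and T3.
        static BitSet invert(int[] memberTokenTypes, int vocabularySize) {
            BitSet set = new BitSet(vocabularySize);
            for (int t : memberTokenTypes) {
                set.set(t);
            }
            set.flip(0, vocabularySize); // complement over the whole vocabulary
            return set;
        }

        public static void main(String[] args) {
            // ~(3|5|7) over a 10-symbol vocabulary
            System.out.println(invert(new int[] { 3, 5, 7 }, 10)); // {0, 1, 2, 4, 6, 8, 9}
        }
    }
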
380:
381:            public void endTree() {
382:                super .endTree();
383:                BlockContext ctx = (BlockContext) blocks.pop();
384:                addElementToCurrentAlt(ctx.block); // add new TreeElement to enclosing alt.
385:            }
386:
387:            /** Remember that a major error occurred in the grammar */
388:            public void hasError() {
389:                grammarError = true;
390:            }
391:
392:            private void labelElement(AlternativeElement el, Token label) {
393:                if (label != null) {
394:                    // Does this label already exist?
395:                    for (int i = 0; i < ruleBlock.labeledElements.size(); i++) {
396:                        AlternativeElement altEl = (AlternativeElement) ruleBlock.labeledElements
397:                                .elementAt(i);
398:                        String l = altEl.getLabel();
399:                        if (l != null && l.equals(label.getText())) {
400:                            tool.error("Label '" + label.getText()
401:                                    + "' has already been defined", grammar
402:                                    .getFilename(), label.getLine(), label
403:                                    .getColumn());
404:                            return;
405:                        }
406:                    }
407:                    // add this node to the list of labeled elements
408:                    el.setLabel(label.getText());
409:                    ruleBlock.labeledElements.appendElement(el);
410:                }
411:            }
412:
413:            public void noAutoGenSubRule() {
414:                context().block.setAutoGen(false);
415:            }
416:
417:            public void oneOrMoreSubRule() {
418:                if (context().block.not) {
419:                    tool.error("'~' cannot be applied to (...)+ subrule",
420:                            grammar.getFilename(), context().block.getLine(),
421:                            context().block.getColumn());
422:                }
423:                // create the right kind of object now that we know what that is
424:                // and switch the list of alternatives.  Adjust the stack of blocks.
425:                // copy any init action also.
426:                OneOrMoreBlock b = new OneOrMoreBlock(grammar);
427:                setBlock(b, context().block);
428:                BlockContext old = (BlockContext) blocks.pop(); // remove old scope; we want new type of subrule
429:                blocks.push(new BlockContext());
430:                context().block = b;
431:                context().blockEnd = old.blockEnd;
432:                context().blockEnd.block = b;
433:            }
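
oneOrMoreSubRule(), zeroOrMoreSubRule() and synPred() all use the same placeholder-then-retype pattern: beginSubRule() pushes a generic AlternativeBlock because the operator is not known yet, and once the trailing '+', '*' or '=>' is seen the handler copies what was collected (via setBlock) into a block of the correct type and swaps it onto the stack. A standalone sketch of the pattern, with simplified stand-in types:

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.Deque;
    import java.util.List;

    class RetypeSketch {
        static class Block {
            String kind = "plain";
            final List<String> alts = new ArrayList<String>();
        }

        static final Deque<Block> blocks = new ArrayDeque<Block>();

        // While "( a | b )" is being read, alternatives go into a plain placeholder block.
        static void beginSubRule() { blocks.push(new Block()); }

        // Once the '+' suffix is seen, move the collected data into a block of the right kind.
        static void oneOrMoreSubRule() {
            Block placeholder = blocks.pop();
            Block plus = new Block();
            plus.kind = "(...)+";
            plus.alts.addAll(placeholder.alts);
            blocks.push(plus);
        }

        public static void main(String[] args) {
            beginSubRule();
            blocks.peek().alts.add("a");
            blocks.peek().alts.add("b");
            oneOrMoreSubRule();
            System.out.println(blocks.peek().kind + " with " + blocks.peek().alts.size() + " alts");
        }
    }
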
434:
435:            public void optionalSubRule() {
436:                if (context().block.not) {
437:                    tool.error("'~' cannot be applied to (...)? subrule",
438:                            grammar.getFilename(), context().block.getLine(),
439:                            context().block.getColumn());
440:                }
441:                // convert (X)? -> (X|) so that we can ignore optional blocks altogether!
442:                // It already thinks that we have a simple subrule, just add option block.
443:                beginAlt(false);
444:                endAlt();
445:            }
446:
447:            public void refAction(Token action) {
448:                super .refAction(action);
449:                context().block.hasAnAction = true;
450:                addElementToCurrentAlt(new ActionElement(grammar, action));
451:            }
452:
453:            public void setUserExceptions(String thr) {
454:                ((RuleBlock) context().block).throwsSpec = thr;
455:            }
456:
457:            // Only called for rule blocks
458:            public void refArgAction(Token action) {
459:                ((RuleBlock) context().block).argAction = action.getText();
460:            }
461:
462:            public void refCharLiteral(Token lit, Token label,
463:                    boolean inverted, int autoGenType, boolean lastInRule) {
464:                if (!(grammar instanceof  LexerGrammar)) {
465:                    tool.error("Character literal only valid in lexer", grammar
466:                            .getFilename(), lit.getLine(), lit.getColumn());
467:                    return;
468:                }
469:                super .refCharLiteral(lit, label, inverted, autoGenType,
470:                        lastInRule);
471:                CharLiteralElement cl = new CharLiteralElement(
472:                        (LexerGrammar) grammar, lit, inverted, autoGenType);
473:
474:                // Generate a warning for non-lowercase ASCII when case-insensitive
475:                if (!((LexerGrammar) grammar).caseSensitive
476:                        && cl.getType() < 128
477:                        && Character.toLowerCase((char) cl.getType()) != (char) cl
478:                                .getType()) {
479:                    tool
480:                            .warning(
481:                                    "Character literal must be lowercase when caseSensitive=false",
482:                                    grammar.getFilename(), lit.getLine(), lit
483:                                            .getColumn());
484:                }
485:
486:                addElementToCurrentAlt(cl);
487:                labelElement(cl, label);
488:
489:                // if ignore option is set, must add an optional call to the specified rule.
490:                String ignore = ruleBlock.getIgnoreRule();
491:                if (!lastInRule && ignore != null) {
492:                    addElementToCurrentAlt(createOptionalRuleRef(ignore, lit));
493:                }
494:            }
495:
496:            public void refCharRange(Token t1, Token t2, Token label,
497:                    int autoGenType, boolean lastInRule) {
498:                if (!(grammar instanceof  LexerGrammar)) {
499:                    tool.error("Character range only valid in lexer", grammar
500:                            .getFilename(), t1.getLine(), t1.getColumn());
501:                    return;
502:                }
503:                int rangeMin = ANTLRLexer.tokenTypeForCharLiteral(t1.getText());
504:                int rangeMax = ANTLRLexer.tokenTypeForCharLiteral(t2.getText());
505:                if (rangeMax < rangeMin) {
506:                    tool.error("Malformed range.", grammar.getFilename(), t1
507:                            .getLine(), t1.getColumn());
508:                    return;
509:                }
510:
511:                // Generate a warning for non-lowercase ASCII when case-insensitive
512:                if (!((LexerGrammar) grammar).caseSensitive) {
513:                    if (rangeMin < 128
514:                            && Character.toLowerCase((char) rangeMin) != (char) rangeMin) {
515:                        tool
516:                                .warning(
517:                                        "Character literal must be lowercase when caseSensitive=false",
518:                                        grammar.getFilename(), t1.getLine(), t1
519:                                                .getColumn());
520:                    }
521:                    if (rangeMax < 128
522:                            && Character.toLowerCase((char) rangeMax) != (char) rangeMax) {
523:                        tool
524:                                .warning(
525:                                        "Character literal must be lowercase when caseSensitive=false",
526:                                        grammar.getFilename(), t2.getLine(), t2
527:                                                .getColumn());
528:                    }
529:                }
530:
531:                super .refCharRange(t1, t2, label, autoGenType, lastInRule);
532:                CharRangeElement cr = new CharRangeElement(
533:                        (LexerGrammar) grammar, t1, t2, autoGenType);
534:                addElementToCurrentAlt(cr);
535:                labelElement(cr, label);
536:
537:                // if ignore option is set, must add an optional call to the specified rule.
538:                String ignore = ruleBlock.getIgnoreRule();
539:                if (!lastInRule && ignore != null) {
540:                    addElementToCurrentAlt(createOptionalRuleRef(ignore, t1));
541:                }
542:            }
543:
544:            public void refTokensSpecElementOption(Token tok, Token option,
545:                    Token value) {
546:                /*
547:                System.out.println("setting tokens spec option for "+tok.getText());
548:                System.out.println(option.getText()+","+value.getText());
549:                 */
550:                TokenSymbol ts = (TokenSymbol) grammar.tokenManager
551:                        .getTokenSymbol(tok.getText());
552:                if (ts == null) {
553:                    tool.panic("cannot find " + tok.getText()
554:                            + " in tokens {...}");
555:                }
556:                if (option.getText().equals("AST")) {
557:                    ts.setASTNodeType(value.getText());
558:                } else {
559:                    grammar.antlrTool.error(
560:                            "invalid tokens {...} element option: "
561:                                    + option.getText(), grammar.getFilename(),
562:                            option.getLine(), option.getColumn());
563:                }
564:            }
565:
566:            public void refElementOption(Token option, Token value) {
567:                /*
568:                System.out.println("setting option for "+context().currentElement());
569:                System.out.println(option.getText()+","+value.getText());
570:                 */
571:                AlternativeElement e = context().currentElement();
572:                if (e instanceof  StringLiteralElement
573:                        || e instanceof  TokenRefElement
574:                        || e instanceof  WildcardElement) {
575:                    ((GrammarAtom) e).setOption(option, value);
576:                } else {
577:                    tool.error("cannot use element option (" + option.getText()
578:                            + ") for this kind of element", grammar
579:                            .getFilename(), option.getLine(), option
580:                            .getColumn());
581:                }
582:            }
583:
584:            /** Add an exception handler to an exception spec */
585:            public void refExceptionHandler(Token exTypeAndName, Token action) {
586:                super .refExceptionHandler(exTypeAndName, action);
587:                if (currentExceptionSpec == null) {
588:                    tool.panic("exception handler processing internal error");
589:                }
590:                currentExceptionSpec.addHandler(new ExceptionHandler(
591:                        exTypeAndName, action));
592:            }
593:
594:            public void refInitAction(Token action) {
595:                super .refAction(action);
596:                context().block.setInitAction(action.getText());
597:            }
598:
599:            public void refMemberAction(Token act) {
600:                grammar.classMemberAction = act;
601:            }
602:
603:            public void refPreambleAction(Token act) {
604:                super .refPreambleAction(act);
605:            }
606:
607:            // Only called for rule blocks
608:            public void refReturnAction(Token returnAction) {
609:                if (grammar instanceof  LexerGrammar) {
610:                    String name = CodeGenerator
611:                            .encodeLexerRuleName(((RuleBlock) context().block)
612:                                    .getRuleName());
613:                    RuleSymbol rs = (RuleSymbol) grammar.getSymbol(name);
614:                    if (rs.access.equals("public")) {
615:                        tool
616:                                .warning(
617:                                        "public Lexical rules cannot specify return type",
618:                                        grammar.getFilename(), returnAction
619:                                                .getLine(), returnAction
620:                                                .getColumn());
621:                        return;
622:                    }
623:                }
624:                ((RuleBlock) context().block).returnAction = returnAction
625:                        .getText();
626:            }
627:
628:            public void refRule(Token idAssign, Token r, Token label,
629:                    Token args, int autoGenType) {
630:                // Disallow parser rule references in the lexer
631:                if (grammar instanceof  LexerGrammar) {
632:                    //			if (!Character.isUpperCase(r.getText().charAt(0))) {
633:                    if (r.type != ANTLRTokenTypes.TOKEN_REF) {
634:                        tool.error("Parser rule " + r.getText()
635:                                + " referenced in lexer");
636:                        return;
637:                    }
638:                    if (autoGenType == GrammarElement.AUTO_GEN_CARET) {
639:                        tool.error("AST specification ^ not allowed in lexer",
640:                                grammar.getFilename(), r.getLine(), r
641:                                        .getColumn());
642:                    }
643:                }
644:
645:                super .refRule(idAssign, r, label, args, autoGenType);
646:                lastRuleRef = new RuleRefElement(grammar, r, autoGenType);
647:                if (args != null) {
648:                    lastRuleRef.setArgs(args.getText());
649:                }
650:                if (idAssign != null) {
651:                    lastRuleRef.setIdAssign(idAssign.getText());
652:                }
653:                addElementToCurrentAlt(lastRuleRef);
654:
655:                String id = r.getText();
656:                //		if ( Character.isUpperCase(id.charAt(0)) ) { // lexer rule?
657:                if (r.type == ANTLRTokenTypes.TOKEN_REF) { // lexer rule?
658:                    id = CodeGenerator.encodeLexerRuleName(id);
659:                }
660:                // update symbol table so it knows what nodes reference the rule.
661:                RuleSymbol rs = (RuleSymbol) grammar.getSymbol(id);
662:                rs.addReference(lastRuleRef);
663:                labelElement(lastRuleRef, label);
664:            }
665:
666:            public void refSemPred(Token pred) {
667:                //System.out.println("refSemPred "+pred.getText());
668:                super .refSemPred(pred);
669:                //System.out.println("context().block: "+context().block);
670:                if (context().currentAlt().atStart()) {
671:                    context().currentAlt().semPred = pred.getText();
672:                } else {
673:                    ActionElement a = new ActionElement(grammar, pred);
674:                    a.isSemPred = true;
675:                    addElementToCurrentAlt(a);
676:                }
677:                //System.out.println("DONE refSemPred "+pred.getText());
678:            }
679:
680:            public void refStringLiteral(Token lit, Token label,
681:                    int autoGenType, boolean lastInRule) {
682:                super .refStringLiteral(lit, label, autoGenType, lastInRule);
683:                if (grammar instanceof  TreeWalkerGrammar
684:                        && autoGenType == GrammarElement.AUTO_GEN_CARET) {
685:                    tool.error("^ not allowed here in a tree-walker", grammar
686:                            .getFilename(), lit.getLine(), lit.getColumn());
687:                }
688:                StringLiteralElement sl = new StringLiteralElement(grammar,
689:                        lit, autoGenType);
690:
691:                // If case-insensitive, then check each char of the string literal
692:                if (grammar instanceof  LexerGrammar
693:                        && !((LexerGrammar) grammar).caseSensitive) {
694:                    for (int i = 1; i < lit.getText().length() - 1; i++) {
695:                        char c = lit.getText().charAt(i);
696:                        if (c < 128 && Character.toLowerCase(c) != c) {
697:                            tool
698:                                    .warning(
699:                                            "Characters of string literal must be lowercase when caseSensitive=false",
700:                                            grammar.getFilename(), lit
701:                                                    .getLine(), lit.getColumn());
702:                            break;
703:                        }
704:                    }
705:                }
706:
707:                addElementToCurrentAlt(sl);
708:                labelElement(sl, label);
709:
710:                // if ignore option is set, must add an optional call to the specified rule.
711:                String ignore = ruleBlock.getIgnoreRule();
712:                if (!lastInRule && ignore != null) {
713:                    addElementToCurrentAlt(createOptionalRuleRef(ignore, lit));
714:                }
715:            }
716:
717:            public void refToken(Token idAssign, Token t, Token label,
718:                    Token args, boolean inverted, int autoGenType,
719:                    boolean lastInRule) {
720:                if (grammar instanceof  LexerGrammar) {
721:                    // In lexer, token references are really rule references
722:                    if (autoGenType == GrammarElement.AUTO_GEN_CARET) {
723:                        tool.error("AST specification ^ not allowed in lexer",
724:                                grammar.getFilename(), t.getLine(), t
725:                                        .getColumn());
726:                    }
727:                    if (inverted) {
728:                        tool.error("~TOKEN is not allowed in lexer", grammar
729:                                .getFilename(), t.getLine(), t.getColumn());
730:                    }
731:                    refRule(idAssign, t, label, args, autoGenType);
732:
733:                    // if ignore option is set, must add an optional call to the specified token rule.
734:                    String ignore = ruleBlock.getIgnoreRule();
735:                    if (!lastInRule && ignore != null) {
736:                        addElementToCurrentAlt(createOptionalRuleRef(ignore, t));
737:                    }
738:                } else {
739:                    // Cannot have token ref args or assignment outside of lexer
740:                    if (idAssign != null) {
741:                        tool
742:                                .error(
743:                                        "Assignment from token reference only allowed in lexer",
744:                                        grammar.getFilename(), idAssign
745:                                                .getLine(), idAssign
746:                                                .getColumn());
747:                    }
748:                    if (args != null) {
749:                        tool
750:                                .error(
751:                                        "Token reference arguments only allowed in lexer",
752:                                        grammar.getFilename(), args.getLine(),
753:                                        args.getColumn());
754:                    }
755:                    super .refToken(idAssign, t, label, args, inverted,
756:                            autoGenType, lastInRule);
757:                    TokenRefElement te = new TokenRefElement(grammar, t,
758:                            inverted, autoGenType);
759:                    addElementToCurrentAlt(te);
760:                    labelElement(te, label);
761:                }
762:            }
763:
764:            public void refTokenRange(Token t1, Token t2, Token label,
765:                    int autoGenType, boolean lastInRule) {
766:                if (grammar instanceof  LexerGrammar) {
767:                    tool.error("Token range not allowed in lexer", grammar
768:                            .getFilename(), t1.getLine(), t1.getColumn());
769:                    return;
770:                }
771:                super .refTokenRange(t1, t2, label, autoGenType, lastInRule);
772:                TokenRangeElement tr = new TokenRangeElement(grammar, t1, t2,
773:                        autoGenType);
774:                if (tr.end < tr.begin) {
775:                    tool.error("Malformed range.", grammar.getFilename(), t1
776:                            .getLine(), t1.getColumn());
777:                    return;
778:                }
779:                addElementToCurrentAlt(tr);
780:                labelElement(tr, label);
781:            }
782:
783:            public void refTreeSpecifier(Token treeSpec) {
784:                context().currentAlt().treeSpecifier = treeSpec;
785:            }
786:
787:            public void refWildcard(Token t, Token label, int autoGenType) {
788:                super .refWildcard(t, label, autoGenType);
789:                WildcardElement wc = new WildcardElement(grammar, t,
790:                        autoGenType);
791:                addElementToCurrentAlt(wc);
792:                labelElement(wc, label);
793:            }
794:
795:            /** Get ready to process a new grammar */
796:            public void reset() {
797:                super .reset();
798:                blocks = new LList();
799:                lastRuleRef = null;
800:                ruleEnd = null;
801:                ruleBlock = null;
802:                nested = 0;
803:                currentExceptionSpec = null;
804:                grammarError = false;
805:            }
806:
807:            public void setArgOfRuleRef(Token argAction) {
808:                super .setArgOfRuleRef(argAction);
809:                lastRuleRef.setArgs(argAction.getText());
810:            }
811:
812:            public static void setBlock(AlternativeBlock b, AlternativeBlock src) {
813:                b.setAlternatives(src.getAlternatives());
814:                b.initAction = src.initAction;
815:                //b.lookaheadDepth = src.lookaheadDepth;
816:                b.label = src.label;
817:                b.hasASynPred = src.hasASynPred;
818:                b.hasAnAction = src.hasAnAction;
819:                b.warnWhenFollowAmbig = src.warnWhenFollowAmbig;
820:                b.generateAmbigWarnings = src.generateAmbigWarnings;
821:                b.line = src.line;
822:                b.greedy = src.greedy;
823:                b.greedySet = src.greedySet;
824:            }
825:
826:            public void setRuleOption(Token key, Token value) {
827:                //((RuleBlock)context().block).setOption(key, value);
828:                ruleBlock.setOption(key, value);
829:            }
830:
831:            public void setSubruleOption(Token key, Token value) {
832:                ((AlternativeBlock) context().block).setOption(key, value);
833:            }
834:
835:            public void synPred() {
836:                if (context().block.not) {
837:                    tool.error("'~' cannot be applied to syntactic predicate",
838:                            grammar.getFilename(), context().block.getLine(),
839:                            context().block.getColumn());
840:                }
841:                // create the right kind of object now that we know what that is
842:                // and switch the list of alternatives.  Adjust the stack of blocks.
843:                // copy any init action also.
844:                SynPredBlock b = new SynPredBlock(grammar);
845:                setBlock(b, context().block);
846:                BlockContext old = (BlockContext) blocks.pop(); // remove old scope; we want new type of subrule
847:                blocks.push(new BlockContext());
848:                context().block = b;
849:                context().blockEnd = old.blockEnd;
850:                context().blockEnd.block = b;
851:            }
852:
853:            public void zeroOrMoreSubRule() {
854:                if (context().block.not) {
855:                    tool.error("'~' cannot be applied to (...)* subrule",
856:                            grammar.getFilename(), context().block.getLine(),
857:                            context().block.getColumn());
858:                }
859:                // create the right kind of object now that we know what that is
860:                // and switch the list of alternatives.  Adjust the stack of blocks.
861:                // copy any init action also.
862:                ZeroOrMoreBlock b = new ZeroOrMoreBlock(grammar);
863:                setBlock(b, context().block);
864:                BlockContext old = (BlockContext) blocks.pop(); // remove old scope; we want new type of subrule
865:                blocks.push(new BlockContext());
866:                context().block = b;
867:                context().blockEnd = old.blockEnd;
868:                context().blockEnd.block = b;
869:            }
870:        }
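
MakeGrammar is a builder that ANTLR's own grammar parser drives through the begin.../ref.../end... callbacks above. The sketch below is not antlr code: it uses a hypothetical Builder interface to illustrate the call sequence the method bodies suggest for a rule such as r : A ( B | C )+ ; the real callbacks carry more parameters (tokens, labels, autoGen flags) and the exact sequence may differ.

    // Hypothetical interface and driver, for illustration only.
    class CallbackProtocolSketch {
        interface Builder {
            void defineRuleName(String rule);
            void beginAlt();
            void refToken(String token);
            void beginSubRule();
            void oneOrMoreSubRule();
            void endSubRule();
            void endAlt();
            void endRule(String rule);
        }

        // Plausible sequence for: r : A ( B | C )+ ;
        static void drive(Builder b) {
            b.defineRuleName("r");
            b.beginAlt();
            b.refToken("A");
            b.beginSubRule();
            b.beginAlt(); b.refToken("B"); b.endAlt();
            b.beginAlt(); b.refToken("C"); b.endAlt();
            b.oneOrMoreSubRule(); // retype the placeholder block once '+' is seen
            b.endSubRule();
            b.endAlt();
            b.endRule("r");
        }

        public static void main(String[] args) {
            drive(new Builder() {
                public void defineRuleName(String rule) { System.out.println("defineRuleName " + rule); }
                public void beginAlt() { System.out.println("beginAlt"); }
                public void refToken(String token) { System.out.println("refToken " + token); }
                public void beginSubRule() { System.out.println("beginSubRule"); }
                public void oneOrMoreSubRule() { System.out.println("oneOrMoreSubRule"); }
                public void endSubRule() { System.out.println("endSubRule"); }
                public void endAlt() { System.out.println("endAlt"); }
                public void endRule(String rule) { System.out.println("endRule " + rule); }
            });
        }
    }
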