Source Code Cross Referenced for DefineGrammarSymbols.java » Parser » Rats Parser Generators » antlr

package antlr;

/* ANTLR Translator Generator
 * Project led by Terence Parr at http://www.jGuru.com
 * Software rights: http://www.antlr.org/RIGHTS.html
 *
 * $Id: DefineGrammarSymbols.java,v 1.1 2004/01/21 19:18:30 rgrimm Exp $
 */

import java.util.Hashtable;

import antlr.collections.impl.BitSet;

/** DefineGrammarSymbols is a behavior for the ANTLRParser that adds all
 *  the token and rule symbols to the grammar symbol table.
 *
 *  Token types are also assigned to token symbols in this class.
 *  A token's type is assigned in the order the token is seen (lexically).
 *
 *  See the illustrative wiring sketch after the constructor below.
 */
public class DefineGrammarSymbols implements ANTLRGrammarParseBehavior {
    // Contains all of the defined parser and lexer grammars, indexed by name
    protected Hashtable grammars = new Hashtable();
    // Contains all the TokenManagers, indexed by name
    protected Hashtable tokenManagers = new Hashtable();
    // Current grammar (parser or lexer)
    protected Grammar grammar;
    // The tool under which this is invoked
    protected Tool tool;
    // The grammar analyzer object
    LLkAnalyzer analyzer;
    // The command-line arguments passed to the tool.
    // This allows each grammar to parse the arguments as it is created
    String[] args;
    // Name for the default token manager; does not match any valid name
    static final String DEFAULT_TOKENMANAGER_NAME = "*default";
    // Header actions apply to all parsers unless redefined.
    // Contains all of the header actions, indexed by name
    protected Hashtable headerActions = new Hashtable();
    // Place where the preamble is stored until a grammar is defined
    Token thePreambleAction = new CommonToken(Token.INVALID_TYPE, ""); // init to empty token
    // The target language
    String language = "Java";

    protected int numLexers = 0;
    protected int numParsers = 0;
    protected int numTreeParsers = 0;

    public DefineGrammarSymbols(Tool tool_, String[] args_,
            LLkAnalyzer analyzer_) {
        tool = tool_;
        args = args_;
        analyzer = analyzer_;
    }
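
    /* Illustrative wiring sketch (an assumption for this listing, not part of
     * the original ANTLR source; the real antlr.Tool code may differ):
     *
     *     DefineGrammarSymbols behavior =
     *             new DefineGrammarSymbols(tool, args, analyzer);
     *
     * The ANTLRParser then calls back into this behavior (startParser,
     * defineRuleName, refToken, ...) while reading the .g file, so that every
     * grammar, rule, and token symbol is registered before code generation.
     */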

    public void _refStringLiteral(Token lit, Token label,
            int autoGenType, boolean lastInRule) {
        if (!(grammar instanceof LexerGrammar)) {
            // String literals are treated like tokens except by the lexer
            String str = lit.getText();
            if (grammar.tokenManager.getTokenSymbol(str) != null) {
                // string symbol is already defined
                return;
            }
            StringLiteralSymbol sl = new StringLiteralSymbol(str);
            int tt = grammar.tokenManager.nextTokenType();
            sl.setTokenType(tt);
            grammar.tokenManager.define(sl);
        }
    }

    /** Reference a token */
    public void _refToken(Token assignId, Token t, Token label,
            Token args, boolean inverted, int autoGenType,
            boolean lastInRule) {
        String id = t.getText();
        if (!grammar.tokenManager.tokenDefined(id)) {
            /*
            // RK: dish out a warning if the token was not defined before.
            tool.warning("Token '" + id + "' defined outside tokens section",
                         tool.grammarFile, t.getLine(), t.getColumn());
             */
            int tt = grammar.tokenManager.nextTokenType();
            TokenSymbol ts = new TokenSymbol(id);
            ts.setTokenType(tt);
            grammar.tokenManager.define(ts);
        }
    }
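
    /* Illustrative note (assumption, not from the original source): token
     * types are handed out by tokenManager.nextTokenType() in the order the
     * references are first seen.  For a parser grammar fragment such as
     *
     *     decl : "int" ID SEMI ;
     *
     * the literal "int" and any token references (ID, SEMI) not already in
     * the vocabulary each receive the next free token type, in that lexical
     * order.  The numeric values depend on the imported or shared vocabulary,
     * so none are assumed here.
     */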

    /** Abort the processing of a grammar due to syntax errors */
    public void abortGrammar() {
        if (grammar != null && grammar.getClassName() != null) {
            grammars.remove(grammar.getClassName());
        }
        grammar = null;
    }

    public void beginAlt(boolean doAST_) {
    }

    public void beginChildList() {
    }

    // Exception handling
    public void beginExceptionGroup() {
    }

    public void beginExceptionSpec(Token label) {
    }

    public void beginSubRule(Token label, Token start, boolean not) {
    }

    public void beginTree(Token tok) throws SemanticException {
    }

    /** Define a lexer or parser rule */
    public void defineRuleName(Token r, String access,
            boolean ruleAutoGen, String docComment)
            throws SemanticException {
        String id = r.getText();

        //		if ( Character.isUpperCase(id.charAt(0)) ) {
        if (r.type == ANTLRTokenTypes.TOKEN_REF) {
            // lexer rule
            id = CodeGenerator.encodeLexerRuleName(id);
            // make sure we define it as a token identifier also
            if (!grammar.tokenManager.tokenDefined(r.getText())) {
                int tt = grammar.tokenManager.nextTokenType();
                TokenSymbol ts = new TokenSymbol(r.getText());
                ts.setTokenType(tt);
                grammar.tokenManager.define(ts);
            }
        }

        RuleSymbol rs;
        if (grammar.isDefined(id)) {
            // symbol seen before?
            rs = (RuleSymbol) grammar.getSymbol(id);
            // has the rule only been referenced so far, or already defined?
            if (rs.isDefined()) {
                tool.error("redefinition of rule " + id,
                        grammar.getFilename(), r.getLine(), r.getColumn());
            }
        } else {
            rs = new RuleSymbol(id);
            grammar.define(rs);
        }
        rs.setDefined();
        rs.access = access;
        rs.comment = docComment;
    }
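
    /* Illustrative note (assumption, not from the original source): for a
     * lexer rule such as
     *
     *     WS : ' ' | '\t' ;
     *
     * defineRuleName() registers the rule under its encoded name (via
     * CodeGenerator.encodeLexerRuleName) and also defines a token symbol WS,
     * so a parser grammar sharing the vocabulary can reference WS as a token.
     */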

    /** Define a token from tokens {...}.
     *  Must be a label and a literal, just a label, or just a literal.
     *  See the illustrative examples after this method.
     */
    public void defineToken(Token tokname, Token tokliteral) {
        String name = null;
        String literal = null;
        if (tokname != null) {
            name = tokname.getText();
        }
        if (tokliteral != null) {
            literal = tokliteral.getText();
        }
        // System.out.println("defining " + name + " with literal " + literal);
        //
        if (literal != null) {
            StringLiteralSymbol sl = (StringLiteralSymbol) grammar.tokenManager
                    .getTokenSymbol(literal);
            if (sl != null) {
                // This literal is known already.
                // If the literal has no label already, but we can provide
                // one here, then no problem, just map the label to the literal
                // and don't change anything else.
                // Otherwise, labels conflict: error.
                if (name == null || sl.getLabel() != null) {
                    tool.warning(
                            "Redefinition of literal in tokens {...}: "
                                    + literal, grammar.getFilename(),
                            tokliteral.getLine(), tokliteral.getColumn());
                    return;
                } else if (name != null) {
                    // The literal had no label, but the new definition does.  Set it.
                    sl.setLabel(name);
                    // Also, map the label to the literal.
                    grammar.tokenManager.mapToTokenSymbol(name, sl);
                }
            }
            // if they provide a name/label and that name/label already
            // exists, just hook this literal onto the old token.
            if (name != null) {
                TokenSymbol ts = (TokenSymbol) grammar.tokenManager
                        .getTokenSymbol(name);
                if (ts != null) {
                    // watch out that the label is not more than just a token.
                    // If it already has a literal attached, then: conflict.
                    if (ts instanceof StringLiteralSymbol) {
                        tool.warning(
                                "Redefinition of token in tokens {...}: "
                                        + name, grammar.getFilename(),
                                tokliteral.getLine(), tokliteral.getColumn());
                        return;
                    }
                    // A simple token symbol such as DECL is already defined;
                    // convert it to a StringLiteralSymbol with a label by
                    // co-opting its token type and killing the old
                    // TokenSymbol: kill the mapping and the entry in the
                    // token manager's vector.
                    // First, claim the token type.
                    int ttype = ts.getTokenType();
                    // now, create a string literal with the label
                    sl = new StringLiteralSymbol(literal);
                    sl.setTokenType(ttype);
                    sl.setLabel(name);
                    // redefine this critter as a string literal
                    grammar.tokenManager.define(sl);
                    // make sure the label can be used also.
                    grammar.tokenManager.mapToTokenSymbol(name, sl);
                    return;
                }
                // here, the literal was labeled, but not by a known token symbol.
            }
            sl = new StringLiteralSymbol(literal);
            int tt = grammar.tokenManager.nextTokenType();
            sl.setTokenType(tt);
            sl.setLabel(name);
            grammar.tokenManager.define(sl);
            if (name != null) {
                // make the label point at the token symbol too
                grammar.tokenManager.mapToTokenSymbol(name, sl);
            }
        }

        // create a token in the token manager, not a literal
        else {
            if (grammar.tokenManager.tokenDefined(name)) {
                tool.warning("Redefinition of token in tokens {...}: "
                        + name, grammar.getFilename(),
                        tokname.getLine(), tokname.getColumn());
                return;
            }
            int tt = grammar.tokenManager.nextTokenType();
            TokenSymbol ts = new TokenSymbol(name);
            ts.setTokenType(tt);
            grammar.tokenManager.define(ts);
        }
    }
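
    /* Illustrative tokens {...} forms (assumption, not from the original
     * source):
     *
     *     tokens {
     *         PLUS = "+" ;   // label and literal: StringLiteralSymbol labeled PLUS
     *         "begin" ;      // literal only: StringLiteralSymbol, no label
     *         DECL ;         // label only: plain TokenSymbol
     *     }
     *
     * If DECL is later given a literal, the branch above converts the plain
     * TokenSymbol into a labeled StringLiteralSymbol while keeping its
     * original token type.
     */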

    public void endAlt() {
    }

    public void endChildList() {
    }

    public void endExceptionGroup() {
    }

    public void endExceptionSpec() {
    }

    public void endGrammar() {
    }

    /** Called after the optional options section, to compensate for
     *  options that may not have been set.
     *  This method is bigger than it needs to be, but is much clearer
     *  when all the cases are delineated.
     *  See the illustrative option combinations after this method.
     */
    public void endOptions() {
        // NO VOCAB OPTIONS
        if (grammar.exportVocab == null && grammar.importVocab == null) {
            grammar.exportVocab = grammar.getClassName();
            // Can we get the initial vocab from the default shared vocab?
            if (tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
                // Use the already-defined token manager
                grammar.exportVocab = DEFAULT_TOKENMANAGER_NAME;
                TokenManager tm = (TokenManager) tokenManagers
                        .get(DEFAULT_TOKENMANAGER_NAME);
                // System.out.println("No tokenVocabulary for '" + grammar.getClassName() + "', using default '" + tm.getName() + "'");
                grammar.setTokenManager(tm);
                return;
            }
            // no shared vocab for the file, make a new one
            // System.out.println("No exportVocab for '" + grammar.getClassName() + "', creating default '" + grammar.exportVocab + "'");
            TokenManager tm = new SimpleTokenManager(
                    grammar.exportVocab, tool);
            grammar.setTokenManager(tm);
            // Add the token manager to the list of token managers
            tokenManagers.put(grammar.exportVocab, tm);
            // no default vocab, so make this the default vocab
            tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
            return;
        }

        // NO OUTPUT, BUT HAS INPUT VOCAB
        if (grammar.exportVocab == null && grammar.importVocab != null) {
            grammar.exportVocab = grammar.getClassName();
            // first make sure input != output
            if (grammar.importVocab.equals(grammar.exportVocab)) {
                tool.warning("Grammar "
                        + grammar.getClassName()
                        + " cannot have importVocab same as default output vocab (grammar name); ignored.");
                // kill the importVocab option and try again: use the default vocab
                grammar.importVocab = null;
                endOptions();
                return;
            }
            // check to see if the vocab is already in memory
            // (defined by another grammar in the file).  Not a normal situation.
            if (tokenManagers.containsKey(grammar.importVocab)) {
                // make a copy since we'll be generating a new output vocab
                // and we don't want to affect this one.  Set the name to
                // the default output vocab == classname.
                TokenManager tm = (TokenManager) tokenManagers
                        .get(grammar.importVocab);
                // System.out.println("Duping importVocab of " + grammar.importVocab);
                TokenManager dup = (TokenManager) tm.clone();
                dup.setName(grammar.exportVocab);
                // System.out.println("Setting name to " + grammar.exportVocab);
                dup.setReadOnly(false);
                grammar.setTokenManager(dup);
                tokenManagers.put(grammar.exportVocab, dup);
                return;
            }
            // System.out.println("reading in vocab "+grammar.importVocab);
            // Must be a file, go get it.
            ImportVocabTokenManager tm = new ImportVocabTokenManager(
                    grammar, grammar.importVocab
                            + CodeGenerator.TokenTypesFileSuffix
                            + CodeGenerator.TokenTypesFileExt,
                    grammar.exportVocab, tool);
            tm.setReadOnly(false); // since renamed, can write out
            // Add this token manager to the list so its tokens will be generated
            tokenManagers.put(grammar.exportVocab, tm);
            // System.out.println("vocab renamed to default output vocab of "+tm.getName());
            // Assign the token manager to this grammar.
            grammar.setTokenManager(tm);

            // set the default vocab if none
            if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
                tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
            }

            return;
        }

        // OUTPUT VOCAB, BUT NO INPUT VOCAB
        if (grammar.exportVocab != null && grammar.importVocab == null) {
            // share with the previous vocab if it exists
            if (tokenManagers.containsKey(grammar.exportVocab)) {
                // Use the already-defined token manager
                TokenManager tm = (TokenManager) tokenManagers
                        .get(grammar.exportVocab);
                // System.out.println("Sharing exportVocab of " + grammar.exportVocab);
                grammar.setTokenManager(tm);
                return;
            }
            // create a new output vocab
            // System.out.println("Creating exportVocab " + grammar.exportVocab);
            TokenManager tm = new SimpleTokenManager(
                    grammar.exportVocab, tool);
            grammar.setTokenManager(tm);
            // Add the token manager to the list of token managers
            tokenManagers.put(grammar.exportVocab, tm);
            // set the default vocab if none
            if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
                tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
            }
            return;
        }

        // BOTH INPUT AND OUTPUT VOCAB
        if (grammar.exportVocab != null && grammar.importVocab != null) {
            // don't want input == output
            if (grammar.importVocab.equals(grammar.exportVocab)) {
                tool.error("exportVocab of "
                        + grammar.exportVocab
                        + " same as importVocab; probably not what you want");
            }
            // does the input vocab already exist in memory?
            if (tokenManagers.containsKey(grammar.importVocab)) {
                // make a copy since we'll be generating a new output vocab
                // and we don't want to affect this one.
                TokenManager tm = (TokenManager) tokenManagers
                        .get(grammar.importVocab);
                // System.out.println("Duping importVocab of " + grammar.importVocab);
                TokenManager dup = (TokenManager) tm.clone();
                dup.setName(grammar.exportVocab);
                // System.out.println("Setting name to " + grammar.exportVocab);
                dup.setReadOnly(false);
                grammar.setTokenManager(dup);
                tokenManagers.put(grammar.exportVocab, dup);
                return;
            }
            // Must be a file, go get it.
            ImportVocabTokenManager tm = new ImportVocabTokenManager(
                    grammar, grammar.importVocab
                            + CodeGenerator.TokenTypesFileSuffix
                            + CodeGenerator.TokenTypesFileExt,
                    grammar.exportVocab, tool);
            tm.setReadOnly(false); // write it out, as we've changed the name
            // Add this token manager to the list so its tokens will be generated
            tokenManagers.put(grammar.exportVocab, tm);
            // Assign the token manager to this grammar.
            grammar.setTokenManager(tm);

            // set the default vocab if none
            if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
                tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
            }

            return;
        }
    }
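
    /* Illustrative vocab option combinations (assumption, not from the
     * original source).  In a grammar one might write:
     *
     *     class CalcParser extends Parser;
     *     options {
     *         exportVocab = Calc;       // OUTPUT VOCAB: defines the Calc vocabulary
     *         // importVocab = Common;  // INPUT VOCAB: reads an existing vocabulary
     *     }
     *
     * With neither option, the grammar exports a vocabulary named after its
     * class (or shares the file's default token manager if one exists).  With
     * importVocab, the imported vocabulary is either cloned from memory or
     * read from the file named importVocab + TokenTypesFileSuffix +
     * TokenTypesFileExt, then renamed to the grammar's export vocabulary.
     */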

    public void endRule(String r) {
    }

    public void endSubRule() {
    }

    public void endTree() {
    }

    public void hasError() {
    }

    public void noASTSubRule() {
    }

    public void oneOrMoreSubRule() {
    }

    public void optionalSubRule() {
    }

    public void setUserExceptions(String thr) {
    }

    public void refAction(Token action) {
    }

    public void refArgAction(Token action) {
    }

    public void refCharLiteral(Token lit, Token label,
            boolean inverted, int autoGenType, boolean lastInRule) {
    }

    public void refCharRange(Token t1, Token t2, Token label,
            int autoGenType, boolean lastInRule) {
    }

    public void refElementOption(Token option, Token value) {
    }

    public void refTokensSpecElementOption(Token tok, Token option,
            Token value) {
    }

    public void refExceptionHandler(Token exTypeAndName, Token action) {
    }

    // Header action applies to all parsers and lexers.
    public void refHeaderAction(Token name, Token act) {
        String key;

        if (name == null)
            key = "";
        else
            key = StringUtils.stripFrontBack(name.getText(), "\"", "\"");

        // FIXME: depending on the mode the inserted header actions should
        // be checked for sanity.
        if (headerActions.containsKey(key)) {
            if (key.equals(""))
                tool.error(act.getLine()
                        + ": header action already defined");
            else
                tool.error(act.getLine() + ": header action '" + key
                        + "' already defined");
        }
        headerActions.put(key, act);
    }
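
    /* Illustrative header actions (assumption, not from the original source).
     * In a grammar file:
     *
     *     header {
     *         package com.example.calc;   // unnamed header: stored under key ""
     *     }
     *     header "post_include_hpp" {     // named header: stored under its name
     *         #include <string>
     *     }
     *
     * Each header is stored once per key; a duplicate key is reported as an
     * error by refHeaderAction() above.
     */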

    public String getHeaderAction(String name) {
        Token t = (Token) headerActions.get(name);
        if (t == null) {
            return "";
        }
        return t.getText();
    }

    public void refInitAction(Token action) {
    }

    public void refMemberAction(Token act) {
    }

    public void refPreambleAction(Token act) {
        thePreambleAction = act;
    }

    public void refReturnAction(Token returnAction) {
    }

    public void refRule(Token idAssign, Token r, Token label,
            Token args, int autoGenType) {
        String id = r.getText();
        //		if ( Character.isUpperCase(id.charAt(0)) ) { // lexer rule?
        if (r.type == ANTLRTokenTypes.TOKEN_REF) {
            // lexer rule?
            id = CodeGenerator.encodeLexerRuleName(id);
        }
        if (!grammar.isDefined(id)) {
            grammar.define(new RuleSymbol(id));
        }
    }

    public void refSemPred(Token pred) {
    }

    public void refStringLiteral(Token lit, Token label,
            int autoGenType, boolean lastInRule) {
        _refStringLiteral(lit, label, autoGenType, lastInRule);
    }

    /** Reference a token */
    public void refToken(Token assignId, Token t, Token label,
            Token args, boolean inverted, int autoGenType,
            boolean lastInRule) {
        _refToken(assignId, t, label, args, inverted, autoGenType,
                lastInRule);
    }

    public void refTokenRange(Token t1, Token t2, Token label,
            int autoGenType, boolean lastInRule) {
        // Ensure that the DefineGrammarSymbols methods are called; otherwise a range adds more
        // token refs to the alternative by calling MakeGrammar.refToken etc...
        if (t1.getText().charAt(0) == '"') {
            refStringLiteral(t1, null, GrammarElement.AUTO_GEN_NONE,
                    lastInRule);
        } else {
            _refToken(null, t1, null, null, false,
                    GrammarElement.AUTO_GEN_NONE, lastInRule);
        }
        if (t2.getText().charAt(0) == '"') {
            _refStringLiteral(t2, null, GrammarElement.AUTO_GEN_NONE,
                    lastInRule);
        } else {
            _refToken(null, t2, null, null, false,
                    GrammarElement.AUTO_GEN_NONE, lastInRule);
        }
    }
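
    /* Illustrative token range (assumption, not from the original source): a
     * parser rule may use a range whose endpoints are token references or
     * string literals, for example
     *
     *     keyword : "abstract" .. "while" ;
     *
     * refTokenRange() only makes sure both endpoints are defined as symbols
     * here; MakeGrammar builds the actual range element.
     */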

    public void refTreeSpecifier(Token treeSpec) {
    }

    public void refWildcard(Token t, Token label, int autoGenType) {
    }

    /** Get ready to process a new grammar */
    public void reset() {
        grammar = null;
    }

    public void setArgOfRuleRef(Token argaction) {
    }

    /** Set the character vocabulary for a lexer */
    public void setCharVocabulary(BitSet b) {
        // grammar should enforce that this is only called for a lexer
        ((LexerGrammar) grammar).setCharVocabulary(b);
    }
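
    /* Illustrative charVocabulary option (assumption, not from the original
     * source).  In a lexer grammar:
     *
     *     class CalcLexer extends Lexer;
     *     options {
     *         charVocabulary = '\3'..'\377';  // characters the lexer may see
     *     }
     *
     * The option value arrives here as a BitSet over the character range.
     */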

    /** setFileOption: Associate an option value with a key.
     *  This applies to options for an entire grammar file.
     *  @param key The token containing the option name
     *  @param value The token containing the option value.
     */
    public void setFileOption(Token key, Token value, String filename) {
        if (key.getText().equals("language")) {
            if (value.getType() == ANTLRParser.STRING_LITERAL) {
                language = StringUtils.stripBack(StringUtils
                        .stripFront(value.getText(), '"'), '"');
            } else if (value.getType() == ANTLRParser.TOKEN_REF
                    || value.getType() == ANTLRParser.RULE_REF) {
                language = value.getText();
            } else {
                tool.error(
                        "language option must be string or identifier",
                        filename, value.getLine(), value.getColumn());
            }
        } else if (key.getText().equals("mangleLiteralPrefix")) {
            if (value.getType() == ANTLRParser.STRING_LITERAL) {
                tool.literalsPrefix = StringUtils.stripFrontBack(value
                        .getText(), "\"", "\"");
            } else {
                tool.error("mangleLiteralPrefix option must be string",
                        filename, value.getLine(), value.getColumn());
            }
        } else if (key.getText().equals("upperCaseMangledLiterals")) {
            if (value.getText().equals("true")) {
                tool.upperCaseMangledLiterals = true;
            } else if (value.getText().equals("false")) {
                tool.upperCaseMangledLiterals = false;
            } else {
                grammar.antlrTool.error(
                        "Value for upperCaseMangledLiterals must be true or false",
                        filename, key.getLine(), key.getColumn());
            }
        } else if (key.getText().equals("namespaceStd")
                || key.getText().equals("namespaceAntlr")
                || key.getText().equals("genHashLines")) {
            if (!language.equals("Cpp")) {
                tool.error(
                        key.getText() + " option only valid for C++",
                        filename, key.getLine(), key.getColumn());
            } else {
                if (key.getText().equals("noConstructors")) {
                    if (!(value.getText().equals("true") || value
                            .getText().equals("false")))
                        tool.error(
                                "noConstructors option must be true or false",
                                filename, value.getLine(), value.getColumn());
                    tool.noConstructors = value.getText().equals("true");
                } else if (key.getText().equals("genHashLines")) {
                    if (!(value.getText().equals("true") || value
                            .getText().equals("false")))
                        tool.error(
                                "genHashLines option must be true or false",
                                filename, value.getLine(), value.getColumn());
                    tool.genHashLines = value.getText().equals("true");
                } else {
                    if (value.getType() != ANTLRParser.STRING_LITERAL) {
                        tool.error(key.getText()
                                + " option must be a string", filename,
                                value.getLine(), value.getColumn());
                    } else {
                        if (key.getText().equals("namespaceStd"))
                            tool.namespaceStd = value.getText();
                        else if (key.getText().equals("namespaceAntlr"))
                            tool.namespaceAntlr = value.getText();
                    }
                }
            }
        } else if (key.getText().equals("namespace")) {
            if (!language.equals("Cpp") && !language.equals("CSharp")) {
                tool.error(
                        key.getText()
                                + " option only valid for C++ and C# (a.k.a CSharp)",
                        filename, key.getLine(), key.getColumn());
            } else {
                if (value.getType() != ANTLRParser.STRING_LITERAL) {
                    tool.error(key.getText()
                            + " option must be a string", filename,
                            value.getLine(), value.getColumn());
                } else {
                    if (key.getText().equals("namespace"))
                        tool.setNameSpace(value.getText());
                }
            }
        } else {
            tool.error("Invalid file-level option: " + key.getText(),
                    filename, key.getLine(), value.getColumn());
        }
    }
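
    /* Illustrative file-level options (assumption, not from the original
     * source).  These appear in an options {...} block at the top of the
     * grammar file, before any class definition:
     *
     *     options {
     *         language = "Cpp";          // target language
     *         namespaceAntlr = "antlr";  // C++ only
     *         genHashLines = true;       // C++ only: emit #line directives
     *     }
     */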

    /** setGrammarOption: Associate an option value with a key.
     *  This function forwards to Grammar.setOption for some options.
     *  @param key The token containing the option name
     *  @param value The token containing the option value.
     */
    public void setGrammarOption(Token key, Token value) {
        if (key.getText().equals("tokdef")
                || key.getText().equals("tokenVocabulary")) {
            tool.error(
                    "tokdef/tokenVocabulary options are invalid >= ANTLR 2.6.0.\n"
                            + "  Use importVocab/exportVocab instead.  Please see the documentation.\n"
                            + "  The previous options were so heinous that Terence changed the whole\n"
                            + "  vocabulary mechanism; it was better to change the names rather than\n"
                            + "  subtly change the functionality of the known options.  Sorry!",
                    grammar.getFilename(), value.getLine(), value.getColumn());
        } else if (key.getText().equals("literal")
                && grammar instanceof LexerGrammar) {
            tool.error(
                    "the literal option is invalid >= ANTLR 2.6.0.\n"
                            + "  Use the \"tokens {...}\" mechanism instead.",
                    grammar.getFilename(), value.getLine(), value.getColumn());
        } else if (key.getText().equals("exportVocab")) {
            // Set the token manager associated with the parser
            if (value.getType() == ANTLRParser.RULE_REF
                    || value.getType() == ANTLRParser.TOKEN_REF) {
                grammar.exportVocab = value.getText();
            } else {
                tool.error("exportVocab must be an identifier",
                        grammar.getFilename(), value.getLine(),
                        value.getColumn());
            }
        } else if (key.getText().equals("importVocab")) {
            if (value.getType() == ANTLRParser.RULE_REF
                    || value.getType() == ANTLRParser.TOKEN_REF) {
                grammar.importVocab = value.getText();
            } else {
                tool.error("importVocab must be an identifier",
                        grammar.getFilename(), value.getLine(),
                        value.getColumn());
            }
        } else {
            // Forward all unrecognized options to the grammar
            grammar.setOption(key.getText(), value);
        }
    }
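
    /* Illustrative grammar-level options (assumption, not from the original
     * source).  These appear in the options {...} block of a grammar class:
     *
     *     class ExprParser extends Parser;
     *     options {
     *         k = 2;                 // forwarded to Grammar.setOption
     *         importVocab = Common;  // identifier, handled above
     *         exportVocab = Expr;    // identifier, handled above
     *     }
     */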

    public void setRuleOption(Token key, Token value) {
    }

    public void setSubruleOption(Token key, Token value) {
    }

    /** Start a new lexer */
    public void startLexer(String file, Token name, String superClass,
            String doc) {
        if (numLexers > 0) {
            tool.panic("You may only have one lexer per grammar file: class "
                    + name.getText());
        }
        numLexers++;
        reset();
        //System.out.println("Processing lexer '" + name.getText() + "'");
        // Does the lexer already exist?
        Grammar g = (Grammar) grammars.get(name);
        if (g != null) {
            if (!(g instanceof LexerGrammar)) {
                tool.panic("'" + name.getText()
                        + "' is already defined as a non-lexer");
            } else {
                tool.panic("Lexer '" + name.getText()
                        + "' is already defined");
            }
        } else {
            // Create a new lexer grammar
            LexerGrammar lg = new LexerGrammar(name.getText(), tool,
                    superClass);
            lg.comment = doc;
            lg.processArguments(args);
            lg.setFilename(file);
            grammars.put(lg.getClassName(), lg);
            // Use any preamble action
            lg.preambleAction = thePreambleAction;
            thePreambleAction = new CommonToken(Token.INVALID_TYPE, "");
            // This is now the current grammar
            grammar = lg;
        }
    }

    /** Start a new parser */
    public void startParser(String file, Token name, String superClass,
            String doc) {
        if (numParsers > 0) {
            tool.panic("You may only have one parser per grammar file: class "
                    + name.getText());
        }
        numParsers++;
        reset();
        //System.out.println("Processing parser '" + name.getText() + "'");
        // Is this grammar already defined?
        Grammar g = (Grammar) grammars.get(name);
        if (g != null) {
            if (!(g instanceof ParserGrammar)) {
                tool.panic("'" + name.getText()
                        + "' is already defined as a non-parser");
            } else {
                tool.panic("Parser '" + name.getText()
                        + "' is already defined");
            }
        } else {
            // Create a new grammar
            grammar = new ParserGrammar(name.getText(), tool,
                    superClass);
            grammar.comment = doc;
            grammar.processArguments(args);
            grammar.setFilename(file);
            grammars.put(grammar.getClassName(), grammar);
            // Use any preamble action
            grammar.preambleAction = thePreambleAction;
            thePreambleAction = new CommonToken(Token.INVALID_TYPE, "");
        }
    }

    /** Start a new tree-walker */
    public void startTreeWalker(String file, Token name,
            String superClass, String doc) {
        if (numTreeParsers > 0) {
            tool.panic("You may only have one tree parser per grammar file: class "
                    + name.getText());
        }
        numTreeParsers++;
        reset();
        //System.out.println("Processing tree-walker '" + name.getText() + "'");
        // Is this grammar already defined?
        Grammar g = (Grammar) grammars.get(name);
        if (g != null) {
            if (!(g instanceof TreeWalkerGrammar)) {
                tool.panic("'" + name.getText()
                        + "' is already defined as a non-tree-walker");
            } else {
                tool.panic("Tree-walker '" + name.getText()
                        + "' is already defined");
            }
        } else {
            // Create a new grammar
            grammar = new TreeWalkerGrammar(name.getText(), tool,
                    superClass);
            grammar.comment = doc;
            grammar.processArguments(args);
            grammar.setFilename(file);
            grammars.put(grammar.getClassName(), grammar);
            // Use any preamble action
            grammar.preambleAction = thePreambleAction;
            thePreambleAction = new CommonToken(Token.INVALID_TYPE, "");
        }
    }
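
    /* Illustrative grammar file layout (assumption, not from the original
     * source).  A single .g file may contain at most one lexer, one parser,
     * and one tree-walker, each started through the methods above:
     *
     *     header { package com.example; }
     *
     *     class ExprParser extends Parser;
     *     expr : INT (PLUS INT)* ;
     *
     *     class ExprLexer extends Lexer;
     *     PLUS : '+' ;
     *     INT  : ('0'..'9')+ ;
     *
     *     class ExprTreeWalker extends TreeParser;
     *     walk : #(PLUS walk walk) | INT ;
     */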

    public void synPred() {
    }

    public void zeroOrMoreSubRule() {
    }
}