Source Code Cross Referenced for DefineGrammarSymbols.java (IDE-Netbeans » cnd » antlr)



001:        package antlr;
002:
003:        /* ANTLR Translator Generator
004:         * Project led by Terence Parr at http://www.cs.usfca.edu
005:         * Software rights: http://www.antlr.org/license.html
006:         */
007:
008:        import java.util.Hashtable;
009:
010:        import antlr.collections.impl.BitSet;
011:
012:        /**DefineGrammarSymbols is a behavior for the ANTLRParser that adds all
013:         * the token and rule symbols to the grammar symbol table.
014:         *
015:         * Token types are also assigned to token symbols in this class;
016:         * each token receives its type in the order it is first seen (lexically).
017:         */
018:        public class DefineGrammarSymbols implements  ANTLRGrammarParseBehavior {
019:            // Contains all of the defined parser and lexer Grammars, indexed by name
020:            protected Hashtable grammars = new Hashtable();
021:            // Contains all the TokenManagers indexed by name
022:            protected Hashtable tokenManagers = new Hashtable();
023:            // Current grammar (parser or lexer)
024:            protected Grammar grammar;
025:            // The tool under which this is invoked
026:            protected Tool tool;
027:            // The grammar analyzer object
028:            LLkAnalyzer analyzer;
029:            // The command-line arguments passed to the tool.
030:            // This allows each grammar to parse the arguments as it is created
031:            String[] args;
032:            // Name for the default token manager; deliberately not a valid grammar name, so it cannot collide with one
033:            static final String DEFAULT_TOKENMANAGER_NAME = "*default";
034:            // Header actions apply to all parsers unless redefined
035:            // Contains all of the header actions indexed by name
036:            protected Hashtable headerActions = new Hashtable();
037:            // Place where preamble is stored until a grammar is defined
038:            Token thePreambleAction = new CommonToken(Token.INVALID_TYPE, ""); // init to empty token
039:            // The target language
040:            String language = "Java";
041:
042:            protected int numLexers = 0;
043:            protected int numParsers = 0;
044:            protected int numTreeParsers = 0;
045:
046:            public DefineGrammarSymbols(Tool tool_, String[] args_,
047:                    LLkAnalyzer analyzer_) {
048:                tool = tool_;
049:                args = args_;
050:                analyzer = analyzer_;
051:            }
052:
053:            public void _refStringLiteral(Token lit, Token label,
054:                    int autoGenType, boolean lastInRule) {
055:                if (!(grammar instanceof  LexerGrammar)) {
056:                    // String literals are treated like tokens except by the lexer
057:                    String str = lit.getText();
058:                    if (grammar.tokenManager.getTokenSymbol(str) != null) {
059:                        // string symbol is already defined
060:                        return;
061:                    }
062:                    StringLiteralSymbol sl = new StringLiteralSymbol(str);
063:                    int tt = grammar.tokenManager.nextTokenType();
064:                    sl.setTokenType(tt);
065:                    grammar.tokenManager.define(sl);
066:                }
067:            }
068:
069:            /** Reference a token */
070:            public void _refToken(Token assignId, Token t, Token label,
071:                    Token args, boolean inverted, int autoGenType,
072:                    boolean lastInRule) {
073:                String id = t.getText();
074:                if (!grammar.tokenManager.tokenDefined(id)) {
075:                    /*
076:                    // RK: dish out a warning if the token was not defined before.
077:                    tool.warning("Token '" + id + "' defined outside tokens section",
078:                                 tool.grammarFile, t.getLine(), t.getColumn());
079:                     */
080:                    int tt = grammar.tokenManager.nextTokenType();
081:                    TokenSymbol ts = new TokenSymbol(id);
082:                    ts.setTokenType(tt);
083:                    grammar.tokenManager.define(ts);
084:                }
085:            }
086:
087:            /** Abort the processing of a grammar due to syntax errors */
088:            public void abortGrammar() {
089:                if (grammar != null && grammar.getClassName() != null) {
090:                    grammars.remove(grammar.getClassName());
091:                }
092:                grammar = null;
093:            }
094:
095:            public void beginAlt(boolean doAST_) {
096:            }
097:
098:            public void beginChildList() {
099:            }
100:
101:            // Exception handling
102:            public void beginExceptionGroup() {
103:            }
104:
105:            public void beginExceptionSpec(Token label) {
106:            }
107:
108:            public void beginSubRule(Token label, Token start, boolean not) {
109:            }
110:
111:            public void beginTree(Token tok) throws SemanticException {
112:            }
113:
114:            /** Define a lexer or parser rule */
115:            public void defineRuleName(Token r, String access,
116:                    boolean ruleAutoGen, String docComment)
117:                    throws SemanticException {
118:                String id = r.getText();
119:
120:                //		if ( Character.isUpperCase(id.charAt(0)) ) {
121:                if (r.getType() == ANTLRTokenTypes.TOKEN_REF) {
122:                    // lexer rule
123:                    id = CodeGenerator.encodeLexerRuleName(id);
124:                    // make sure we define it as token identifier also
125:                    if (!grammar.tokenManager.tokenDefined(r.getText())) {
126:                        int tt = grammar.tokenManager.nextTokenType();
127:                        TokenSymbol ts = new TokenSymbol(r.getText());
128:                        ts.setTokenType(tt);
129:                        grammar.tokenManager.define(ts);
130:                    }
131:                }
132:
133:                RuleSymbol rs;
134:                if (grammar.isDefined(id)) {
135:                    // symbol seen before?
136:                    rs = (RuleSymbol) grammar.getSymbol(id);
137:                    // rule just referenced or has it been defined yet?
138:                    if (rs.isDefined()) {
139:                        tool.error("redefinition of rule " + id, grammar
140:                                .getFilename(), r.getLine(), r.getColumn());
141:                    }
142:                } else {
143:                    rs = new RuleSymbol(id);
144:                    grammar.define(rs);
145:                }
146:                rs.setDefined();
147:                rs.access = access;
148:                rs.comment = docComment;
149:            }
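            // Editor's note (not part of the original sources): given a hypothetical lexer
            // rule declaration such as
            //
            //     ID : ('a'..'z')+ ;
            //
            // the TOKEN_REF branch above encodes the rule name via
            // CodeGenerator.encodeLexerRuleName and, if needed, also defines ID as a token
            // symbol with the next free token type, so parser grammars can reference it.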
150:
151:            /** Define a token from a tokens {...} section.
152:             *  The definition must supply a label and a literal, just a label, or just a literal.
153:             */
154:            public void defineToken(Token tokname, Token tokliteral) {
155:                String name = null;
156:                String literal = null;
157:                if (tokname != null) {
158:                    name = tokname.getText();
159:                }
160:                if (tokliteral != null) {
161:                    literal = tokliteral.getText();
162:                }
163:                // System.out.println("defining " + name + " with literal " + literal);
164:                //
165:                if (literal != null) {
166:                    StringLiteralSymbol sl = (StringLiteralSymbol) grammar.tokenManager
167:                            .getTokenSymbol(literal);
168:                    if (sl != null) {
169:                        // This literal is known already.
170:                        // If the literal has no label already, but we can provide
171:                        // one here, then no problem, just map the label to the literal
172:                        // and don't change anything else.
173:                        // Otherwise, labels conflict: error.
174:                        if (name == null || sl.getLabel() != null) {
175:                            tool.warning(
176:                                    "Redefinition of literal in tokens {...}: "
177:                                            + literal, grammar.getFilename(),
178:                                    tokliteral.getLine(), tokliteral
179:                                            .getColumn());
180:                            return;
181:                        } else if (name != null) {
182:                            // The literal had no label, but new def does.  Set it.
183:                            sl.setLabel(name);
184:                            // Also, map the label to the literal.
185:                            grammar.tokenManager.mapToTokenSymbol(name, sl);
186:                        }
187:                    }
188:                    // if they provide a name/label and that name/label already
189:                    // exists, just hook this literal onto old token.
190:                    if (name != null) {
191:                        TokenSymbol ts = (TokenSymbol) grammar.tokenManager
192:                                .getTokenSymbol(name);
193:                        if (ts != null) {
194:                            // watch out that the label is not more than just a token.
195:                            // If it already has a literal attached, then: conflict.
196:                            if (ts instanceof  StringLiteralSymbol) {
197:                                tool.warning(
198:                                        "Redefinition of token in tokens {...}: "
199:                                                + name, grammar.getFilename(),
200:                                        tokliteral.getLine(), tokliteral
201:                                                .getColumn());
202:                                return;
203:                            }
204:                            // a simple token symbol such as DECL is defined
205:                            // must convert it to a StringLiteralSymbol with a
206:                            // label by co-opting token type and killing old
207:                            // TokenSymbol.  Kill mapping and entry in vector
208:                            // of token manager.
209:                            // First, claim token type.
210:                            int ttype = ts.getTokenType();
211:                            // now, create string literal with label
212:                            sl = new StringLiteralSymbol(literal);
213:                            sl.setTokenType(ttype);
214:                            sl.setLabel(name);
215:                            // redefine this critter as a string literal
216:                            grammar.tokenManager.define(sl);
217:                            // make sure the label can be used also.
218:                            grammar.tokenManager.mapToTokenSymbol(name, sl);
219:                            return;
220:                        }
221:                        // here, literal was labeled but not by a known token symbol.
222:                    }
223:                    sl = new StringLiteralSymbol(literal);
224:                    int tt = grammar.tokenManager.nextTokenType();
225:                    sl.setTokenType(tt);
226:                    sl.setLabel(name);
227:                    grammar.tokenManager.define(sl);
228:                    if (name != null) {
229:                        // make the label point at token symbol too
230:                        grammar.tokenManager.mapToTokenSymbol(name, sl);
231:                    }
232:                }
233:
234:                // create a token in the token manager not a literal
235:                else {
236:                    if (grammar.tokenManager.tokenDefined(name)) {
237:                        tool.warning("Redefinition of token in tokens {...}: "
238:                                + name, grammar.getFilename(), tokname
239:                                .getLine(), tokname.getColumn());
240:                        return;
241:                    }
242:                    int tt = grammar.tokenManager.nextTokenType();
243:                    TokenSymbol ts = new TokenSymbol(name);
244:                    ts.setTokenType(tt);
245:                    grammar.tokenManager.define(ts);
246:                }
247:            }
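            // Editor's note (not part of the original sources): a hypothetical tokens{...}
            // section covering the three shapes handled above:
            //
            //     tokens {
            //         PLUS;            // label only    -> plain TokenSymbol
            //         "return";        // literal only  -> StringLiteralSymbol without a label
            //         ASSIGN = ":=";   // label+literal -> labeled StringLiteralSymbol,
            //                          //                  reachable via both name and literal
            //     }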
248:
249:            public void endAlt() {
250:            }
251:
252:            public void endChildList() {
253:            }
254:
255:            public void endExceptionGroup() {
256:            }
257:
258:            public void endExceptionSpec() {
259:            }
260:
261:            public void endGrammar() {
262:            }
263:
264:            /** Called after the optional options section, to compensate for
265:             * options that may not have been set.
266:             * This method is bigger than it needs to be, but is much more
267:             * clear if I delineate all the cases.
268:             */
269:            public void endOptions() {
270:                // NO VOCAB OPTIONS
271:                if (grammar.exportVocab == null && grammar.importVocab == null) {
272:                    grammar.exportVocab = grammar.getClassName();
273:                    // Can we get initial vocab from default shared vocab?
274:                    if (tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
275:                        // Use the already-defined token manager
276:                        grammar.exportVocab = DEFAULT_TOKENMANAGER_NAME;
277:                        TokenManager tm = (TokenManager) tokenManagers
278:                                .get(DEFAULT_TOKENMANAGER_NAME);
279:                        // System.out.println("No tokenVocabulary for '" + grammar.getClassName() + "', using default '" + tm.getName() + "'");
280:                        grammar.setTokenManager(tm);
281:                        return;
282:                    }
283:                    // no shared vocab for file, make new one
284:                    // System.out.println("No exportVocab for '" + grammar.getClassName() + "', creating default '" + grammar.exportVocab + "'");
285:                    TokenManager tm = new SimpleTokenManager(
286:                            grammar.exportVocab, tool);
287:                    grammar.setTokenManager(tm);
288:                    // Add the token manager to the list of token managers
289:                    tokenManagers.put(grammar.exportVocab, tm);
290:                    // no default vocab, so make this the default vocab
291:                    tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
292:                    return;
293:                }
294:
295:                // NO OUTPUT, BUT HAS INPUT VOCAB
296:                if (grammar.exportVocab == null && grammar.importVocab != null) {
297:                    grammar.exportVocab = grammar.getClassName();
298:                    // first make sure input!=output
299:                    if (grammar.importVocab.equals(grammar.exportVocab)) {
300:                        tool
301:                                .warning("Grammar "
302:                                        + grammar.getClassName()
303:                                        + " cannot have importVocab same as default output vocab (grammar name); ignored.");
304:                        // kill importVocab option and try again: use default vocab
305:                        grammar.importVocab = null;
306:                        endOptions();
307:                        return;
308:                    }
309:                    // check to see if the vocab is already in memory
310:                    // (defined by another grammar in the file).  Not normal situation.
311:                    if (tokenManagers.containsKey(grammar.importVocab)) {
312:                        // make a copy since we'll be generating a new output vocab
313:                        // and we don't want to affect this one.  Set the name to
314:                        // the default output vocab==classname.
315:                        TokenManager tm = (TokenManager) tokenManagers
316:                                .get(grammar.importVocab);
317:                        // System.out.println("Duping importVocab of " + grammar.importVocab);
318:                        TokenManager dup = (TokenManager) tm.clone();
319:                        dup.setName(grammar.exportVocab);
320:                        // System.out.println("Setting name to " + grammar.exportVocab);
321:                        dup.setReadOnly(false);
322:                        grammar.setTokenManager(dup);
323:                        tokenManagers.put(grammar.exportVocab, dup);
324:                        return;
325:                    }
326:                    // System.out.println("reading in vocab "+grammar.importVocab);
327:                    // Must be a file, go get it.
328:                    ImportVocabTokenManager tm = new ImportVocabTokenManager(
329:                            grammar, grammar.importVocab
330:                                    + CodeGenerator.TokenTypesFileSuffix
331:                                    + CodeGenerator.TokenTypesFileExt,
332:                            grammar.exportVocab, tool);
333:                    tm.setReadOnly(false); // since renamed, can write out
334:                    // Add this token manager to the list so its tokens will be generated
335:                    tokenManagers.put(grammar.exportVocab, tm);
336:                    // System.out.println("vocab renamed to default output vocab of "+tm.getName());
337:                    // Assign the token manager to this grammar.
338:                    grammar.setTokenManager(tm);
339:
340:                    // set default vocab if none
341:                    if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
342:                        tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
343:                    }
344:
345:                    return;
346:                }
347:
348:                // OUTPUT VOCAB, BUT NO INPUT VOCAB
349:                if (grammar.exportVocab != null && grammar.importVocab == null) {
350:                    // share with previous vocab if it exists
351:                    if (tokenManagers.containsKey(grammar.exportVocab)) {
352:                        // Use the already-defined token manager
353:                        TokenManager tm = (TokenManager) tokenManagers
354:                                .get(grammar.exportVocab);
355:                        // System.out.println("Sharing exportVocab of " + grammar.exportVocab);
356:                        grammar.setTokenManager(tm);
357:                        return;
358:                    }
359:                    // create new output vocab
360:                    // System.out.println("Creating exportVocab " + grammar.exportVocab);
361:                    TokenManager tm = new SimpleTokenManager(
362:                            grammar.exportVocab, tool);
363:                    grammar.setTokenManager(tm);
364:                    // Add the token manager to the list of token managers
365:                    tokenManagers.put(grammar.exportVocab, tm);
366:                    // set default vocab if none
367:                    if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
368:                        tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
369:                    }
370:                    return;
371:                }
372:
373:                // BOTH INPUT AND OUTPUT VOCAB
374:                if (grammar.exportVocab != null && grammar.importVocab != null) {
375:                    // don't want input==output
376:                    if (grammar.importVocab.equals(grammar.exportVocab)) {
377:                        tool
378:                                .error("exportVocab of "
379:                                        + grammar.exportVocab
380:                                        + " same as importVocab; probably not what you want");
381:                    }
382:                    // does the input vocab already exist in memory?
383:                    if (tokenManagers.containsKey(grammar.importVocab)) {
384:                        // make a copy since we'll be generating a new output vocab
385:                        // and we don't want to affect this one.
386:                        TokenManager tm = (TokenManager) tokenManagers
387:                                .get(grammar.importVocab);
388:                        // System.out.println("Duping importVocab of " + grammar.importVocab);
389:                        TokenManager dup = (TokenManager) tm.clone();
390:                        dup.setName(grammar.exportVocab);
391:                        // System.out.println("Setting name to " + grammar.exportVocab);
392:                        dup.setReadOnly(false);
393:                        grammar.setTokenManager(dup);
394:                        tokenManagers.put(grammar.exportVocab, dup);
395:                        return;
396:                    }
397:                    // Must be a file, go get it.
398:                    ImportVocabTokenManager tm = new ImportVocabTokenManager(
399:                            grammar, grammar.importVocab
400:                                    + CodeGenerator.TokenTypesFileSuffix
401:                                    + CodeGenerator.TokenTypesFileExt,
402:                            grammar.exportVocab, tool);
403:                    tm.setReadOnly(false); // write it out as we've changed name
404:                    // Add this token manager to the list so its tokens will be generated
405:                    tokenManagers.put(grammar.exportVocab, tm);
406:                    // Assign the token manager to this grammar.
407:                    grammar.setTokenManager(tm);
408:
409:                    // set default vocab if none
410:                    if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
411:                        tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
412:                    }
413:
414:                    return;
415:                }
416:            }
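            // Editor's note (not part of the original sources): the four cases above are
            // driven by the grammar's options. A hypothetical grammar that exercises the
            // "importVocab but no exportVocab" path:
            //
            //     class MyParser extends Parser;
            //     options {
            //         importVocab = MyLexer;   // reuse an in-memory vocab, or read the
            //                                  // generated token types file for MyLexer
            //         // exportVocab defaults to the class name, MyParser
            //     }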
417:
418:            public void endRule(String r) {
419:            }
420:
421:            public void endSubRule() {
422:            }
423:
424:            public void endTree() {
425:            }
426:
427:            public void hasError() {
428:            }
429:
430:            public void noASTSubRule() {
431:            }
432:
433:            public void oneOrMoreSubRule() {
434:            }
435:
436:            public void optionalSubRule() {
437:            }
438:
439:            public void setUserExceptions(String thr) {
440:            }
441:
442:            public void refAction(Token action) {
443:            }
444:
445:            public void refArgAction(Token action) {
446:            }
447:
448:            public void refCharLiteral(Token lit, Token label,
449:                    boolean inverted, int autoGenType, boolean lastInRule) {
450:            }
451:
452:            public void refCharRange(Token t1, Token t2, Token label,
453:                    int autoGenType, boolean lastInRule) {
454:            }
455:
456:            public void refElementOption(Token option, Token value) {
457:            }
458:
459:            public void refTokensSpecElementOption(Token tok, Token option,
460:                    Token value) {
461:            }
462:
463:            public void refExceptionHandler(Token exTypeAndName, Token action) {
464:            }
465:
466:            // Header action applies to all parsers and lexers.
467:            public void refHeaderAction(Token name, Token act) {
468:                String key;
469:
470:                if (name == null)
471:                    key = "";
472:                else
473:                    key = StringUtils
474:                            .stripFrontBack(name.getText(), "\"", "\"");
475:
476:                // FIXME: depending on the mode the inserted header actions should
477:                // be checked for sanity.
478:                if (headerActions.containsKey(key)) {
479:                    if (key.equals(""))
480:                        tool.error(act.getLine()
481:                                + ": header action already defined");
482:                    else
483:                        tool.error(act.getLine() + ": header action '" + key
484:                                + "' already defined");
485:                }
486:                headerActions.put(key, act);
487:            }
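            // Editor's note (not part of the original sources): header actions may be
            // anonymous or named, e.g. (hypothetical names)
            //
            //     header { import java.util.*; }
            //     header "some_codegen_slot" { /* target-specific header */ }
            //
            // The quoted name is stripped above and used as the key into headerActions;
            // which names are meaningful depends on the code-generator mode (see the
            // FIXME above).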
488:
489:            public String getHeaderAction(String name) {
490:                Token t = (Token) headerActions.get(name);
491:                if (t == null) {
492:                    return "";
493:                }
494:                return t.getText();
495:            }
496:
497:            public int getHeaderActionLine(String name) {
498:                Token t = (Token) headerActions.get(name);
499:                if (t == null) {
500:                    return 0;
501:                }
502:                return t.getLine();
503:            }
504:
505:            public void refInitAction(Token action) {
506:            }
507:
508:            public void refMemberAction(Token act) {
509:            }
510:
511:            public void refPreambleAction(Token act) {
512:                thePreambleAction = act;
513:            }
514:
515:            public void refReturnAction(Token returnAction) {
516:            }
517:
518:            public void refRule(Token idAssign, Token r, Token label,
519:                    Token args, int autoGenType) {
520:                String id = r.getText();
521:                //		if ( Character.isUpperCase(id.charAt(0)) ) { // lexer rule?
522:                if (r.getType() == ANTLRTokenTypes.TOKEN_REF) {
523:                    // lexer rule?
524:                    id = CodeGenerator.encodeLexerRuleName(id);
525:                }
526:                if (!grammar.isDefined(id)) {
527:                    grammar.define(new RuleSymbol(id));
528:                }
529:            }
530:
531:            public void refSemPred(Token pred) {
532:            }
533:
534:            public void refStringLiteral(Token lit, Token label,
535:                    int autoGenType, boolean lastInRule) {
536:                _refStringLiteral(lit, label, autoGenType, lastInRule);
537:            }
538:
539:            /** Reference a token */
540:            public void refToken(Token assignId, Token t, Token label,
541:                    Token args, boolean inverted, int autoGenType,
542:                    boolean lastInRule) {
543:                _refToken(assignId, t, label, args, inverted, autoGenType,
544:                        lastInRule);
545:            }
546:
547:            public void refTokenRange(Token t1, Token t2, Token label,
548:                    int autoGenType, boolean lastInRule) {
549:                // ensure that the DefineGrammarSymbols methods are called; otherwise a range adds more
550:                // token refs to the alternative by calling MakeGrammar.refToken etc.
551:                if (t1.getText().charAt(0) == '"') {
552:                    refStringLiteral(t1, null, GrammarElement.AUTO_GEN_NONE,
553:                            lastInRule);
554:                } else {
555:                    _refToken(null, t1, null, null, false,
556:                            GrammarElement.AUTO_GEN_NONE, lastInRule);
557:                }
558:                if (t2.getText().charAt(0) == '"') {
559:                    _refStringLiteral(t2, null, GrammarElement.AUTO_GEN_NONE,
560:                            lastInRule);
561:                } else {
562:                    _refToken(null, t2, null, null, false,
563:                            GrammarElement.AUTO_GEN_NONE, lastInRule);
564:                }
565:            }
566:
567:            public void refTreeSpecifier(Token treeSpec) {
568:            }
569:
570:            public void refWildcard(Token t, Token label, int autoGenType) {
571:            }
572:
573:            /** Get ready to process a new grammar */
574:            public void reset() {
575:                grammar = null;
576:            }
577:
578:            public void setArgOfRuleRef(Token argaction) {
579:            }
580:
581:            /** Set the character vocabulary for a lexer */
582:            public void setCharVocabulary(BitSet b) {
583:                // grammar should enforce that this is only called for lexer
584:                ((LexerGrammar) grammar).setCharVocabulary(b);
585:            }
586:
587:            /** setFileOption: Associate an option value with a key.
588:             * This applies to options for an entire grammar file.
589:             * @param key The token containing the option name
590:             * @param value The token containing the option value.
591:             */
592:            public void setFileOption(Token key, Token value, String filename) {
593:                if (key.getText().equals("language")) {
594:                    if (value.getType() == ANTLRParser.STRING_LITERAL) {
595:                        language = StringUtils.stripBack(StringUtils
596:                                .stripFront(value.getText(), '"'), '"');
597:                    } else if (value.getType() == ANTLRParser.TOKEN_REF
598:                            || value.getType() == ANTLRParser.RULE_REF) {
599:                        language = value.getText();
600:                    } else {
601:                        tool.error(
602:                                "language option must be string or identifier",
603:                                filename, value.getLine(), value.getColumn());
604:                    }
605:                } else if (key.getText().equals("mangleLiteralPrefix")) {
606:                    if (value.getType() == ANTLRParser.STRING_LITERAL) {
607:                        tool.literalsPrefix = StringUtils.stripFrontBack(value
608:                                .getText(), "\"", "\"");
609:                    } else {
610:                        tool.error("mangleLiteralPrefix option must be string",
611:                                filename, value.getLine(), value.getColumn());
612:                    }
613:                } else if (key.getText().equals("upperCaseMangledLiterals")) {
614:                    if (value.getText().equals("true")) {
615:                        tool.upperCaseMangledLiterals = true;
616:                    } else if (value.getText().equals("false")) {
617:                        tool.upperCaseMangledLiterals = false;
618:                    } else {
619:                        grammar.antlrTool
620:                                .error(
621:                                        "Value for upperCaseMangledLiterals must be true or false",
622:                                        filename, key.getLine(), key
623:                                                .getColumn());
624:                    }
625:                } else if (key.getText().equals("namespaceStd")
626:                        || key.getText().equals("namespaceAntlr")
627:                        || key.getText().equals("genHashLines")) {
628:                    if (!language.equals("Cpp")) {
629:                        tool.error(
630:                                key.getText() + " option only valid for C++",
631:                                filename, key.getLine(), key.getColumn());
632:                    } else {
633:                        if (key.getText().equals("noConstructors")) {
634:                            if (!(value.getText().equals("true") || value
635:                                    .getText().equals("false")))
636:                                tool
637:                                        .error(
638:                                                "noConstructors option must be true or false",
639:                                                filename, value.getLine(),
640:                                                value.getColumn());
641:                            tool.noConstructors = value.getText()
642:                                    .equals("true");
643:                        } else if (key.getText().equals("genHashLines")) {
644:                            if (!(value.getText().equals("true") || value
645:                                    .getText().equals("false")))
646:                                tool
647:                                        .error(
648:                                                "genHashLines option must be true or false",
649:                                                filename, value.getLine(),
650:                                                value.getColumn());
651:                            tool.genHashLines = value.getText().equals("true");
652:                        } else {
653:                            if (value.getType() != ANTLRParser.STRING_LITERAL) {
654:                                tool.error(key.getText()
655:                                        + " option must be a string", filename,
656:                                        value.getLine(), value.getColumn());
657:                            } else {
658:                                if (key.getText().equals("namespaceStd"))
659:                                    tool.namespaceStd = value.getText();
660:                                else if (key.getText().equals("namespaceAntlr"))
661:                                    tool.namespaceAntlr = value.getText();
662:                            }
663:                        }
664:                    }
665:                } else if (key.getText().equals("namespace")) {
666:                    if (!language.equals("Cpp") && !language.equals("CSharp")) {
667:                        tool
668:                                .error(
669:                                        key.getText()
670:                                                + " option only valid for C++ and C# (a.k.a CSharp)",
671:                                        filename, key.getLine(), key
672:                                                .getColumn());
673:                    } else {
674:                        if (value.getType() != ANTLRParser.STRING_LITERAL) {
675:                            tool.error(key.getText()
676:                                    + " option must be a string", filename,
677:                                    value.getLine(), value.getColumn());
678:                        } else {
679:                            if (key.getText().equals("namespace"))
680:                                tool.setNameSpace(value.getText());
681:                        }
682:                    }
683:                } else {
684:                    tool.error("Invalid file-level option: " + key.getText(),
685:                            filename, key.getLine(), value.getColumn());
686:                }
687:            }
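            // Editor's note (not part of the original sources): file-level options precede
            // the first grammar in a file. A hypothetical section using keys recognized above:
            //
            //     options {
            //         language = "Cpp";           // string or identifier
            //         namespaceAntlr = "antlr";   // only accepted when language is Cpp
            //         genHashLines = true;
            //     }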
688:
689:            /** setGrammarOption: Associate an option value with a key.
690:             * This function forwards to Grammar.setOption for some options.
691:             * @param key The token containing the option name
692:             * @param value The token containing the option value.
693:             */
694:            public void setGrammarOption(Token key, Token value) {
695:                if (key.getText().equals("tokdef")
696:                        || key.getText().equals("tokenVocabulary")) {
697:                    tool
698:                            .error(
699:                                    "tokdef/tokenVocabulary options are invalid >= ANTLR 2.6.0.\n"
700:                                            + "  Use importVocab/exportVocab instead.  Please see the documentation.\n"
701:                                            + "  The previous options were so heinous that Terence changed the whole\n"
702:                                            + "  vocabulary mechanism; it was better to change the names rather than\n"
703:                                            + "  subtly change the functionality of the known options.  Sorry!",
704:                                    grammar.getFilename(), value.getLine(),
705:                                    value.getColumn());
706:                } else if (key.getText().equals("literal")
707:                        && grammar instanceof  LexerGrammar) {
708:                    tool
709:                            .error(
710:                                    "the literal option is invalid >= ANTLR 2.6.0.\n"
711:                                            + "  Use the \"tokens {...}\" mechanism instead.",
712:                                    grammar.getFilename(), value.getLine(),
713:                                    value.getColumn());
714:                } else if (key.getText().equals("exportVocab")) {
715:                    // Set the token manager associated with the parser
716:                    if (value.getType() == ANTLRParser.RULE_REF
717:                            || value.getType() == ANTLRParser.TOKEN_REF) {
718:                        grammar.exportVocab = value.getText();
719:                    } else {
720:                        tool.error("exportVocab must be an identifier", grammar
721:                                .getFilename(), value.getLine(), value
722:                                .getColumn());
723:                    }
724:                } else if (key.getText().equals("importVocab")) {
725:                    if (value.getType() == ANTLRParser.RULE_REF
726:                            || value.getType() == ANTLRParser.TOKEN_REF) {
727:                        grammar.importVocab = value.getText();
728:                    } else {
729:                        tool.error("importVocab must be an identifier", grammar
730:                                .getFilename(), value.getLine(), value
731:                                .getColumn());
732:                    }
733:                } else if (key.getText().equals("k")) {
734:                    if (grammar instanceof  TreeWalkerGrammar
735:                            && !value.getText().equals("1")) {
736:                        tool.error("Treewalkers only support k=1", grammar
737:                                .getFilename(), value.getLine(), value
738:                                .getColumn());
739:                    } else {
740:                        grammar.setOption(key.getText(), value);
741:                    }
742:                } else {
743:                    // Forward all unrecognized options to the grammar
744:                    grammar.setOption(key.getText(), value);
745:                }
746:            }
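            // Editor's note (not part of the original sources): per-grammar options handled
            // above, in a hypothetical tree-walker:
            //
            //     class MyWalker extends TreeParser;
            //     options {
            //         importVocab = MyParser;   // must be an identifier
            //         k = 1;                    // tree-walkers only support k=1
            //     }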
747:
748:            public void setRuleOption(Token key, Token value) {
749:            }
750:
751:            public void setSubruleOption(Token key, Token value) {
752:            }
753:
754:            /** Start a new lexer */
755:            public void startLexer(String file, Token name, String superClass,
756:                    String doc) {
757:                if (numLexers > 0) {
758:                    tool
759:                            .fatalError("You may only have one lexer per grammar file: class "
760:                                    + name.getText());
761:                }
762:                numLexers++;
763:                reset();
764:                //System.out.println("Processing lexer '" + name.getText() + "'");
765:                // Does the lexer already exist?
766:                Grammar g = (Grammar) grammars.get(name);
767:                if (g != null) {
768:                    if (!(g instanceof  LexerGrammar)) {
769:                        tool.fatalError("'" + name.getText()
770:                                + "' is already defined as a non-lexer");
771:                    } else {
772:                        tool.fatalError("Lexer '" + name.getText()
773:                                + "' is already defined");
774:                    }
775:                } else {
776:                    // Create a new lexer grammar
777:                    LexerGrammar lg = new LexerGrammar(name.getText(), tool,
778:                            superClass);
779:                    lg.comment = doc;
780:                    lg.processArguments(args);
781:                    lg.setFilename(file);
782:                    grammars.put(lg.getClassName(), lg);
783:                    // Use any preamble action
784:                    lg.preambleAction = thePreambleAction;
785:                    thePreambleAction = new CommonToken(Token.INVALID_TYPE, "");
786:                    // This is now the current grammar
787:                    grammar = lg;
788:                }
789:            }
790:
791:            /** Start a new parser */
792:            public void startParser(String file, Token name, String superClass,
793:                    String doc) {
794:                if (numParsers > 0) {
795:                    tool
796:                            .fatalError("You may only have one parser per grammar file: class "
797:                                    + name.getText());
798:                }
799:                numParsers++;
800:                reset();
801:                //System.out.println("Processing parser '" + name.getText() + "'");
802:                // Is this grammar already defined?
803:                Grammar g = (Grammar) grammars.get(name);
804:                if (g != null) {
805:                    if (!(g instanceof  ParserGrammar)) {
806:                        tool.fatalError("'" + name.getText()
807:                                + "' is already defined as a non-parser");
808:                    } else {
809:                        tool.fatalError("Parser '" + name.getText()
810:                                + "' is already defined");
811:                    }
812:                } else {
813:                    // Create a new grammar
814:                    grammar = new ParserGrammar(name.getText(), tool,
815:                            superClass);
816:                    grammar.comment = doc;
817:                    grammar.processArguments(args);
818:                    grammar.setFilename(file);
819:                    grammars.put(grammar.getClassName(), grammar);
820:                    // Use any preamble action
821:                    grammar.preambleAction = thePreambleAction;
822:                    thePreambleAction = new CommonToken(Token.INVALID_TYPE, "");
823:                }
824:            }
825:
826:            /** Start a new tree-walker */
827:            public void startTreeWalker(String file, Token name,
828:                    String superClass, String doc) {
829:                if (numTreeParsers > 0) {
830:                    tool
831:                            .fatalError("You may only have one tree parser per grammar file: class "
832:                                    + name.getText());
833:                }
834:                numTreeParsers++;
835:                reset();
836:                //System.out.println("Processing tree-walker '" + name.getText() + "'");
837:                // Is this grammar already defined?
838:                Grammar g = (Grammar) grammars.get(name);
839:                if (g != null) {
840:                    if (!(g instanceof  TreeWalkerGrammar)) {
841:                        tool.fatalError("'" + name.getText()
842:                                + "' is already defined as a non-tree-walker");
843:                    } else {
844:                        tool.fatalError("Tree-walker '" + name.getText()
845:                                + "' is already defined");
846:                    }
847:                } else {
848:                    // Create a new grammar
849:                    grammar = new TreeWalkerGrammar(name.getText(), tool,
850:                            superClass);
851:                    grammar.comment = doc;
852:                    grammar.processArguments(args);
853:                    grammar.setFilename(file);
854:                    grammars.put(grammar.getClassName(), grammar);
855:                    // Use any preamble action
856:                    grammar.preambleAction = thePreambleAction;
857:                    thePreambleAction = new CommonToken(Token.INVALID_TYPE, "");
858:                }
859:            }
860:
861:            public void synPred() {
862:            }
863:
864:            public void zeroOrMoreSubRule() {
865:            }
866:        }
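
Usage sketch (editor's addition). The class comment above says token types are handed out in the order symbols are first seen. The following is a minimal, hypothetical sketch of that bookkeeping using only the SimpleTokenManager / TokenSymbol / StringLiteralSymbol calls already exercised in the listing; it assumes antlr.Tool has a public no-argument constructor and, because these helper classes may be package-visible, places the sketch in package antlr. The vocabulary name "Demo" and the token names are invented.

package antlr;

public class TokenTypeOrderSketch {
    public static void main(String[] args) {
        Tool tool = new Tool();
        // A fresh vocabulary, as endOptions() creates when no vocab options are given.
        SimpleTokenManager tm = new SimpleTokenManager("Demo", tool);

        // Plain token identifiers receive consecutive types in the order defined,
        // mirroring _refToken() and the label-only branch of defineToken().
        String[] names = { "ID", "PLUS", "SEMI" };
        for (int i = 0; i < names.length; i++) {
            if (!tm.tokenDefined(names[i])) {
                TokenSymbol ts = new TokenSymbol(names[i]);
                ts.setTokenType(tm.nextTokenType());
                tm.define(ts);
            }
        }

        // A string literal gets the next type via a StringLiteralSymbol, mirroring
        // _refStringLiteral(); note that the stored text keeps its surrounding quotes.
        StringLiteralSymbol lit = new StringLiteralSymbol("\"return\"");
        lit.setTokenType(tm.nextTokenType());
        tm.define(lit);

        TokenSymbol id = (TokenSymbol) tm.getTokenSymbol("ID");
        TokenSymbol ret = (TokenSymbol) tm.getTokenSymbol("\"return\"");
        System.out.println("ID       -> type " + id.getTokenType());
        System.out.println("\"return\" -> type " + ret.getTokenType());
    }
}

Compiled against the ANTLR 2.x classes in the same package, the printed types come out consecutive and in definition order, which is the behavior the class Javadoc describes.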