Source code cross-reference for DefineGrammarSymbols.java in » Database-ORM » toplink » persistence » antlr — Java Source Code and Java Documentation

Java Source Code / Java Documentation
1. 6.0 JDK Core
2. 6.0 JDK Modules
3. 6.0 JDK Modules com.sun
4. 6.0 JDK Modules com.sun.java
5. 6.0 JDK Modules sun
6. 6.0 JDK Platform
7. Ajax
8. Apache Harmony Java SE
9. Aspect oriented
10. Authentication Authorization
11. Blogger System
12. Build
13. Byte Code
14. Cache
15. Chart
16. Chat
17. Code Analyzer
18. Collaboration
19. Content Management System
20. Database Client
21. Database DBMS
22. Database JDBC Connection Pool
23. Database ORM
24. Development
25. EJB Server geronimo
26. EJB Server GlassFish
27. EJB Server JBoss 4.2.1
28. EJB Server resin 3.1.5
29. ERP CRM Financial
30. ESB
31. Forum
32. GIS
33. Graphic Library
34. Groupware
35. HTML Parser
36. IDE
37. IDE Eclipse
38. IDE Netbeans
39. Installer
40. Internationalization Localization
41. Inversion of Control
42. Issue Tracking
43. J2EE
44. JBoss
45. JMS
46. JMX
47. Library
48. Mail Clients
49. Net
50. Parser
51. PDF
52. Portal
53. Profiler
54. Project Management
55. Report
56. RSS RDF
57. Rule Engine
58. Science
59. Scripting
60. Search Engine
61. Security
62. Servlet Container
63. Source Control
64. Swing Library
65. Template Engine
66. Test Coverage
67. Testing
68. UML
69. Web Crawler
70. Web Framework
71. Web Mail
72. Web Server
73. Web Services
74. Web Services apache cxf 2.0.1
75. Web Services AXIS2
76. Wiki Engine
77. Workflow Engines
78. XML
79. XML UI
Java
Java Tutorial
Java Open Source
Jar File Download
Java Articles
Java Products
Java by API
Photoshop Tutorials
Maya Tutorials
Flash Tutorials
3ds-Max Tutorials
Illustrator Tutorials
GIMP Tutorials
C# / C Sharp
C# / CSharp Tutorial
C# / CSharp Open Source
ASP.Net
ASP.NET Tutorial
JavaScript DHTML
JavaScript Tutorial
JavaScript Reference
HTML / CSS
HTML CSS Reference
C / ANSI-C
C Tutorial
C++
C++ Tutorial
Ruby
PHP
Python
Python Tutorial
Python Open Source
SQL Server / T-SQL
SQL Server / T-SQL Tutorial
Oracle PL / SQL
Oracle PL/SQL Tutorial
PostgreSQL
SQL / MySQL
MySQL Tutorial
VB.Net
VB.Net Tutorial
Flash / Flex / ActionScript
VBA / Excel / Access / Word
XML
XML Tutorial
Microsoft Office PowerPoint 2007 Tutorial
Microsoft Office Excel 2007 Tutorial
Microsoft Office Word 2007 Tutorial
Java Source Code / Java Documentation » Database ORM » toplink » persistence.antlr 
Source Cross Referenced  Class Diagram Java Document (Java Doc) 


001:        package persistence.antlr;
002:
003:        /* ANTLR Translator Generator
004:         * Project led by Terence Parr at http://www.jGuru.com
005:         * Software rights: http://www.antlr.org/license.html
006:         *
007:         */
008:
009:        import java.util.Hashtable;
010:
011:        import persistence.antlr.collections.impl.BitSet;
012:
/**DefineGrammarSymbols is a behavior for the ANTLRParser that adds all
 * the token and rule symbols to the grammar symbol table.
 *
 * Token types are assigned to token symbols in this class also.
 * The token type for a token is done in the order seen (lexically).
 */
public class DefineGrammarSymbols implements ANTLRGrammarParseBehavior {
    // Contains all of the defined parser and lexer Grammar's indexed by name
    protected Hashtable grammars = new Hashtable();
    // Contains all the TokenManagers (shared token vocabularies) indexed by name
    protected Hashtable tokenManagers = new Hashtable();
    // Current grammar (parser or lexer) being populated
    protected Grammar grammar;
    // The tool under which this is invoked
    protected Tool tool;
    // The grammar analyzer object
    LLkAnalyzer analyzer;
    // The command-line arguments passed to the tool.
    // This allows each grammar to parse the arguments as it is created
    String[] args;
    // Name for default token manager; the '*' ensures it does not match any valid grammar name
    static final String DEFAULT_TOKENMANAGER_NAME = "*default";
    // Header actions apply to all parsers unless redefined
    // Contains all of the header actions indexed by name ("" is the unnamed header)
    protected Hashtable headerActions = new Hashtable();
    // Place where preamble is stored until a grammar is defined
    Token thePreambleAction = new CommonToken(Token.INVALID_TYPE, ""); // init to empty token
    // The target language; may be overridden by the file-level "language" option
    String language = "Java";

    // Counts of each grammar kind seen so far in this file
    protected int numLexers = 0;
    protected int numParsers = 0;
    protected int numTreeParsers = 0;
046:
047:            public DefineGrammarSymbols(Tool tool_, String[] args_,
048:                    LLkAnalyzer analyzer_) {
049:                tool = tool_;
050:                args = args_;
051:                analyzer = analyzer_;
052:            }
053:
054:            public void _refStringLiteral(Token lit, Token label,
055:                    int autoGenType, boolean lastInRule) {
056:                if (!(grammar instanceof  LexerGrammar)) {
057:                    // String literals are treated like tokens except by the lexer
058:                    String str = lit.getText();
059:                    if (grammar.tokenManager.getTokenSymbol(str) != null) {
060:                        // string symbol is already defined
061:                        return;
062:                    }
063:                    StringLiteralSymbol sl = new StringLiteralSymbol(str);
064:                    int tt = grammar.tokenManager.nextTokenType();
065:                    sl.setTokenType(tt);
066:                    grammar.tokenManager.define(sl);
067:                }
068:            }
069:
070:            /** Reference a token */
071:            public void _refToken(Token assignId, Token t, Token label,
072:                    Token args, boolean inverted, int autoGenType,
073:                    boolean lastInRule) {
074:                String id = t.getText();
075:                if (!grammar.tokenManager.tokenDefined(id)) {
076:                    /*
077:                    // RK: dish out a warning if the token was not defined before.
078:                    tool.warning("Token '" + id + "' defined outside tokens section",
079:                                 tool.grammarFile, t.getLine(), t.getColumn());
080:                     */
081:                    int tt = grammar.tokenManager.nextTokenType();
082:                    TokenSymbol ts = new TokenSymbol(id);
083:                    ts.setTokenType(tt);
084:                    grammar.tokenManager.define(ts);
085:                }
086:            }
087:
088:            /** Abort the processing of a grammar due to syntax errors */
089:            public void abortGrammar() {
090:                if (grammar != null && grammar.getClassName() != null) {
091:                    grammars.remove(grammar.getClassName());
092:                }
093:                grammar = null;
094:            }
095:
    /** Parse event: an alternative begins.  No-op in this symbol-defining pass. */
    public void beginAlt(boolean doAST_) {
    }

    /** Parse event: a child list begins.  No-op here. */
    public void beginChildList() {
    }

    // Exception handling
    /** Parse event: an exception group begins.  No-op here. */
    public void beginExceptionGroup() {
    }

    /** Parse event: an exception spec begins.  No-op here. */
    public void beginExceptionSpec(Token label) {
    }

    /** Parse event: a subrule begins.  No-op here. */
    public void beginSubRule(Token label, Token start, boolean not) {
    }

    /** Parse event: a tree spec begins.  No-op here. */
    public void beginTree(Token tok) throws SemanticException {
    }
114:
115:            /** Define a lexer or parser rule */
116:            public void defineRuleName(Token r, String access,
117:                    boolean ruleAutoGen, String docComment)
118:                    throws SemanticException {
119:                String id = r.getText();
120:
121:                //		if ( Character.isUpperCase(id.charAt(0)) ) {
122:                if (r.type == ANTLRTokenTypes.TOKEN_REF) {
123:                    // lexer rule
124:                    id = CodeGenerator.encodeLexerRuleName(id);
125:                    // make sure we define it as token identifier also
126:                    if (!grammar.tokenManager.tokenDefined(r.getText())) {
127:                        int tt = grammar.tokenManager.nextTokenType();
128:                        TokenSymbol ts = new TokenSymbol(r.getText());
129:                        ts.setTokenType(tt);
130:                        grammar.tokenManager.define(ts);
131:                    }
132:                }
133:
134:                RuleSymbol rs;
135:                if (grammar.isDefined(id)) {
136:                    // symbol seen before?
137:                    rs = (RuleSymbol) grammar.getSymbol(id);
138:                    // rule just referenced or has it been defined yet?
139:                    if (rs.isDefined()) {
140:                        tool.error("redefinition of rule " + id, grammar
141:                                .getFilename(), r.getLine(), r.getColumn());
142:                    }
143:                } else {
144:                    rs = new RuleSymbol(id);
145:                    grammar.define(rs);
146:                }
147:                rs.setDefined();
148:                rs.access = access;
149:                rs.comment = docComment;
150:            }
151:
    /** Define a token from tokens {...}.
     *  Must be label and literal or just label or just a literal.
     *
     *  Handles every combination of (name, literal) against symbols the
     *  token manager already knows: re-labels known literals, converts a
     *  plain TokenSymbol to a labeled StringLiteralSymbol (reusing its
     *  token type), or defines a brand-new symbol with the next token type.
     */
    public void defineToken(Token tokname, Token tokliteral) {
        String name = null;
        String literal = null;
        if (tokname != null) {
            name = tokname.getText();
        }
        if (tokliteral != null) {
            literal = tokliteral.getText();
        }
        // System.out.println("defining " + name + " with literal " + literal);
        //
        if (literal != null) {
            StringLiteralSymbol sl = (StringLiteralSymbol) grammar.tokenManager
                    .getTokenSymbol(literal);
            if (sl != null) {
                // This literal is known already.
                // If the literal has no label already, but we can provide
                // one here, then no problem, just map the label to the literal
                // and don't change anything else.
                // Otherwise, labels conflict: error.
                if (name == null || sl.getLabel() != null) {
                    tool.warning(
                            "Redefinition of literal in tokens {...}: "
                                    + literal, grammar.getFilename(),
                            tokliteral.getLine(), tokliteral
                                    .getColumn());
                    return;
                } else if (name != null) {
                    // (name != null is guaranteed here; the condition is redundant)
                    // The literal had no label, but new def does.  Set it.
                    sl.setLabel(name);
                    // Also, map the label to the literal.
                    grammar.tokenManager.mapToTokenSymbol(name, sl);
                }
            }
            // NOTE(review): when the branch above just mapped name -> sl,
            // the lookup below finds that same StringLiteralSymbol and emits
            // a "Redefinition of token" warning — looks unintended; confirm
            // against upstream ANTLR before changing.
            // if they provide a name/label and that name/label already
            // exists, just hook this literal onto old token.
            if (name != null) {
                TokenSymbol ts = (TokenSymbol) grammar.tokenManager
                        .getTokenSymbol(name);
                if (ts != null) {
                    // watch out that the label is not more than just a token.
                    // If it already has a literal attached, then: conflict.
                    if (ts instanceof StringLiteralSymbol) {
                        tool.warning(
                                "Redefinition of token in tokens {...}: "
                                        + name, grammar.getFilename(),
                                tokliteral.getLine(), tokliteral
                                        .getColumn());
                        return;
                    }
                    // a simple token symbol such as DECL is defined
                    // must convert it to a StringLiteralSymbol with a
                    // label by co-opting token type and killing old
                    // TokenSymbol.  Kill mapping and entry in vector
                    // of token manager.
                    // First, claim token type.
                    int ttype = ts.getTokenType();
                    // now, create string literal with label
                    sl = new StringLiteralSymbol(literal);
                    sl.setTokenType(ttype);
                    sl.setLabel(name);
                    // redefine this critter as a string literal
                    grammar.tokenManager.define(sl);
                    // make sure the label can be used also.
                    grammar.tokenManager.mapToTokenSymbol(name, sl);
                    return;
                }
                // here, literal was labeled but not by a known token symbol.
            }
            // brand-new literal: assign the next free token type
            sl = new StringLiteralSymbol(literal);
            int tt = grammar.tokenManager.nextTokenType();
            sl.setTokenType(tt);
            sl.setLabel(name);
            grammar.tokenManager.define(sl);
            if (name != null) {
                // make the label point at token symbol too
                grammar.tokenManager.mapToTokenSymbol(name, sl);
            }
        }

        // create a token in the token manager not a literal
        else {
            if (grammar.tokenManager.tokenDefined(name)) {
                tool.warning("Redefinition of token in tokens {...}: "
                        + name, grammar.getFilename(), tokname
                        .getLine(), tokname.getColumn());
                return;
            }
            int tt = grammar.tokenManager.nextTokenType();
            TokenSymbol ts = new TokenSymbol(name);
            ts.setTokenType(tt);
            grammar.tokenManager.define(ts);
        }
    }
249:
    /** Parse event: end of an alternative.  No-op here. */
    public void endAlt() {
    }

    /** Parse event: end of a child list.  No-op here. */
    public void endChildList() {
    }

    /** Parse event: end of an exception group.  No-op here. */
    public void endExceptionGroup() {
    }

    /** Parse event: end of an exception spec.  No-op here. */
    public void endExceptionSpec() {
    }

    /** Parse event: end of the grammar.  No-op here. */
    public void endGrammar() {
    }
264:
    /** Called after the optional options section, to compensate for
     * options that may not have been set.
     * This method is bigger than it needs to be, but is much more
     * clear if I delineate all the cases.
     *
     * The four exportVocab/importVocab combinations are mutually
     * exclusive and each ends in a return; in every case a TokenManager
     * is selected or created, assigned to the grammar, and (when new)
     * registered in the shared tokenManagers table.
     */
    public void endOptions() {
        // NO VOCAB OPTIONS
        if (grammar.exportVocab == null && grammar.importVocab == null) {
            grammar.exportVocab = grammar.getClassName();
            // Can we get initial vocab from default shared vocab?
            if (tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
                // Use the already-defined token manager
                grammar.exportVocab = DEFAULT_TOKENMANAGER_NAME;
                TokenManager tm = (TokenManager) tokenManagers
                        .get(DEFAULT_TOKENMANAGER_NAME);
                // System.out.println("No tokenVocabulary for '" + grammar.getClassName() + "', using default '" + tm.getName() + "'");
                grammar.setTokenManager(tm);
                return;
            }
            // no shared vocab for file, make new one
            // System.out.println("No exportVocab for '" + grammar.getClassName() + "', creating default '" + grammar.exportVocab + "'");
            TokenManager tm = new SimpleTokenManager(
                    grammar.exportVocab, tool);
            grammar.setTokenManager(tm);
            // Add the token manager to the list of token managers
            tokenManagers.put(grammar.exportVocab, tm);
            // no default vocab, so make this the default vocab
            tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
            return;
        }

        // NO OUTPUT, BUT HAS INPUT VOCAB
        if (grammar.exportVocab == null && grammar.importVocab != null) {
            grammar.exportVocab = grammar.getClassName();
            // first make sure input!=output
            if (grammar.importVocab.equals(grammar.exportVocab)) {
                tool
                        .warning("Grammar "
                                + grammar.getClassName()
                                + " cannot have importVocab same as default output vocab (grammar name); ignored.");
                // kill importVocab option and try again: use default vocab
                grammar.importVocab = null;
                endOptions();
                return;
            }
            // check to see if the vocab is already in memory
            // (defined by another grammar in the file).  Not normal situation.
            if (tokenManagers.containsKey(grammar.importVocab)) {
                // make a copy since we'll be generating a new output vocab
                // and we don't want to affect this one.  Set the name to
                // the default output vocab==classname.
                TokenManager tm = (TokenManager) tokenManagers
                        .get(grammar.importVocab);
                // System.out.println("Duping importVocab of " + grammar.importVocab);
                TokenManager dup = (TokenManager) tm.clone();
                dup.setName(grammar.exportVocab);
                // System.out.println("Setting name to " + grammar.exportVocab);
                dup.setReadOnly(false);
                grammar.setTokenManager(dup);
                tokenManagers.put(grammar.exportVocab, dup);
                return;
            }
            // System.out.println("reading in vocab "+grammar.importVocab);
            // Must be a file, go get it.
            ImportVocabTokenManager tm = new ImportVocabTokenManager(
                    grammar, grammar.importVocab
                            + CodeGenerator.TokenTypesFileSuffix
                            + CodeGenerator.TokenTypesFileExt,
                    grammar.exportVocab, tool);
            tm.setReadOnly(false); // since renamed, can write out
            // Add this token manager to the list so its tokens will be generated
            tokenManagers.put(grammar.exportVocab, tm);
            // System.out.println("vocab renamed to default output vocab of "+tm.getName());
            // Assign the token manager to this grammar.
            grammar.setTokenManager(tm);

            // set default vocab if none
            if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
                tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
            }

            return;
        }

        // OUTPUT VOCAB, BUT NO INPUT VOCAB
        if (grammar.exportVocab != null && grammar.importVocab == null) {
            // share with previous vocab if it exists
            if (tokenManagers.containsKey(grammar.exportVocab)) {
                // Use the already-defined token manager
                TokenManager tm = (TokenManager) tokenManagers
                        .get(grammar.exportVocab);
                // System.out.println("Sharing exportVocab of " + grammar.exportVocab);
                grammar.setTokenManager(tm);
                return;
            }
            // create new output vocab
            // System.out.println("Creating exportVocab " + grammar.exportVocab);
            TokenManager tm = new SimpleTokenManager(
                    grammar.exportVocab, tool);
            grammar.setTokenManager(tm);
            // Add the token manager to the list of token managers
            tokenManagers.put(grammar.exportVocab, tm);
            // set default vocab if none
            if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
                tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
            }
            return;
        }

        // BOTH INPUT AND OUTPUT VOCAB
        if (grammar.exportVocab != null && grammar.importVocab != null) {
            // don't want input==output
            if (grammar.importVocab.equals(grammar.exportVocab)) {
                tool
                        .error("exportVocab of "
                                + grammar.exportVocab
                                + " same as importVocab; probably not what you want");
            }
            // does the input vocab already exist in memory?
            if (tokenManagers.containsKey(grammar.importVocab)) {
                // make a copy since we'll be generating a new output vocab
                // and we don't want to affect this one.
                TokenManager tm = (TokenManager) tokenManagers
                        .get(grammar.importVocab);
                // System.out.println("Duping importVocab of " + grammar.importVocab);
                TokenManager dup = (TokenManager) tm.clone();
                dup.setName(grammar.exportVocab);
                // System.out.println("Setting name to " + grammar.exportVocab);
                dup.setReadOnly(false);
                grammar.setTokenManager(dup);
                tokenManagers.put(grammar.exportVocab, dup);
                return;
            }
            // Must be a file, go get it.
            ImportVocabTokenManager tm = new ImportVocabTokenManager(
                    grammar, grammar.importVocab
                            + CodeGenerator.TokenTypesFileSuffix
                            + CodeGenerator.TokenTypesFileExt,
                    grammar.exportVocab, tool);
            tm.setReadOnly(false); // write it out as we've changed name
            // Add this token manager to the list so its tokens will be generated
            tokenManagers.put(grammar.exportVocab, tm);
            // Assign the token manager to this grammar.
            grammar.setTokenManager(tm);

            // set default vocab if none
            if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
                tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
            }

            return;
        }
    }
418:
    /** Parse event: end of a rule.  No-op here. */
    public void endRule(String r) {
    }

    /** Parse event: end of a subrule.  No-op here. */
    public void endSubRule() {
    }

    /** Parse event: end of a tree spec.  No-op here. */
    public void endTree() {
    }

    /** Parse event: an error occurred.  No-op here. */
    public void hasError() {
    }

    /** Parse event: a subrule marked with no-AST.  No-op here. */
    public void noASTSubRule() {
    }

    /** Parse event: a (...)+ subrule.  No-op here. */
    public void oneOrMoreSubRule() {
    }

    /** Parse event: a (...)? subrule.  No-op here. */
    public void optionalSubRule() {
    }

    /** Parse event: user-declared throws clause.  No-op here. */
    public void setUserExceptions(String thr) {
    }

    /** Parse event: an inline action.  No-op here. */
    public void refAction(Token action) {
    }

    /** Parse event: an argument action.  No-op here. */
    public void refArgAction(Token action) {
    }

    /** Parse event: a character literal reference.  No-op here. */
    public void refCharLiteral(Token lit, Token label,
            boolean inverted, int autoGenType, boolean lastInRule) {
    }

    /** Parse event: a character range reference.  No-op here. */
    public void refCharRange(Token t1, Token t2, Token label,
            int autoGenType, boolean lastInRule) {
    }

    /** Parse event: an element option.  No-op here. */
    public void refElementOption(Token option, Token value) {
    }

    /** Parse event: an option on a tokens{...} element.  No-op here. */
    public void refTokensSpecElementOption(Token tok, Token option,
            Token value) {
    }

    /** Parse event: an exception handler.  No-op here. */
    public void refExceptionHandler(Token exTypeAndName, Token action) {
    }
466:
467:            // Header action applies to all parsers and lexers.
468:            public void refHeaderAction(Token name, Token act) {
469:                String key;
470:
471:                if (name == null)
472:                    key = "";
473:                else
474:                    key = StringUtils
475:                            .stripFrontBack(name.getText(), "\"", "\"");
476:
477:                // FIXME: depending on the mode the inserted header actions should
478:                // be checked for sanity.
479:                if (headerActions.containsKey(key)) {
480:                    if (key.equals(""))
481:                        tool.error(act.getLine()
482:                                + ": header action already defined");
483:                    else
484:                        tool.error(act.getLine() + ": header action '" + key
485:                                + "' already defined");
486:                }
487:                headerActions.put(key, act);
488:            }
489:
490:            public String getHeaderAction(String name) {
491:                Token t = (Token) headerActions.get(name);
492:                if (t == null) {
493:                    return "";
494:                }
495:                return t.getText();
496:            }
497:
    /** Parse event: a rule init action.  No-op here. */
    public void refInitAction(Token action) {
    }

    /** Parse event: a member action.  No-op here. */
    public void refMemberAction(Token act) {
    }

    /** Remember the preamble action until the grammar it precedes is defined. */
    public void refPreambleAction(Token act) {
        thePreambleAction = act;
    }

    /** Parse event: a return-value action.  No-op here. */
    public void refReturnAction(Token returnAction) {
    }
510:
511:            public void refRule(Token idAssign, Token r, Token label,
512:                    Token args, int autoGenType) {
513:                String id = r.getText();
514:                //		if ( Character.isUpperCase(id.charAt(0)) ) { // lexer rule?
515:                if (r.type == ANTLRTokenTypes.TOKEN_REF) {
516:                    // lexer rule?
517:                    id = CodeGenerator.encodeLexerRuleName(id);
518:                }
519:                if (!grammar.isDefined(id)) {
520:                    grammar.define(new RuleSymbol(id));
521:                }
522:            }
523:
    /** Parse event: a semantic predicate.  No-op here. */
    public void refSemPred(Token pred) {
    }

    /** Reference a string literal; delegates to the non-virtual worker. */
    public void refStringLiteral(Token lit, Token label,
            int autoGenType, boolean lastInRule) {
        _refStringLiteral(lit, label, autoGenType, lastInRule);
    }

    /** Reference a token; delegates to the non-virtual worker. */
    public void refToken(Token assignId, Token t, Token label,
            Token args, boolean inverted, int autoGenType,
            boolean lastInRule) {
        _refToken(assignId, t, label, args, inverted, autoGenType,
                lastInRule);
    }
539:
540:            public void refTokenRange(Token t1, Token t2, Token label,
541:                    int autoGenType, boolean lastInRule) {
542:                // ensure that the DefineGrammarSymbols methods are called; otherwise a range addes more
543:                // token refs to the alternative by calling MakeGrammar.refToken etc...
544:                if (t1.getText().charAt(0) == '"') {
545:                    refStringLiteral(t1, null, GrammarElement.AUTO_GEN_NONE,
546:                            lastInRule);
547:                } else {
548:                    _refToken(null, t1, null, null, false,
549:                            GrammarElement.AUTO_GEN_NONE, lastInRule);
550:                }
551:                if (t2.getText().charAt(0) == '"') {
552:                    _refStringLiteral(t2, null, GrammarElement.AUTO_GEN_NONE,
553:                            lastInRule);
554:                } else {
555:                    _refToken(null, t2, null, null, false,
556:                            GrammarElement.AUTO_GEN_NONE, lastInRule);
557:                }
558:            }
559:
    /** Parse event: a tree specifier.  No-op here. */
    public void refTreeSpecifier(Token treeSpec) {
    }

    /** Parse event: a wildcard element.  No-op here. */
    public void refWildcard(Token t, Token label, int autoGenType) {
    }
565:
    /** Get ready to process a new grammar */
    public void reset() {
        // forget only the current grammar; shared token managers and
        // header actions persist across grammars in the same file
        grammar = null;
    }

    /** Parse event: argument of a rule reference.  No-op here. */
    public void setArgOfRuleRef(Token argaction) {
    }
573:
    /** Set the character vocabulary for a lexer */
    public void setCharVocabulary(BitSet b) {
        // grammar should enforce that this is only called for lexer
        // (the unchecked cast throws ClassCastException otherwise)
        ((LexerGrammar) grammar).setCharVocabulary(b);
    }
579:
580:            /** setFileOption: Associate an option value with a key.
581:             * This applies to options for an entire grammar file.
582:             * @param key The token containing the option name
583:             * @param value The token containing the option value.
584:             */
585:            public void setFileOption(Token key, Token value, String filename) {
586:                if (key.getText().equals("language")) {
587:                    if (value.getType() == ANTLRParser.STRING_LITERAL) {
588:                        language = StringUtils.stripBack(StringUtils
589:                                .stripFront(value.getText(), '"'), '"');
590:                    } else if (value.getType() == ANTLRParser.TOKEN_REF
591:                            || value.getType() == ANTLRParser.RULE_REF) {
592:                        language = value.getText();
593:                    } else {
594:                        tool.error(
595:                                "language option must be string or identifier",
596:                                filename, value.getLine(), value.getColumn());
597:                    }
598:                } else if (key.getText().equals("mangleLiteralPrefix")) {
599:                    if (value.getType() == ANTLRParser.STRING_LITERAL) {
600:                        tool.literalsPrefix = StringUtils.stripFrontBack(value
601:                                .getText(), "\"", "\"");
602:                    } else {
603:                        tool.error("mangleLiteralPrefix option must be string",
604:                                filename, value.getLine(), value.getColumn());
605:                    }
606:                } else if (key.getText().equals("upperCaseMangledLiterals")) {
607:                    if (value.getText().equals("true")) {
608:                        tool.upperCaseMangledLiterals = true;
609:                    } else if (value.getText().equals("false")) {
610:                        tool.upperCaseMangledLiterals = false;
611:                    } else {
612:                        grammar.antlrTool
613:                                .error(
614:                                        "Value for upperCaseMangledLiterals must be true or false",
615:                                        filename, key.getLine(), key
616:                                                .getColumn());
617:                    }
618:                } else if (key.getText().equals("namespaceStd")
619:                        || key.getText().equals("namespaceAntlr")
620:                        || key.getText().equals("genHashLines")) {
621:                    if (!language.equals("Cpp")) {
622:                        tool.error(
623:                                key.getText() + " option only valid for C++",
624:                                filename, key.getLine(), key.getColumn());
625:                    } else {
626:                        if (key.getText().equals("noConstructors")) {
627:                            if (!(value.getText().equals("true") || value
628:                                    .getText().equals("false")))
629:                                tool
630:                                        .error(
631:                                                "noConstructors option must be true or false",
632:                                                filename, value.getLine(),
633:                                                value.getColumn());
634:                            tool.noConstructors = value.getText()
635:                                    .equals("true");
636:                        } else if (key.getText().equals("genHashLines")) {
637:                            if (!(value.getText().equals("true") || value
638:                                    .getText().equals("false")))
639:                                tool
640:                                        .error(
641:                                                "genHashLines option must be true or false",
642:                                                filename, value.getLine(),
643:                                                value.getColumn());
644:                            tool.genHashLines = value.getText().equals("true");
645:                        } else {
646:                            if (value.getType() != ANTLRParser.STRING_LITERAL) {
647:                                tool.error(key.getText()
648:                                        + " option must be a string", filename,
649:                                        value.getLine(), value.getColumn());
650:                            } else {
651:                                if (key.getText().equals("namespaceStd"))
652:                                    tool.namespaceStd = value.getText();
653:                                else if (key.getText().equals("namespaceAntlr"))
654:                                    tool.namespaceAntlr = value.getText();
655:                            }
656:                        }
657:                    }
658:                } else if (key.getText().equals("namespace")) {
659:                    if (!language.equals("Cpp") && !language.equals("CSharp")) {
660:                        tool
661:                                .error(
662:                                        key.getText()
663:                                                + " option only valid for C++ and C# (a.k.a CSharp)",
664:                                        filename, key.getLine(), key
665:                                                .getColumn());
666:                    } else {
667:                        if (value.getType() != ANTLRParser.STRING_LITERAL) {
668:                            tool.error(key.getText()
669:                                    + " option must be a string", filename,
670:                                    value.getLine(), value.getColumn());
671:                        } else {
672:                            if (key.getText().equals("namespace"))
673:                                tool.setNameSpace(value.getText());
674:                        }
675:                    }
676:                } else {
677:                    tool.error("Invalid file-level option: " + key.getText(),
678:                            filename, key.getLine(), value.getColumn());
679:                }
680:            }
681:
682:            /** setGrammarOption: Associate an option value with a key.
683:             * This function forwards to Grammar.setOption for some options.
684:             * @param key The token containing the option name
685:             * @param value The token containing the option value.
686:             */
687:            public void setGrammarOption(Token key, Token value) {
688:                if (key.getText().equals("tokdef")
689:                        || key.getText().equals("tokenVocabulary")) {
690:                    tool
691:                            .error(
692:                                    "tokdef/tokenVocabulary options are invalid >= ANTLR 2.6.0.\n"
693:                                            + "  Use importVocab/exportVocab instead.  Please see the documentation.\n"
694:                                            + "  The previous options were so heinous that Terence changed the whole\n"
695:                                            + "  vocabulary mechanism; it was better to change the names rather than\n"
696:                                            + "  subtly change the functionality of the known options.  Sorry!",
697:                                    grammar.getFilename(), value.getLine(),
698:                                    value.getColumn());
699:                } else if (key.getText().equals("literal")
700:                        && grammar instanceof  LexerGrammar) {
701:                    tool
702:                            .error(
703:                                    "the literal option is invalid >= ANTLR 2.6.0.\n"
704:                                            + "  Use the \"tokens {...}\" mechanism instead.",
705:                                    grammar.getFilename(), value.getLine(),
706:                                    value.getColumn());
707:                } else if (key.getText().equals("exportVocab")) {
708:                    // Set the token manager associated with the parser
709:                    if (value.getType() == ANTLRParser.RULE_REF
710:                            || value.getType() == ANTLRParser.TOKEN_REF) {
711:                        grammar.exportVocab = value.getText();
712:                    } else {
713:                        tool.error("exportVocab must be an identifier", grammar
714:                                .getFilename(), value.getLine(), value
715:                                .getColumn());
716:                    }
717:                } else if (key.getText().equals("importVocab")) {
718:                    if (value.getType() == ANTLRParser.RULE_REF
719:                            || value.getType() == ANTLRParser.TOKEN_REF) {
720:                        grammar.importVocab = value.getText();
721:                    } else {
722:                        tool.error("importVocab must be an identifier", grammar
723:                                .getFilename(), value.getLine(), value
724:                                .getColumn());
725:                    }
726:                } else {
727:                    // Forward all unrecognized options to the grammar
728:                    grammar.setOption(key.getText(), value);
729:                }
730:            }
731:
            /** Parser callback for a rule-level option.  Deliberate no-op:
             *  rule options are not needed for symbol definition. */
            public void setRuleOption(Token key, Token value) {
            }
734:
            /** Parser callback for a subrule-level option.  Deliberate no-op:
             *  subrule options are not needed for symbol definition. */
            public void setSubruleOption(Token key, Token value) {
            }
737:
738:            /** Start a new lexer */
739:            public void startLexer(String file, Token name, String super Class,
740:                    String doc) {
741:                if (numLexers > 0) {
742:                    tool
743:                            .panic("You may only have one lexer per grammar file: class "
744:                                    + name.getText());
745:                }
746:                numLexers++;
747:                reset();
748:                //System.out.println("Processing lexer '" + name.getText() + "'");
749:                // Does the lexer already exist?
750:                Grammar g = (Grammar) grammars.get(name);
751:                if (g != null) {
752:                    if (!(g instanceof  LexerGrammar)) {
753:                        tool.panic("'" + name.getText()
754:                                + "' is already defined as a non-lexer");
755:                    } else {
756:                        tool.panic("Lexer '" + name.getText()
757:                                + "' is already defined");
758:                    }
759:                } else {
760:                    // Create a new lexer grammar
761:                    LexerGrammar lg = new LexerGrammar(name.getText(), tool,
762:                            super Class);
763:                    lg.comment = doc;
764:                    lg.processArguments(args);
765:                    lg.setFilename(file);
766:                    grammars.put(lg.getClassName(), lg);
767:                    // Use any preamble action
768:                    lg.preambleAction = thePreambleAction;
769:                    thePreambleAction = new CommonToken(Token.INVALID_TYPE, "");
770:                    // This is now the current grammar
771:                    grammar = lg;
772:                }
773:            }
774:
775:            /** Start a new parser */
776:            public void startParser(String file, Token name, String super Class,
777:                    String doc) {
778:                if (numParsers > 0) {
779:                    tool
780:                            .panic("You may only have one parser per grammar file: class "
781:                                    + name.getText());
782:                }
783:                numParsers++;
784:                reset();
785:                //System.out.println("Processing parser '" + name.getText() + "'");
786:                // Is this grammar already defined?
787:                Grammar g = (Grammar) grammars.get(name);
788:                if (g != null) {
789:                    if (!(g instanceof  ParserGrammar)) {
790:                        tool.panic("'" + name.getText()
791:                                + "' is already defined as a non-parser");
792:                    } else {
793:                        tool.panic("Parser '" + name.getText()
794:                                + "' is already defined");
795:                    }
796:                } else {
797:                    // Create a new grammar
798:                    grammar = new ParserGrammar(name.getText(), tool,
799:                            super Class);
800:                    grammar.comment = doc;
801:                    grammar.processArguments(args);
802:                    grammar.setFilename(file);
803:                    grammars.put(grammar.getClassName(), grammar);
804:                    // Use any preamble action
805:                    grammar.preambleAction = thePreambleAction;
806:                    thePreambleAction = new CommonToken(Token.INVALID_TYPE, "");
807:                }
808:            }
809:
810:            /** Start a new tree-walker */
811:            public void startTreeWalker(String file, Token name,
812:                    String super Class, String doc) {
813:                if (numTreeParsers > 0) {
814:                    tool
815:                            .panic("You may only have one tree parser per grammar file: class "
816:                                    + name.getText());
817:                }
818:                numTreeParsers++;
819:                reset();
820:                //System.out.println("Processing tree-walker '" + name.getText() + "'");
821:                // Is this grammar already defined?
822:                Grammar g = (Grammar) grammars.get(name);
823:                if (g != null) {
824:                    if (!(g instanceof  TreeWalkerGrammar)) {
825:                        tool.panic("'" + name.getText()
826:                                + "' is already defined as a non-tree-walker");
827:                    } else {
828:                        tool.panic("Tree-walker '" + name.getText()
829:                                + "' is already defined");
830:                    }
831:                } else {
832:                    // Create a new grammar
833:                    grammar = new TreeWalkerGrammar(name.getText(), tool,
834:                            super Class);
835:                    grammar.comment = doc;
836:                    grammar.processArguments(args);
837:                    grammar.setFilename(file);
838:                    grammars.put(grammar.getClassName(), grammar);
839:                    // Use any preamble action
840:                    grammar.preambleAction = thePreambleAction;
841:                    thePreambleAction = new CommonToken(Token.INVALID_TYPE, "");
842:                }
843:            }
844:
            /** Parser callback for a syntactic predicate.  Deliberate no-op:
             *  predicates introduce no symbols to define. */
            public void synPred() {
            }
847:
            /** Parser callback for a (...)* subrule.  Deliberate no-op:
             *  subrule structure is irrelevant to symbol definition. */
            public void zeroOrMoreSubRule() {
            }
850:        }
www.java2java.com | Contact Us
Copyright 2009 - 2012 Demo Source and Support. All rights reserved.
All other trademarks are property of their respective owners.