Source Code Cross Referenced for DocBookCodeGenerator.java » Database ORM » toplink » persistence.antlr



001:        package persistence.antlr;
002:
003:        /* ANTLR Translator Generator
004:         * Project led by Terence Parr at http://www.jGuru.com
005:         * Software rights: http://www.antlr.org/license.html
006:         *
007:         */
008:
009:        /** TODO: strip comments from javadoc entries
010:         */
011:
012:        import java.util.Enumeration;
013:
014:        import persistence.antlr.collections.impl.BitSet;
015:        import persistence.antlr.collections.impl.Vector;
016:
017:        import java.io.PrintWriter; //SAS: changed for proper text file io
018:        import java.io.IOException;
019:        import java.io.FileWriter;
020:
021:        /**Generate P.sgml, a cross-linked representation of P with or without actions */
022:        public class DocBookCodeGenerator extends CodeGenerator {
023:            /** non-zero if inside syntactic predicate generation */
024:            protected int syntacticPredLevel = 0;
025:
026:            /** true during lexer generation, false during parser generation */
027:            protected boolean doingLexRules = false;
028:
029:            protected boolean firstElementInAlt;
030:
031:            protected AlternativeElement prevAltElem = null; // what was generated last?
032:
033:            /** Create a DocBook code-generator.
034:             * The caller must still call setTool, setBehavior, and setAnalyzer
035:             * before generating code.
036:             */
037:            public DocBookCodeGenerator() {
038:                super ();
039:                charFormatter = new JavaCharFormatter();
040:            }
041:
042:            /** Encode a string for printing in an HTML/SGML document,
043:             * e.g. encode '<', '>' and similar characters.
044:             * @param s the string to encode
045:             */
046:            static String HTMLEncode(String s) {
047:                StringBuffer buf = new StringBuffer();
048:
049:                for (int i = 0, len = s.length(); i < len; i++) {
050:                    char c = s.charAt(i);
051:                    if (c == '&')
052:                        buf.append("&amp;");
053:                    else if (c == '\"')
054:                        buf.append("&quot;");
055:                    else if (c == '\'')
056:                        buf.append("&#039;");
057:                    else if (c == '<')
058:                        buf.append("&lt;");
059:                    else if (c == '>')
060:                        buf.append("&gt;");
061:                    else
062:                        buf.append(c);
063:                }
064:                return buf.toString();
065:            }
066:
067:            /** Convert a string into a form that can be used as a DocBook ID,
068:             * e.g. replace '_' with '.'
069:             * @param s the string to convert
070:             */
071:            static String QuoteForId(String s) {
072:                StringBuffer buf = new StringBuffer();
073:
074:                for (int i = 0, len = s.length(); i < len; i++) {
075:                    char c = s.charAt(i);
076:                    if (c == '_')
077:                        buf.append(".");
078:                    else
079:                        buf.append(c);
080:                }
081:                return buf.toString();
082:            }
083:
084:            public void gen() {
085:                // Do the code generation
086:                try {
087:                    // Loop over all grammars
088:                    Enumeration grammarIter = behavior.grammars.elements();
089:                    while (grammarIter.hasMoreElements()) {
090:                        Grammar g = (Grammar) grammarIter.nextElement();
091:
092:                        // Connect all the components to each other
093:                        /*
094:                        g.setGrammarAnalyzer(analyzer);
095:                        analyzer.setGrammar(g);
096:                         */
097:                        g.setCodeGenerator(this );
098:
099:                        // To get right overloading behavior across heterogeneous grammars
100:                        g.generate();
101:
102:                        if (antlrTool.hasError()) {
103:                            antlrTool.fatalError("Exiting due to errors.");
104:                        }
105:
106:                    }
107:
108:                } catch (IOException e) {
109:                    antlrTool.reportException(e, null);
110:                }
111:            }
112:
113:            /** Generate code for the given grammar element.
114:             * @param action The {...} action to generate
115:             */
116:            public void gen(ActionElement action) {
117:                // no-op
118:            }
119:
120:            /** Generate code for the given grammar element.
121:             * @param blk The "x|y|z|..." block to generate
122:             */
123:            public void gen(AlternativeBlock blk) {
124:                genGenericBlock(blk, "");
125:            }
126:
127:            /** Generate code for the given grammar element.
128:             * @param end The block-end element to generate.  Block-end
129:             * elements are synthesized by the grammar parser to represent
130:             * the end of a block.
131:             */
132:            public void gen(BlockEndElement end) {
133:                // no-op
134:            }
135:
136:            /** Generate code for the given grammar element.
137:             * @param atom The character literal reference to generate
138:             */
139:            public void gen(CharLiteralElement atom) {
140:                if (atom.not) {
141:                    _print("~");
142:                }
143:                _print(HTMLEncode(atom.atomText) + " ");
144:            }
145:
146:            /** Generate code for the given grammar element.
147:             * @param r The character-range reference to generate
148:             */
149:            public void gen(CharRangeElement r) {
150:                print(r.beginText + ".." + r.endText + " ");
151:            }
152:
153:            /** Generate the lexer DocBook (SGML) file */
154:            public void gen(LexerGrammar g) throws IOException {
155:                setGrammar(g);
156:                antlrTool.reportProgress("Generating " + grammar.getClassName()
157:                        + TokenTypesFileExt);
158:                currentOutput = antlrTool.openOutputFile(grammar.getClassName()
159:                        + TokenTypesFileExt);
160:                //SAS: changed for proper text file io
161:
162:                tabs = 0;
163:                doingLexRules = true;
164:
165:                // Generate header common to all output files
166:                genHeader();
167:
168:                // Output the user-defined lexer preamble
169:                // RK: guess not..
170:                // println(grammar.preambleAction.getText());
171:
172:                // Generate lexer class definition
173:                println("");
174:
175:                // print javadoc comment if any
176:                if (grammar.comment != null) {
177:                    _println(HTMLEncode(grammar.comment));
178:                }
179:
180:                println("<para>Definition of lexer " + grammar.getClassName()
181:                        + ", which is a subclass of " + grammar.getSuperClass()
182:                        + ".</para>");
183:
184:                // Generate user-defined parser class members
185:                // printAction(grammar.classMemberAction.getText());
186:
187:                /*
188:                // Generate string literals
189:                println("");
190:                println("*** String literals used in the parser");
191:                println("The following string literals were used in the parser.");
192:                println("An actual code generator would arrange to place these literals");
193:                println("into a table in the generated lexer, so that actions in the");
194:                println("generated lexer could match token text against the literals.");
195:                println("String literals used in the lexer are not listed here, as they");
196:                println("are incorporated into the mainstream lexer processing.");
197:                tabs++;
198:                // Enumerate all of the symbols and look for string literal symbols
199:                Enumeration ids = grammar.getSymbols();
200:                while ( ids.hasMoreElements() ) {
201:                	GrammarSymbol sym = (GrammarSymbol)ids.nextElement();
202:                	// Only processing string literals -- reject other symbol entries
203:                	if ( sym instanceof StringLiteralSymbol ) {
204:                		StringLiteralSymbol s = (StringLiteralSymbol)sym;
205:                		println(s.getId() + " = " + s.getTokenType());
206:                	}
207:                }
208:                tabs--;
209:                println("*** End of string literals used by the parser");
210:                 */
211:
212:                // Generate nextToken() rule.
213:                // nextToken() is a synthetic lexer rule that is the implicit OR of all
214:                // user-defined lexer rules.
215:                genNextToken();
216:
217:                // Generate code for each rule in the lexer
218:
219:                Enumeration ids = grammar.rules.elements();
220:                while (ids.hasMoreElements()) {
221:                    RuleSymbol rs = (RuleSymbol) ids.nextElement();
222:                    if (!rs.id.equals("mnextToken")) {
223:                        genRule(rs);
224:                    }
225:                }
226:
227:                // Close the lexer output file
228:                currentOutput.close();
229:                currentOutput = null;
230:                doingLexRules = false;
231:            }
232:
233:            /** Generate code for the given grammar element.
234:             * @param blk The (...)+ block to generate
235:             */
236:            public void gen(OneOrMoreBlock blk) {
237:                genGenericBlock(blk, "+");
238:            }
239:
240:            /** Generate the parser DocBook (SGML) file */
241:            public void gen(ParserGrammar g) throws IOException {
242:                setGrammar(g);
243:                // Open the output stream for the parser and set the currentOutput
244:                antlrTool.reportProgress("Generating " + grammar.getClassName()
245:                        + ".sgml");
246:                currentOutput = antlrTool.openOutputFile(grammar.getClassName()
247:                        + ".sgml");
248:
249:                tabs = 0;
250:
251:                // Generate the header common to all output files.
252:                genHeader();
253:
254:                // Generate parser class definition
255:                println("");
256:
257:                // print javadoc comment if any
258:                if (grammar.comment != null) {
259:                    _println(HTMLEncode(grammar.comment));
260:                }
261:
262:                println("<para>Definition of parser " + grammar.getClassName()
263:                        + ", which is a subclass of " + grammar.getSuperClass()
264:                        + ".</para>");
265:
266:                // Enumerate the parser rules
267:                Enumeration rules = grammar.rules.elements();
268:                while (rules.hasMoreElements()) {
269:                    println("");
270:                    // Get the rule from the list and downcast it to the proper type
271:                    GrammarSymbol sym = (GrammarSymbol) rules.nextElement();
272:                    // Only process parser rules
273:                    if (sym instanceof  RuleSymbol) {
274:                        genRule((RuleSymbol) sym);
275:                    }
276:                }
277:                tabs--;
278:                println("");
279:
280:                genTail();
281:
282:                // Close the parser output stream
283:                currentOutput.close();
284:                currentOutput = null;
285:            }
286:
287:            /** Generate code for the given grammar element.
288:             * @param rr The rule-reference to generate
289:             */
290:            public void gen(RuleRefElement rr) {
291:                RuleSymbol rs = (RuleSymbol) grammar.getSymbol(rr.targetRule);
292:
293:                // Generate the actual rule description
294:                _print("<link linkend=\"" + QuoteForId(rr.targetRule) + "\">");
295:                _print(rr.targetRule);
296:                _print("</link>");
297:                // RK: Leave out args..
298:                //	if (rr.args != null) {
299:                //		_print("["+rr.args+"]");
300:                //	}
301:                _print(" ");
302:            }
303:
304:            /** Generate code for the given grammar element.
305:             * @param atom The string-literal reference to generate
306:             */
307:            public void gen(StringLiteralElement atom) {
308:                if (atom.not) {
309:                    _print("~");
310:                }
311:                _print(HTMLEncode(atom.atomText));
312:                _print(" ");
313:            }
314:
315:            /** Generate code for the given grammar element.
316:             * @param r The token-range reference to generate
317:             */
318:            public void gen(TokenRangeElement r) {
319:                print(r.beginText + ".." + r.endText + " ");
320:            }
321:
322:            /** Generate code for the given grammar element.
323:             * @param atom The token-reference to generate
324:             */
325:            public void gen(TokenRefElement atom) {
326:                if (atom.not) {
327:                    _print("~");
328:                }
329:                _print(atom.atomText);
330:                _print(" ");
331:            }
332:
333:            public void gen(TreeElement t) {
334:                print(t + " ");
335:            }
336:
337:            /** Generate the tree-walker DocBook (SGML) file */
338:            public void gen(TreeWalkerGrammar g) throws IOException {
339:                setGrammar(g);
340:                // Open the output stream for the parser and set the currentOutput
341:                antlrTool.reportProgress("Generating " + grammar.getClassName()
342:                        + ".sgml");
343:                currentOutput = antlrTool.openOutputFile(grammar.getClassName()
344:                        + ".sgml");
345:                //SAS: changed for proper text file io
346:
347:                tabs = 0;
348:
349:                // Generate the header common to all output files.
350:                genHeader();
351:
352:                // Output the user-defined tree-walker preamble
353:                println("");
354:                //		println("*** Tree-walker Preamble Action.");
355:                //		println("This action will appear before the declaration of your tree-walker class:");
356:                //		tabs++;
357:                //		println(grammar.preambleAction.getText());
358:                //		tabs--;
359:                //		println("*** End of tree-walker Preamble Action");
360:
361:                // Generate tree-walker class definition
362:                println("");
363:
364:                // print javadoc comment if any
365:                if (grammar.comment != null) {
366:                    _println(HTMLEncode(grammar.comment));
367:                }
368:
369:                println("<para>Definition of tree parser "
370:                        + grammar.getClassName() + ", which is a subclass of "
371:                        + grammar.getSuperClass() + ".</para>");
372:
373:                // Generate user-defined tree-walker class members
374:                //		println("");
375:                //		println("*** User-defined tree-walker class members:");
376:                //		println("These are the member declarations that you defined for your class:");
377:                //		tabs++;
378:                //		printAction(grammar.classMemberAction.getText());
379:                //		tabs--;
380:                //		println("*** End of user-defined tree-walker class members");
381:
382:                // Generate code for each rule in the grammar
383:                println("");
384:                //		println("*** tree-walker rules:");
385:                tabs++;
386:
387:                // Enumerate the tree-walker rules
388:                Enumeration rules = grammar.rules.elements();
389:                while (rules.hasMoreElements()) {
390:                    println("");
391:                    // Get the rule from the list and downcast it to the proper type
392:                    GrammarSymbol sym = (GrammarSymbol) rules.nextElement();
393:                    // Only process tree-walker rules
394:                    if (sym instanceof  RuleSymbol) {
395:                        genRule((RuleSymbol) sym);
396:                    }
397:                }
398:                tabs--;
399:                println("");
400:                //		println("*** End of tree-walker rules");
401:
402:                //		println("");
403:                //		println("*** End of tree-walker");
404:
405:                // Close the tree-walker output stream
406:                currentOutput.close();
407:                currentOutput = null;
408:            }
409:
410:            /** Generate a wildcard element */
411:            public void gen(WildcardElement wc) {
412:                /*
413:                if ( wc.getLabel()!=null ) {
414:                	_print(wc.getLabel()+"=");
415:                }
416:                 */
417:                _print(". ");
418:            }
419:
420:            /** Generate code for the given grammar element.
421:             * @param blk The (...)* block to generate
422:             */
423:            public void gen(ZeroOrMoreBlock blk) {
424:                genGenericBlock(blk, "*");
425:            }
426:
427:            protected void genAlt(Alternative alt) {
428:                if (alt.getTreeSpecifier() != null) {
429:                    _print(alt.getTreeSpecifier().getText());
430:                }
431:                prevAltElem = null;
432:                for (AlternativeElement elem = alt.head; !(elem instanceof  BlockEndElement); elem = elem.next) {
433:                    elem.generate();
434:                    firstElementInAlt = false;
435:                    prevAltElem = elem;
436:                }
437:            }
438:
439:            /** Generate the header for a block, which may be a RuleBlock or a
440:             * plain AlternativeBlock.  This generates any variable declarations,
441:             * init-actions, and syntactic-predicate-testing variables.
442:             * @param blk The block for which the preamble is to be generated.
443:             */
444:            //	protected void genBlockPreamble(AlternativeBlock blk) {
445:            // RK: don't dump out init actions
446:            // dump out init action
447:            //		if ( blk.initAction!=null ) {
448:            //			printAction("{" + blk.initAction + "}");
449:            //		}
450:            //	}
451:            /** Generate common code for a block of alternatives: print the
452:             * DocBook markup for each alternative, wrapping the alternatives
453:             * in an itemizedlist when the block has more than one.  Nothing is
454:             * returned; output goes directly to the current output file.
455:             */
456:            public void genCommonBlock(AlternativeBlock blk) {
457:                if (blk.alternatives.size() > 1)
458:                    println("<itemizedlist mark=\"none\">");
459:                for (int i = 0; i < blk.alternatives.size(); i++) {
460:                    Alternative alt = blk.getAlternativeAt(i);
461:                    AlternativeElement elem = alt.head;
462:
463:                    if (blk.alternatives.size() > 1)
464:                        print("<listitem><para>");
465:
466:                    // dump alt operator |
467:                    if (i > 0 && blk.alternatives.size() > 1) {
468:                        _print("| ");
469:                    }
470:
471:                    // Dump the alternative, starting with predicates
472:                    //
473:                    boolean save = firstElementInAlt;
474:                    firstElementInAlt = true;
475:                    tabs++; // in case we do a newline in alt, increase the tab indent
476:
477:                    genAlt(alt);
478:                    tabs--;
479:                    firstElementInAlt = save;
480:                    if (blk.alternatives.size() > 1)
481:                        _println("</para></listitem>");
482:                }
483:                if (blk.alternatives.size() > 1)
484:                    println("</itemizedlist>");
485:            }
486:
487:            /** Generate a textual representation of the follow set
488:             * for a block.
489:             * @param blk  The rule block of interest
490:             */
491:            public void genFollowSetForRuleBlock(RuleBlock blk) {
492:                Lookahead follow = grammar.theLLkAnalyzer
493:                        .FOLLOW(1, blk.endNode);
494:                printSet(grammar.maxk, 1, follow);
495:            }
496:
497:            protected void genGenericBlock(AlternativeBlock blk, String blkOp) {
498:                if (blk.alternatives.size() > 1) {
499:                    // make sure we start on a new line
500:                    _println("");
501:                    if (!firstElementInAlt) {
502:                        // only do newline if the last element wasn't a multi-line block
503:                        //if ( prevAltElem==null ||
504:                        //	 !(prevAltElem instanceof AlternativeBlock) ||
505:                        //	 ((AlternativeBlock)prevAltElem).alternatives.size()==1 )
506:                        //{
507:                        _println("(");
508:                        //}
509:                        //else
510:                        //{
511:                        //	_print("(");
512:                        //}
513:                        // _println("");
514:                        // print("(\t");
515:                    } else {
516:                        _print("(");
517:                    }
518:                } else {
519:                    _print("( ");
520:                }
521:                // RK: don't dump init actions
522:                //	genBlockPreamble(blk);
523:                genCommonBlock(blk);
524:                if (blk.alternatives.size() > 1) {
525:                    _println("");
526:                    print(")" + blkOp + " ");
527:                    // if not last element of alt, need newline & to indent
528:                    if (!(blk.next instanceof  BlockEndElement)) {
529:                        _println("");
530:                        print("");
531:                    }
532:                } else {
533:                    _print(")" + blkOp + " ");
534:                }
535:            }
536:
537:            /** Generate the DocBook header that is common to all output files */
538:            protected void genHeader() {
539:                println("<?xml version=\"1.0\" standalone=\"no\"?>");
540:                println("<!DOCTYPE book PUBLIC \"-//OASIS//DTD DocBook V3.1//EN\">");
541:                println("<book lang=\"en\">");
542:                println("<bookinfo>");
543:                println("<title>Grammar " + grammar.getClassName() + "</title>");
544:                println("  <author>");
545:                println("    <firstname></firstname>");
546:                println("    <othername></othername>");
547:                println("    <surname></surname>");
548:                println("    <affiliation>");
549:                println("     <address>");
550:                println("     <email></email>");
551:                println("     </address>");
552:                println("    </affiliation>");
553:                println("  </author>");
554:                println("  <othercredit>");
555:                println("    <contrib>");
556:                println("    Generated by <ulink url=\"http://www.ANTLR.org/\">ANTLR</ulink>"
557:                        + antlrTool.version);
558:                println("    from " + antlrTool.grammarFile);
559:                println("    </contrib>");
560:                println("  </othercredit>");
561:                println("  <pubdate></pubdate>");
562:                println("  <abstract>");
563:                println("  <para>");
564:                println("  </para>");
565:                println("  </abstract>");
566:                println("</bookinfo>");
567:                println("<chapter>");
568:                println("<title></title>");
569:            }
570:
571:            /**Generate the lookahead set for an alternate. */
572:            protected void genLookaheadSetForAlt(Alternative alt) {
573:                if (doingLexRules && alt.cache[1].containsEpsilon()) {
574:                    println("MATCHES ALL");
575:                    return;
576:                }
577:                int depth = alt.lookaheadDepth;
578:                if (depth == GrammarAnalyzer.NONDETERMINISTIC) {
579:                    // if the decision is nondeterministic, do the best we can: LL(k)
580:                    // any predicates that are around will be generated later.
581:                    depth = grammar.maxk;
582:                }
583:                for (int i = 1; i <= depth; i++) {
584:                    Lookahead lookahead = alt.cache[i];
585:                    printSet(depth, i, lookahead);
586:                }
587:            }
588:
589:            /** Generate a textual representation of the lookahead set
590:             * for a block.
591:             * @param blk  The block of interest
592:             */
593:            public void genLookaheadSetForBlock(AlternativeBlock blk) {
594:                // Find the maximal lookahead depth over all alternatives
595:                int depth = 0;
596:                for (int i = 0; i < blk.alternatives.size(); i++) {
597:                    Alternative alt = blk.getAlternativeAt(i);
598:                    if (alt.lookaheadDepth == GrammarAnalyzer.NONDETERMINISTIC) {
599:                        depth = grammar.maxk;
600:                        break;
601:                    } else if (depth < alt.lookaheadDepth) {
602:                        depth = alt.lookaheadDepth;
603:                    }
604:                }
605:
606:                for (int i = 1; i <= depth; i++) {
607:                    Lookahead lookahead = grammar.theLLkAnalyzer.look(i, blk);
608:                    printSet(depth, i, lookahead);
609:                }
610:            }
611:
612:            /** Generate the nextToken rule.
613:             * nextToken is a synthetic lexer rule that is the implicit OR of all
614:             * user-defined lexer rules.
615:             */
616:            public void genNextToken() {
617:                println("");
618:                println("/** Lexer nextToken rule:");
619:                println(" *  The lexer nextToken rule is synthesized from all of the user-defined");
620:                println(" *  lexer rules.  It logically consists of one big alternative block with");
621:                println(" *  each user-defined rule being an alternative.");
622:                println(" */");
623:
624:                // Create the synthesized rule block for nextToken consisting
625:                // of an alternate block containing all the user-defined lexer rules.
626:                RuleBlock blk = MakeGrammar.createNextTokenRule(grammar,
627:                        grammar.rules, "nextToken");
628:
629:                // Define the nextToken rule symbol
630:                RuleSymbol nextTokenRs = new RuleSymbol("mnextToken");
631:                nextTokenRs.setDefined();
632:                nextTokenRs.setBlock(blk);
633:                nextTokenRs.access = "private";
634:                grammar.define(nextTokenRs);
635:
636:                /*
637:                // Analyze the synthesized block
638:                if (!grammar.theLLkAnalyzer.deterministic(blk))
639:                {
640:                	println("The grammar analyzer has determined that the synthesized");
641:                	println("nextToken rule is non-deterministic (i.e., it has ambiguities)");
642:                	println("This means that there is some overlap of the character");
643:                	println("lookahead for two or more of your lexer rules.");
644:                }
645:                 */
646:
647:                genCommonBlock(blk);
648:            }
649:
650:            /** Generate code for a named rule block
651:             * @param s The RuleSymbol describing the rule to generate
652:             */
653:            public void genRule(RuleSymbol s) {
654:                if (s == null || !s.isDefined())
655:                    return; // undefined rule
656:                println("");
657:
658:                if (s.access.length() != 0) {
659:                    if (!s.access.equals("public")) {
660:                        _print("<para>" + s.access + " </para>");
661:                    }
662:                }
663:
664:                println("<section id=\"" + QuoteForId(s.getId()) + "\">");
665:                println("<title>" + s.getId() + "</title>");
666:                if (s.comment != null) {
667:                    _println("<para>" + HTMLEncode(s.comment) + "</para>");
668:                }
669:                println("<para>");
670:
671:                // Get rule return type and arguments
672:                RuleBlock rblk = s.getBlock();
673:
674:                // RK: for HTML output not of much value...
675:                // Gen method return value(s)
676:                //		if (rblk.returnAction != null) {
677:                //			_print("["+rblk.returnAction+"]");
678:                //		}
679:                // Gen arguments
680:                //		if (rblk.argAction != null)
681:                //		{
682:                //				_print(" returns [" + rblk.argAction+"]");
683:                //		}
684:                _println("");
685:                print(s.getId() + ":\t");
686:                tabs++;
687:
688:                // Dump any init-action
689:                // genBlockPreamble(rblk);
690:
691:                // Dump the alternates of the rule
692:                genCommonBlock(rblk);
693:
694:                _println("");
695:                //		println(";");
696:                tabs--;
697:                _println("</para>");
698:                _println("</section><!-- section \"" + s.getId() + "\" -->");
699:            }
700:
701:            /** Generate the syntactic predicate.  This basically generates
702:             * the alternative block, but tracks if we are inside a synPred
703:             * @param blk  The syntactic predicate block
704:             */
705:            protected void genSynPred(SynPredBlock blk) {
706:                // no op
707:            }
708:
709:            public void genTail() {
710:                println("</chapter>");
711:                println("</book>");
712:            }
713:
714:            /** Generate the token types TXT file */
715:            protected void genTokenTypes(TokenManager tm) throws IOException {
716:                // Open the token output TXT file and set the currentOutput stream
717:                antlrTool.reportProgress("Generating " + tm.getName()
718:                        + TokenTypesFileSuffix + TokenTypesFileExt);
719:                currentOutput = antlrTool.openOutputFile(tm.getName()
720:                        + TokenTypesFileSuffix + TokenTypesFileExt);
721:                //SAS: changed for proper text file io
722:                tabs = 0;
723:
724:                // Generate the header common to all output files
725:                genHeader();
726:
727:                // Generate a string for each token.  This creates a static
728:                // array of Strings indexed by token type.
729:                println("");
730:                println("*** Tokens used by the parser");
731:                println("This is a list of the token numeric values and the corresponding");
732:                println("token identifiers.  Some tokens are literals, and because of that");
733:                println("they have no identifiers.  Literals are double-quoted.");
734:                tabs++;
735:
736:                // Enumerate all the valid token types
737:                Vector v = tm.getVocabulary();
738:                for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
739:                    String s = (String) v.elementAt(i);
740:                    if (s != null) {
741:                        println(s + " = " + i);
742:                    }
743:                }
744:
745:                // Close the interface
746:                tabs--;
747:                println("*** End of tokens used by the parser");
748:
749:                // Close the tokens output file
750:                currentOutput.close();
751:                currentOutput = null;
752:            }
753:
754:            /// unused.
755:            protected String processActionForSpecialSymbols(String actionStr,
756:                    int line, RuleBlock currentRule, ActionTransInfo tInfo) {
757:                return actionStr;
758:            }
759:
760:            /** Get a string for an expression to generate creation of an AST subtree.
761:             * @param v A Vector of String, where each element is an expression in the target language yielding an AST node.
762:             */
763:            public String getASTCreateString(Vector v) {
764:                return null;
765:            }
766:
767:            /** Get a string for an expression to generate creation of an AST node
768:             * @param str The arguments to the AST constructor
769:             */
770:            public String getASTCreateString(GrammarAtom atom, String str) {
771:                return null;
772:            }
773:
774:            /** Map an identifier to its corresponding tree-node variable.
775:             * This is context-sensitive, depending on the rule and alternative
776:             * being generated
777:             * @param id The identifier name to map
778:             * @param tInfo The action translation info (unused; this generator returns the id unchanged).
779:             */
780:            public String mapTreeId(String id, ActionTransInfo tInfo) {
781:                return id;
782:            }
783:
784:            /** Format a lookahead or follow set.
785:             * @param depth The depth of the entire lookahead/follow
786:             * @param k The lookahead level to print
787:             * @param lookahead  The lookahead/follow set to print
788:             */
789:            public void printSet(int depth, int k, Lookahead lookahead) {
790:                int numCols = 5;
791:
792:                int[] elems = lookahead.fset.toArray();
793:
794:                if (depth != 1) {
795:                    print("k==" + k + ": {");
796:                } else {
797:                    print("{ ");
798:                }
799:                if (elems.length > numCols) {
800:                    _println("");
801:                    tabs++;
802:                    print("");
803:                }
804:
805:                int column = 0;
806:                for (int i = 0; i < elems.length; i++) {
807:                    column++;
808:                    if (column > numCols) {
809:                        _println("");
810:                        print("");
811:                        column = 0;
812:                    }
813:                    if (doingLexRules) {
814:                        _print(charFormatter.literalChar(elems[i]));
815:                    } else {
816:                        _print((String) grammar.tokenManager.getVocabulary()
817:                                .elementAt(elems[i]));
818:                    }
819:                    if (i != elems.length - 1) {
820:                        _print(", ");
821:                    }
822:                }
823:
824:                if (elems.length > numCols) {
825:                    _println("");
826:                    tabs--;
827:                    print("");
828:                }
829:                _println(" }");
830:            }
831:        }
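
The two package-private helpers at the top of the class do all of the escaping for the generated DocBook markup: HTMLEncode() turns '&', '"', '\'', '<' and '>' into SGML entities before grammar comments and literals are written into <para> elements, and QuoteForId() rewrites '_' as '.' so rule names can be used in the id and linkend attributes emitted by genRule() and gen(RuleRefElement). The snippet below is a minimal illustrative sketch (DocBookEscapeDemo is a hypothetical class, not part of the ANTLR or TopLink distribution); it has to live in the persistence.antlr package because both helpers have default (package) access.

        package persistence.antlr;

        /** Hypothetical demo class illustrating DocBookCodeGenerator's
         *  package-private escaping helpers; not part of the distribution. */
        public class DocBookEscapeDemo {
            public static void main(String[] args) {
                // SGML-significant characters become entities.
                System.out.println(DocBookCodeGenerator.HTMLEncode("a < b & \"c\""));
                // prints: a &lt; b &amp; &quot;c&quot;

                // Underscores become dots, giving a value usable as a DocBook ID.
                System.out.println(DocBookCodeGenerator.QuoteForId("expr_list"));
                // prints: expr.list
            }
        }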