Source Code Cross Referenced for DocBookCodeGenerator.java in » IDE-Netbeans » cnd » antlr



001:        package antlr;
002:
003:        /* ANTLR Translator Generator
004:         * Project led by Terence Parr at http://www.cs.usfca.edu
005:         * Software rights: http://www.antlr.org/license.html
006:         */
007:
008:        /** TODO: strip comments from javadoc entries
009:         */
010:
011:        import java.util.Enumeration;
012:
013:        import antlr.collections.impl.BitSet;
014:        import antlr.collections.impl.Vector;
015:
016:        import java.io.PrintWriter; //SAS: changed for proper text file io
017:        import java.io.IOException;
018:        import java.io.FileWriter;
019:
020:        /** Generate P.sgml, a cross-linked representation of grammar P, with or without actions. */
021:        public class DocBookCodeGenerator extends CodeGenerator {
022:            /** non-zero if inside syntactic predicate generation */
023:            protected int syntacticPredLevel = 0;
024:
025:            /** true during lexer generation, false during parser generation */
026:            protected boolean doingLexRules = false;
027:
028:            protected boolean firstElementInAlt;
029:
030:            protected AlternativeElement prevAltElem = null; // what was generated last?
031:
032:            /** Create a DocBook code-generator.
033:             * The caller must still call setTool, setBehavior, and setAnalyzer
034:             * before generating code.
035:             */
036:            public DocBookCodeGenerator() {
037:                super ();
038:                charFormatter = new JavaCharFormatter();
039:            }
040:
041:            /** Encode a string for printing in an HTML document,
042:             * e.g. encode '<', '>' and similar characters.
043:             * @param s the string to encode
044:             */
045:            static String HTMLEncode(String s) {
046:                StringBuffer buf = new StringBuffer();
047:
048:                for (int i = 0, len = s.length(); i < len; i++) {
049:                    char c = s.charAt(i);
050:                    if (c == '&')
051:                        buf.append("&amp;");
052:                    else if (c == '\"')
053:                        buf.append("&quot;");
054:                    else if (c == '\'')
055:                        buf.append("&#039;");
056:                    else if (c == '<')
057:                        buf.append("&lt;");
058:                    else if (c == '>')
059:                        buf.append("&gt;");
060:                    else
061:                        buf.append(c);
062:                }
063:                return buf.toString();
064:            }
065:
066:            /** Convert a string into a form suitable for use as a DocBook id,
067:             * e.g. replace '_' with '.'.
068:             * @param s the string to convert
069:             */
070:            static String QuoteForId(String s) {
071:                StringBuffer buf = new StringBuffer();
072:
073:                for (int i = 0, len = s.length(); i < len; i++) {
074:                    char c = s.charAt(i);
075:                    if (c == '_')
076:                        buf.append(".");
077:                    else
078:                        buf.append(c);
079:                }
080:                return buf.toString();
081:            }
082:
083:            public void gen() {
084:                // Do the code generation
085:                try {
086:                    // Loop over all grammars
087:                    Enumeration grammarIter = behavior.grammars.elements();
088:                    while (grammarIter.hasMoreElements()) {
089:                        Grammar g = (Grammar) grammarIter.nextElement();
090:
091:                        // Connect all the components to each other
092:                        /*
093:                        g.setGrammarAnalyzer(analyzer);
094:                        analyzer.setGrammar(g);
095:                         */
096:                        g.setCodeGenerator(this );
097:
098:                        // To get the right overloading behavior across heterogeneous grammars
099:                        g.generate();
100:
101:                        if (antlrTool.hasError()) {
102:                            antlrTool.fatalError("Exiting due to errors.");
103:                        }
104:
105:                    }
106:
107:                } catch (IOException e) {
108:                    antlrTool.reportException(e, null);
109:                }
110:            }
111:
112:            /** Generate code for the given grammar element.
113:             * @param action The {...} action to generate
114:             */
115:            public void gen(ActionElement action, Context context) {
116:                // no-op
117:            }
118:
119:            /** Generate code for the given grammar element.
120:             * @param blk The "x|y|z|..." block to generate
121:             */
122:            public void gen(AlternativeBlock blk, Context context) {
123:                genGenericBlock(blk, "");
124:            }
125:
126:            /** Generate code for the given grammar element.
127:             * @param end The block-end element to generate.  Block-end
128:             * elements are synthesized by the grammar parser to represent
129:             * the end of a block.
130:             */
131:            public void gen(BlockEndElement end, Context context) {
132:                // no-op
133:            }
134:
135:            /** Generate code for the given grammar element.
136:             * @param atom The character literal reference to generate
137:             */
138:            public void gen(CharLiteralElement atom, Context context) {
139:                if (atom.not) {
140:                    _print("~");
141:                }
142:                _print(HTMLEncode(atom.atomText) + " ");
143:            }
144:
145:            /** Generate code for the given grammar element.
146:             * @param r The character-range reference to generate
147:             */
148:            public void gen(CharRangeElement r, Context context) {
149:                print(r.beginText + ".." + r.endText + " ");
150:            }
151:
152:            /** Generate the lexer DocBook (SGML) file */
153:            public void gen(LexerGrammar g) throws IOException {
154:                setGrammar(g);
155:                antlrTool.reportProgress("Generating " + grammar.getClassName()
156:                        + ".sgml");
157:                currentOutput = antlrTool.openOutputFile(grammar.getClassName()
158:                        + ".sgml");
159:
160:                tabs = 0;
161:                doingLexRules = true;
162:
163:                // Generate the header common to all output files
164:                genHeader();
165:
166:                // Output the user-defined lexer preamble
167:                // RK: guess not..
168:                // println(grammar.preambleAction.getText());
169:
170:                // Generate lexer class definition
171:                println("");
172:
173:                // print javadoc comment if any
174:                if (grammar.comment != null) {
175:                    _println(HTMLEncode(grammar.comment));
176:                }
177:
178:                println("<para>Definition of lexer " + grammar.getClassName()
179:                        + ", which is a subclass of " + grammar.getSuperClass()
180:                        + ".</para>");
181:
182:                // Generate user-defined parser class members
183:                // printAction(grammar.classMemberAction.getText());
184:
185:                /*
186:                // Generate string literals
187:                println("");
188:                println("*** String literals used in the parser");
189:                println("The following string literals were used in the parser.");
190:                println("An actual code generator would arrange to place these literals");
191:                println("into a table in the generated lexer, so that actions in the");
192:                println("generated lexer could match token text against the literals.");
193:                println("String literals used in the lexer are not listed here, as they");
194:                println("are incorporated into the mainstream lexer processing.");
195:                tabs++;
196:                // Enumerate all of the symbols and look for string literal symbols
197:                Enumeration ids = grammar.getSymbols();
198:                while ( ids.hasMoreElements() ) {
199:                	GrammarSymbol sym = (GrammarSymbol)ids.nextElement();
200:                	// Only processing string literals -- reject other symbol entries
201:                	if ( sym instanceof StringLiteralSymbol ) {
202:                		StringLiteralSymbol s = (StringLiteralSymbol)sym;
203:                		println(s.getId() + " = " + s.getTokenType());
204:                	}
205:                }
206:                tabs--;
207:                println("*** End of string literals used by the parser");
208:                 */
209:
210:                // Generate nextToken() rule.
211:                // nextToken() is a synthetic lexer rule that is the implicit OR of all
212:                // user-defined lexer rules.
213:                genNextToken();
214:
215:                // Generate code for each rule in the lexer
216:
217:                Enumeration ids = grammar.rules.elements();
218:                while (ids.hasMoreElements()) {
219:                    RuleSymbol rs = (RuleSymbol) ids.nextElement();
220:                    if (!rs.id.equals("mnextToken")) {
221:                        genRule(rs);
222:                    }
223:                }
224:
225:                // Close the lexer output file
226:                currentOutput.close();
227:                currentOutput = null;
228:                doingLexRules = false;
229:            }
230:
231:            /** Generate code for the given grammar element.
232:             * @param blk The (...)+ block to generate
233:             */
234:            public void gen(OneOrMoreBlock blk, Context context) {
235:                genGenericBlock(blk, "+");
236:            }
237:
238:            /** Generate the parser DocBook (SGML) file */
239:            public void gen(ParserGrammar g) throws IOException {
240:                setGrammar(g);
241:                // Open the output stream for the parser and set the currentOutput
242:                antlrTool.reportProgress("Generating " + grammar.getClassName()
243:                        + ".sgml");
244:                currentOutput = antlrTool.openOutputFile(grammar.getClassName()
245:                        + ".sgml");
246:
247:                tabs = 0;
248:
249:                // Generate the header common to all output files.
250:                genHeader();
251:
252:                // Generate parser class definition
253:                println("");
254:
255:                // print javadoc comment if any
256:                if (grammar.comment != null) {
257:                    _println(HTMLEncode(grammar.comment));
258:                }
259:
260:                println("<para>Definition of parser " + grammar.getClassName()
261:                        + ", which is a subclass of " + grammar.getSuperClass()
262:                        + ".</para>");
263:
264:                // Enumerate the parser rules
265:                Enumeration rules = grammar.rules.elements();
266:                while (rules.hasMoreElements()) {
267:                    println("");
268:                    // Get the next symbol from the list and downcast it to the proper type
269:                    GrammarSymbol sym = (GrammarSymbol) rules.nextElement();
270:                    // Only process parser rules
271:                    if (sym instanceof  RuleSymbol) {
272:                        genRule((RuleSymbol) sym);
273:                    }
274:                }
275:                tabs--;
276:                println("");
277:
278:                genTail();
279:
280:                // Close the parser output stream
281:                currentOutput.close();
282:                currentOutput = null;
283:            }
284:
285:            /** Generate code for the given grammar element.
286:             * @param rr The rule-reference to generate
287:             */
288:            public void gen(RuleRefElement rr, Context context) {
289:                RuleSymbol rs = (RuleSymbol) grammar.getSymbol(rr.targetRule);
290:
291:                // Generate the actual rule description
292:                _print("<link linkend=\"" + QuoteForId(rr.targetRule) + "\">");
293:                _print(rr.targetRule);
294:                _print("</link>");
295:                // RK: Leave out args..
296:                //	if (rr.args != null) {
297:                //		_print("["+rr.args+"]");
298:                //	}
299:                _print(" ");
300:            }
301:
302:            /** Generate code for the given grammar element.
303:             * @param atom The string-literal reference to generate
304:             */
305:            public void gen(StringLiteralElement atom, Context context) {
306:                if (atom.not) {
307:                    _print("~");
308:                }
309:                _print(HTMLEncode(atom.atomText));
310:                _print(" ");
311:            }
312:
313:            /** Generate code for the given grammar element.
314:             * @param r The token-range reference to generate
315:             */
316:            public void gen(TokenRangeElement r, Context context) {
317:                print(r.beginText + ".." + r.endText + " ");
318:            }
319:
320:            /** Generate code for the given grammar element.
321:             * @param atom The token-reference to generate
322:             */
323:            public void gen(TokenRefElement atom, Context context) {
324:                if (atom.not) {
325:                    _print("~");
326:                }
327:                _print(atom.atomText);
328:                _print(" ");
329:            }
330:
331:            public void gen(TreeElement t, Context context) {
332:                print(t + " ");
333:            }
334:
335:            /** Generate the tree-walker DocBook (SGML) file */
336:            public void gen(TreeWalkerGrammar g) throws IOException {
337:                setGrammar(g);
338:                // Open the output stream for the parser and set the currentOutput
339:                antlrTool.reportProgress("Generating " + grammar.getClassName()
340:                        + ".sgml");
341:                currentOutput = antlrTool.openOutputFile(grammar.getClassName()
342:                        + ".sgml");
343:                //SAS: changed for proper text file io
344:
345:                tabs = 0;
346:
347:                // Generate the header common to all output files.
348:                genHeader();
349:
350:                // Output the user-defined tree-walker preamble
351:                println("");
352:                //		println("*** Tree-walker Preamble Action.");
353:                //		println("This action will appear before the declaration of your tree-walker class:");
354:                //		tabs++;
355:                //		println(grammar.preambleAction.getText());
356:                //		tabs--;
357:                //		println("*** End of tree-walker Preamble Action");
358:
359:                // Generate tree-walker class definition
360:                println("");
361:
362:                // print javadoc comment if any
363:                if (grammar.comment != null) {
364:                    _println(HTMLEncode(grammar.comment));
365:                }
366:
367:                println("<para>Definition of tree parser "
368:                        + grammar.getClassName() + ", which is a subclass of "
369:                        + grammar.getSuperClass() + ".</para>");
370:
371:                // Generate user-defined tree-walker class members
372:                //		println("");
373:                //		println("*** User-defined tree-walker class members:");
374:                //		println("These are the member declarations that you defined for your class:");
375:                //		tabs++;
376:                //		printAction(grammar.classMemberAction.getText());
377:                //		tabs--;
378:                //		println("*** End of user-defined tree-walker class members");
379:
380:                // Generate code for each rule in the grammar
381:                println("");
382:                //		println("*** tree-walker rules:");
383:                tabs++;
384:
385:                // Enumerate the tree-walker rules
386:                Enumeration rules = grammar.rules.elements();
387:                while (rules.hasMoreElements()) {
388:                    println("");
389:                    // Get the rules from the list and downcast it to proper type
390:                    GrammarSymbol sym = (GrammarSymbol) rules.nextElement();
391:                    // Only process tree-walker rules
392:                    if (sym instanceof  RuleSymbol) {
393:                        genRule((RuleSymbol) sym);
394:                    }
395:                }
396:                tabs--;
397:                println("");
398:                //		println("*** End of tree-walker rules");
399:
400:                //		println("");
401:                //		println("*** End of tree-walker");
402:
403:                // Close the tree-walker output stream
404:                currentOutput.close();
405:                currentOutput = null;
406:            }
407:
408:            /** Generate a wildcard element */
409:            public void gen(WildcardElement wc, Context context) {
410:                /*
411:                if ( wc.getLabel()!=null ) {
412:                	_print(wc.getLabel()+"=");
413:                }
414:                 */
415:                _print(". ");
416:            }
417:
418:            /** Generate code for the given grammar element.
419:             * @param blk The (...)* block to generate
420:             */
421:            public void gen(ZeroOrMoreBlock blk, Context context) {
422:                genGenericBlock(blk, "*");
423:            }
424:
425:            protected void genAlt(Alternative alt) {
426:                if (alt.getTreeSpecifier() != null) {
427:                    _print(alt.getTreeSpecifier().getText());
428:                }
429:                prevAltElem = null;
430:                for (AlternativeElement elem = alt.head; !(elem instanceof  BlockEndElement); elem = elem.next) {
431:                    elem.generate(null);
432:                    firstElementInAlt = false;
433:                    prevAltElem = elem;
434:                }
435:            }
436:
437:            /** Generate the header for a block, which may be a RuleBlock or a
438:             * plain AlternativeBlock.  This generates any variable declarations,
439:             * init-actions, and syntactic-predicate-testing variables.
440:             * @param blk The block for which the preamble is to be generated.
441:             */
442:            //	protected void genBlockPreamble(AlternativeBlock blk) {
443:            // RK: don't dump out init actions
444:            // dump out init action
445:            //		if ( blk.initAction!=null ) {
446:            //			printAction("{" + blk.initAction + "}");
447:            //		}
448:            //	}
449:            /** Generate common code for a block of alternatives.
450:             * In this generator, a block with more than one alternative is
451:             * rendered as a DocBook itemized list, with one list item per
452:             * alternative.
453:             */
454:            public void genCommonBlock(AlternativeBlock blk) {
455:                if (blk.alternatives.size() > 1)
456:                    println("<itemizedlist mark=\"none\">");
457:                for (int i = 0; i < blk.alternatives.size(); i++) {
458:                    Alternative alt = blk.getAlternativeAt(i);
459:                    AlternativeElement elem = alt.head;
460:
461:                    if (blk.alternatives.size() > 1)
462:                        print("<listitem><para>");
463:
464:                    // dump alt operator |
465:                    if (i > 0 && blk.alternatives.size() > 1) {
466:                        _print("| ");
467:                    }
468:
469:                    // Dump the alternative, starting with predicates
470:                    //
471:                    boolean save = firstElementInAlt;
472:                    firstElementInAlt = true;
473:                    tabs++; // in case we do a newline in alt, increase the tab indent
474:
475:                    genAlt(alt);
476:                    tabs--;
477:                    firstElementInAlt = save;
478:                    if (blk.alternatives.size() > 1)
479:                        _println("</para></listitem>");
480:                }
481:                if (blk.alternatives.size() > 1)
482:                    println("</itemizedlist>");
483:            }
484:
485:            /** Generate a textual representation of the follow set
486:             * for a block.
487:             * @param blk  The rule block of interest
488:             */
489:            public void genFollowSetForRuleBlock(RuleBlock blk) {
490:                Lookahead follow = grammar.theLLkAnalyzer
491:                        .FOLLOW(1, blk.endNode);
492:                printSet(grammar.maxk, 1, follow);
493:            }
494:
495:            protected void genGenericBlock(AlternativeBlock blk, String blkOp) {
496:                if (blk.alternatives.size() > 1) {
497:                    // make sure we start on a new line
498:                    _println("");
499:                    if (!firstElementInAlt) {
500:                        // only do newline if the last element wasn't a multi-line block
501:                        //if ( prevAltElem==null ||
502:                        //	 !(prevAltElem instanceof AlternativeBlock) ||
503:                        //	 ((AlternativeBlock)prevAltElem).alternatives.size()==1 )
504:                        //{
505:                        _println("(");
506:                        //}
507:                        //else
508:                        //{
509:                        //	_print("(");
510:                        //}
511:                        // _println("");
512:                        // print("(\t");
513:                    } else {
514:                        _print("(");
515:                    }
516:                } else {
517:                    _print("( ");
518:                }
519:                // RK: don't dump init actions
520:                //	genBlockPreamble(blk);
521:                genCommonBlock(blk);
522:                if (blk.alternatives.size() > 1) {
523:                    _println("");
524:                    print(")" + blkOp + " ");
525:                    // if not last element of alt, need newline & to indent
526:                    if (!(blk.next instanceof  BlockEndElement)) {
527:                        _println("");
528:                        print("");
529:                    }
530:                } else {
531:                    _print(")" + blkOp + " ");
532:                }
533:            }
534:
535:            /** Generate the DocBook header that is common to all output files */
536:            protected void genHeader() {
537:                println("<?xml version=\"1.0\" standalone=\"no\"?>");
538:                println("<!DOCTYPE book PUBLIC \"-//OASIS//DTD DocBook V3.1//EN\">");
539:                println("<book lang=\"en\">");
540:                println("<bookinfo>");
541:                println("<title>Grammar " + grammar.getClassName() + "</title>");
542:                println("  <author>");
543:                println("    <firstname></firstname>");
544:                println("    <othername></othername>");
545:                println("    <surname></surname>");
546:                println("    <affiliation>");
547:                println("     <address>");
548:                println("     <email></email>");
549:                println("     </address>");
550:                println("    </affiliation>");
551:                println("  </author>");
552:                println("  <othercredit>");
553:                println("    <contrib>");
554:                println("    Generated by <ulink url=\"http://www.ANTLR.org/\">ANTLR</ulink>"
555:                        + antlrTool.version);
556:                println("    from " + antlrTool.grammarFile);
557:                println("    </contrib>");
558:                println("  </othercredit>");
559:                println("  <pubdate></pubdate>");
560:                println("  <abstract>");
561:                println("  <para>");
562:                println("  </para>");
563:                println("  </abstract>");
564:                println("</bookinfo>");
565:                println("<chapter>");
566:                println("<title></title>");
567:            }
568:
569:            /** Generate the lookahead set for an alternative. */
570:            protected void genLookaheadSetForAlt(Alternative alt) {
571:                if (doingLexRules && alt.cache[1].containsEpsilon()) {
572:                    println("MATCHES ALL");
573:                    return;
574:                }
575:                int depth = alt.lookaheadDepth;
576:                if (depth == GrammarAnalyzer.NONDETERMINISTIC) {
577:                    // if the decision is nondeterministic, do the best we can: LL(k)
578:                    // any predicates that are around will be generated later.
579:                    depth = grammar.maxk;
580:                }
581:                for (int i = 1; i <= depth; i++) {
582:                    Lookahead lookahead = alt.cache[i];
583:                    printSet(depth, i, lookahead);
584:                }
585:            }
586:
587:            /** Generate a textual representation of the lookahead set
588:             * for a block.
589:             * @param blk  The block of interest
590:             */
591:            public void genLookaheadSetForBlock(AlternativeBlock blk) {
592:                // Find the maximal lookahead depth over all alternatives
593:                int depth = 0;
594:                for (int i = 0; i < blk.alternatives.size(); i++) {
595:                    Alternative alt = blk.getAlternativeAt(i);
596:                    if (alt.lookaheadDepth == GrammarAnalyzer.NONDETERMINISTIC) {
597:                        depth = grammar.maxk;
598:                        break;
599:                    } else if (depth < alt.lookaheadDepth) {
600:                        depth = alt.lookaheadDepth;
601:                    }
602:                }
603:
604:                for (int i = 1; i <= depth; i++) {
605:                    Lookahead lookahead = grammar.theLLkAnalyzer.look(i, blk);
606:                    printSet(depth, i, lookahead);
607:                }
608:            }
609:
610:            /** Generate the nextToken rule.
611:             * nextToken is a synthetic lexer rule that is the implicit OR of all
612:             * user-defined lexer rules.
613:             */
614:            public void genNextToken() {
615:                println("");
616:                println("/** Lexer nextToken rule:");
617:                println(" *  The lexer nextToken rule is synthesized from all of the user-defined");
618:                println(" *  lexer rules.  It logically consists of one big alternative block with");
619:                println(" *  each user-defined rule being an alternative.");
620:                println(" */");
621:
622:                // Create the synthesized rule block for nextToken consisting
623:                // of an alternate block containing all the user-defined lexer rules.
624:                RuleBlock blk = MakeGrammar.createNextTokenRule(grammar,
625:                        grammar.rules, "nextToken");
626:
627:                // Define the nextToken rule symbol
628:                RuleSymbol nextTokenRs = new RuleSymbol("mnextToken");
629:                nextTokenRs.setDefined();
630:                nextTokenRs.setBlock(blk);
631:                nextTokenRs.access = "private";
632:                grammar.define(nextTokenRs);
633:
634:                /*
635:                // Analyze the synthesized block
636:                if (!grammar.theLLkAnalyzer.deterministic(blk))
637:                {
638:                	println("The grammar analyzer has determined that the synthesized");
639:                	println("nextToken rule is non-deterministic (i.e., it has ambiguities)");
640:                	println("This means that there is some overlap of the character");
641:                	println("lookahead for two or more of your lexer rules.");
642:                }
643:                 */
644:
645:                genCommonBlock(blk);
646:            }
647:
648:            /** Generate code for a named rule block
649:             * @param s The RuleSymbol describing the rule to generate
650:             */
651:            public void genRule(RuleSymbol s) {
652:                if (s == null || !s.isDefined())
653:                    return; // undefined rule
654:                println("");
655:
656:                if (s.access.length() != 0) {
657:                    if (!s.access.equals("public")) {
658:                        _print("<para>" + s.access + " </para>");
659:                    }
660:                }
661:
662:                println("<section id=\"" + QuoteForId(s.getId()) + "\">");
663:                println("<title>" + s.getId() + "</title>");
664:                if (s.comment != null) {
665:                    _println("<para>" + HTMLEncode(s.comment) + "</para>");
666:                }
667:                println("<para>");
668:
669:                // Get rule return type and arguments
670:                RuleBlock rblk = s.getBlock();
671:
672:                // RK: for HTML output not of much value...
673:                // Gen method return value(s)
674:                //		if (rblk.returnAction != null) {
675:                //			_print("["+rblk.returnAction+"]");
676:                //		}
677:                // Gen arguments
678:                //		if (rblk.argAction != null)
679:                //		{
680:                //				_print(" returns [" + rblk.argAction+"]");
681:                //		}
682:                _println("");
683:                print(s.getId() + ":\t");
684:                tabs++;
685:
686:                // Dump any init-action
687:                // genBlockPreamble(rblk);
688:
689:                // Dump the alternates of the rule
690:                genCommonBlock(rblk);
691:
692:                _println("");
693:                //		println(";");
694:                tabs--;
695:                _println("</para>");
696:                _println("</section><!-- section \"" + s.getId() + "\" -->");
697:            }
698:
699:            /** Generate the syntactic predicate.  This basically generates
700:             * the alternative block, but tracks if we are inside a synPred
701:             * @param blk  The syntactic predicate block
702:             */
703:            protected void genSynPred(SynPredBlock blk) {
704:                // no op
705:            }
706:
707:            public void genTail() {
708:                println("</chapter>");
709:                println("</book>");
710:            }
711:
712:            /** Generate the token types TXT file */
713:            protected void genTokenTypes(TokenManager tm) throws IOException {
714:                // Open the token output TXT file and set the currentOutput stream
715:                antlrTool.reportProgress("Generating " + tm.getName()
716:                        + TokenTypesFileSuffix + TokenTypesFileExt);
717:                currentOutput = antlrTool.openOutputFile(tm.getName()
718:                        + TokenTypesFileSuffix + TokenTypesFileExt);
719:                //SAS: changed for proper text file io
720:                tabs = 0;
721:
722:                // Generate the header common to all output files
723:                genHeader();
724:
725:                // Generate a string for each token.  This creates a static
726:                // array of Strings indexed by token type.
727:                println("");
728:                println("*** Tokens used by the parser");
729:                println("This is a list of the token numeric values and the corresponding");
730:                println("token identifiers.  Some tokens are literals, and because of that");
731:                println("they have no identifiers.  Literals are double-quoted.");
732:                tabs++;
733:
734:                // Enumerate all the valid token types
735:                Vector v = tm.getVocabulary();
736:                for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
737:                    String s = (String) v.elementAt(i);
738:                    if (s != null) {
739:                        println(s + " = " + i);
740:                    }
741:                }
742:
743:                // Close the interface
744:                tabs--;
745:                println("*** End of tokens used by the parser");
746:
747:                // Close the tokens output file
748:                currentOutput.close();
749:                currentOutput = null;
750:            }
751:
752:            /** Unused; actions are passed through unchanged. */
753:            protected String processActionForSpecialSymbols(String actionStr,
754:                    int line, RuleBlock currentRule, ActionTransInfo tInfo) {
755:                return actionStr;
756:            }
757:
758:            /** Get a string for an expression to generate creation of an AST subtree.
759:             * @param v A Vector of String, where each element is an expression in the target language yielding an AST node.
760:             */
761:            public String getASTCreateString(Vector v) {
762:                return null;
763:            }
764:
765:            /** Get a string for an expression to generate creation of an AST node
766:             * @param str The arguments to the AST constructor
767:             */
768:            public String getASTCreateString(GrammarAtom atom, String str) {
769:                return null;
770:            }
771:
772:            /** Map an identifier to its corresponding tree-node variable.
773:             * This is context-sensitive, depending on the rule and alternative
774:             * being generated
775:             * @param id The identifier name to map
776:             * @param tInfo Additional action translation information (not used by this generator)
777:             */
778:            public String mapTreeId(String id, ActionTransInfo tInfo) {
779:                return id;
780:            }
781:
782:            /** Format a lookahead or follow set.
783:             * @param depth The depth of the entire lookahead/follow
784:             * @param k The lookahead level to print
785:             * @param lookahead  The lookahead/follow set to print
786:             */
787:            public void printSet(int depth, int k, Lookahead lookahead) {
788:                int numCols = 5;
789:
790:                int[] elems = lookahead.fset.toArray();
791:
792:                if (depth != 1) {
793:                    print("k==" + k + ": {");
794:                } else {
795:                    print("{ ");
796:                }
797:                if (elems.length > numCols) {
798:                    _println("");
799:                    tabs++;
800:                    print("");
801:                }
802:
803:                int column = 0;
804:                for (int i = 0; i < elems.length; i++) {
805:                    column++;
806:                    if (column > numCols) {
807:                        _println("");
808:                        print("");
809:                        column = 0;
810:                    }
811:                    if (doingLexRules) {
812:                        _print(charFormatter.literalChar(elems[i]));
813:                    } else {
814:                        _print((String) grammar.tokenManager.getVocabulary()
815:                                .elementAt(elems[i]));
816:                    }
817:                    if (i != elems.length - 1) {
818:                        _print(", ");
819:                    }
820:                }
821:
822:                if (elems.length > numCols) {
823:                    _println("");
824:                    tabs--;
825:                    print("");
826:                }
827:                _println(" }");
828:            }
829:        }
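
For orientation, here is a minimal, hypothetical driver showing how this generator is typically reached: through the ANTLR 2.x command-line front end rather than by instantiating the class directly. This is an illustrative sketch, not part of the cross-referenced file; the -docbook switch and the grammar file name MyParser.g are assumptions, and the front end performs the setTool/setBehavior/setAnalyzer wiring mentioned in the constructor comment before gen() runs.

        // Illustrative sketch only (assumes an ANTLR 2.7.x jar on the classpath and
        // that its Tool accepts a -docbook switch selecting DocBookCodeGenerator;
        // "MyParser.g" is a made-up grammar file name).
        public class GenerateDocBookDocs {
            public static void main(String[] args) {
                // Equivalent to running: java antlr.Tool -docbook MyParser.g
                // On success the generator writes MyParser.sgml, the cross-linked
                // DocBook representation produced by the code above.
                antlr.Tool.main(new String[] { "-docbook", "MyParser.g" });
            }
        }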