Source Code Cross Referenced for HTMLCodeGenerator.java in Database-ORM » toplink » persistence.antlr



001:        package persistence.antlr;
002:
003:        /* ANTLR Translator Generator
004:         * Project led by Terence Parr at http://www.jGuru.com
005:         * Software rights: http://www.antlr.org/license.html
006:         *
007:         */
008:
009:        import java.util.Enumeration;
010:
011:        import persistence.antlr.collections.impl.BitSet;
012:        import persistence.antlr.collections.impl.Vector;
013:
014:        import java.io.PrintWriter; //SAS: changed for proper text file io
015:        import java.io.IOException;
016:        import java.io.FileWriter;
017:
018:        /**Generate P.html, a cross-linked representation of P with or without actions */
019:        public class HTMLCodeGenerator extends CodeGenerator {
020:            /** non-zero if inside syntactic predicate generation */
021:            protected int syntacticPredLevel = 0;
022:
023:            /** true during lexer generation, false during parser generation */
024:            protected boolean doingLexRules = false;
025:
026:            protected boolean firstElementInAlt;
027:
028:            protected AlternativeElement prevAltElem = null; // what was generated last?
029:
030:            /** Create an HTML code-generator.
031:             * The caller must still call setTool, setBehavior, and setAnalyzer
032:             * before generating code.
033:             */
034:            public HTMLCodeGenerator() {
035:                super ();
036:                charFormatter = new JavaCharFormatter();
037:            }
038:
039:            /** Encode a string for printing in an HTML document.
040:             * e.g. encode '<', '>', and similar special characters.
041:             * @param s the string to encode
042:             */
043:            static String HTMLEncode(String s) {
044:                StringBuffer buf = new StringBuffer();
045:
046:                for (int i = 0, len = s.length(); i < len; i++) {
047:                    char c = s.charAt(i);
048:                    if (c == '&')
049:                        buf.append("&amp;");
050:                    else if (c == '\"')
051:                        buf.append("&quot;");
052:                    else if (c == '\'')
053:                        buf.append("&#039;");
054:                    else if (c == '<')
055:                        buf.append("&lt;");
056:                    else if (c == '>')
057:                        buf.append("&gt;");
058:                    else
059:                        buf.append(c);
060:                }
061:                return buf.toString();
062:            }
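            // Illustrative note, not part of the original source: HTMLEncode only escapes
            // the five characters handled above, e.g.
            //   HTMLEncode("a < b && c")  yields  "a &lt; b &amp;&amp; c"
            // so grammar text can be embedded verbatim in the generated <PRE> section.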
063:
064:            public void gen() {
065:                // Do the code generation
066:                try {
067:                    // Loop over all grammars
068:                    Enumeration grammarIter = behavior.grammars.elements();
069:                    while (grammarIter.hasMoreElements()) {
070:                        Grammar g = (Grammar) grammarIter.nextElement();
071:
072:                        // Connect all the components to each other
073:                        /*
074:                        g.setGrammarAnalyzer(analyzer);
075:                        analyzer.setGrammar(g);
076:                         */
077:                        g.setCodeGenerator(this );
078:
079:                        // To get the right overloading behavior across heterogeneous grammars
080:                        g.generate();
081:
082:                        if (antlrTool.hasError()) {
083:                            antlrTool.fatalError("Exiting due to errors.");
084:                        }
085:
086:                    }
087:
088:                } catch (IOException e) {
089:                    antlrTool.reportException(e, null);
090:                }
091:            }
092:
093:            /** Generate code for the given grammar element.
094:             * @param action The {...} action to generate
095:             */
096:            public void gen(ActionElement action) {
097:                // no-op
098:            }
099:
100:            /** Generate code for the given grammar element.
101:             * @param blk The "x|y|z|..." block to generate
102:             */
103:            public void gen(AlternativeBlock blk) {
104:                genGenericBlock(blk, "");
105:            }
106:
107:            /** Generate code for the given grammar element.
108:             * @param end The block-end element to generate.  Block-end
109:             * elements are synthesized by the grammar parser to represent
110:             * the end of a block.
111:             */
112:            public void gen(BlockEndElement end) {
113:                // no-op
114:            }
115:
116:            /** Generate code for the given grammar element.
117:             * @param atom The character literal reference to generate
118:             */
119:            public void gen(CharLiteralElement atom) {
120:                if (atom.not) {
121:                    _print("~");
122:                }
123:                _print(HTMLEncode(atom.atomText) + " ");
124:            }
125:
126:            /** Generate code for the given grammar element.
127:             * @param r The character-range reference to generate
128:             */
129:            public void gen(CharRangeElement r) {
130:                print(r.beginText + ".." + r.endText + " ");
131:            }
132:
133:            /** Generate the lexer HTML file */
134:            public void gen(LexerGrammar g) throws IOException {
135:                setGrammar(g);
136:                antlrTool.reportProgress("Generating " + grammar.getClassName()
137:                        + TokenTypesFileExt);
138:                currentOutput = antlrTool.openOutputFile(grammar.getClassName()
139:                        + TokenTypesFileExt);
140:                //SAS: changed for proper text file io
141:
142:                tabs = 0;
143:                doingLexRules = true;
144:
145:                // Generate the header common to all output files
146:                genHeader();
147:
148:                // Output the user-defined lexer preamble
149:                // RK: guess not..
150:                // println(grammar.preambleAction.getText());
151:
152:                // Generate lexer class definition
153:                println("");
154:
155:                // print javadoc comment if any
156:                if (grammar.comment != null) {
157:                    _println(HTMLEncode(grammar.comment));
158:                }
159:
160:                println("Definition of lexer " + grammar.getClassName()
161:                        + ", which is a subclass of " + grammar.getSuperClass()
162:                        + ".");
163:
164:                // Generate user-defined parser class members
165:                // printAction(grammar.classMemberAction.getText());
166:
167:                /*
168:                // Generate string literals
169:                println("");
170:                println("*** String literals used in the parser");
171:                println("The following string literals were used in the parser.");
172:                println("An actual code generator would arrange to place these literals");
173:                println("into a table in the generated lexer, so that actions in the");
174:                println("generated lexer could match token text against the literals.");
175:                println("String literals used in the lexer are not listed here, as they");
176:                println("are incorporated into the mainstream lexer processing.");
177:                tabs++;
178:                // Enumerate all of the symbols and look for string literal symbols
179:                Enumeration ids = grammar.getSymbols();
180:                while ( ids.hasMoreElements() ) {
181:                	GrammarSymbol sym = (GrammarSymbol)ids.nextElement();
182:                	// Only processing string literals -- reject other symbol entries
183:                	if ( sym instanceof StringLiteralSymbol ) {
184:                		StringLiteralSymbol s = (StringLiteralSymbol)sym;
185:                		println(s.getId() + " = " + s.getTokenType());
186:                	}
187:                }
188:                tabs--;
189:                println("*** End of string literals used by the parser");
190:                 */
191:
192:                // Generate nextToken() rule.
193:                // nextToken() is a synthetic lexer rule that is the implicit OR of all
194:                // user-defined lexer rules.
195:                genNextToken();
196:
197:                // Generate code for each rule in the lexer
198:
199:                Enumeration ids = grammar.rules.elements();
200:                while (ids.hasMoreElements()) {
201:                    RuleSymbol rs = (RuleSymbol) ids.nextElement();
202:                    if (!rs.id.equals("mnextToken")) {
203:                        genRule(rs);
204:                    }
205:                }
206:
207:                // Close the lexer output file
208:                currentOutput.close();
209:                currentOutput = null;
210:                doingLexRules = false;
211:            }
212:
213:            /** Generate code for the given grammar element.
214:             * @param blk The (...)+ block to generate
215:             */
216:            public void gen(OneOrMoreBlock blk) {
217:                genGenericBlock(blk, "+");
218:            }
219:
220:            /** Generate the parser HTML file */
221:            public void gen(ParserGrammar g) throws IOException {
222:                setGrammar(g);
223:                // Open the output stream for the parser and set the currentOutput
224:                antlrTool.reportProgress("Generating " + grammar.getClassName()
225:                        + ".html");
226:                currentOutput = antlrTool.openOutputFile(grammar.getClassName()
227:                        + ".html");
228:
229:                tabs = 0;
230:
231:                // Generate the header common to all output files.
232:                genHeader();
233:
234:                // Generate parser class definition
235:                println("");
236:
237:                // print javadoc comment if any
238:                if (grammar.comment != null) {
239:                    _println(HTMLEncode(grammar.comment));
240:                }
241:
242:                println("Definition of parser " + grammar.getClassName()
243:                        + ", which is a subclass of " + grammar.getSuperClass()
244:                        + ".");
245:
246:                // Enumerate the parser rules
247:                Enumeration rules = grammar.rules.elements();
248:                while (rules.hasMoreElements()) {
249:                    println("");
250:                    // Get the rule from the list and downcast it to the proper type
251:                    GrammarSymbol sym = (GrammarSymbol) rules.nextElement();
252:                    // Only process parser rules
253:                    if (sym instanceof  RuleSymbol) {
254:                        genRule((RuleSymbol) sym);
255:                    }
256:                }
257:                tabs--;
258:                println("");
259:
260:                genTail();
261:
262:                // Close the parser output stream
263:                currentOutput.close();
264:                currentOutput = null;
265:            }
266:
267:            /** Generate code for the given grammar element.
268:             * @param rr The rule-reference to generate
269:             */
270:            public void gen(RuleRefElement rr) {
271:                RuleSymbol rs = (RuleSymbol) grammar.getSymbol(rr.targetRule);
272:
273:                // Generate the actual rule description
274:                _print("<a href=\"" + grammar.getClassName() + ".html#"
275:                        + rr.targetRule + "\">");
276:                _print(rr.targetRule);
277:                _print("</a>");
278:                // RK: Leave out args..
279:                //	if (rr.args != null) {
280:                //		_print("["+rr.args+"]");
281:                //	}
282:                _print(" ");
283:            }
284:
285:            /** Generate code for the given grammar element.
286:             * @param atom The string-literal reference to generate
287:             */
288:            public void gen(StringLiteralElement atom) {
289:                if (atom.not) {
290:                    _print("~");
291:                }
292:                _print(HTMLEncode(atom.atomText));
293:                _print(" ");
294:            }
295:
296:            /** Generate code for the given grammar element.
297:             * @param r The token-range reference to generate
298:             */
299:            public void gen(TokenRangeElement r) {
300:                print(r.beginText + ".." + r.endText + " ");
301:            }
302:
303:            /** Generate code for the given grammar element.
304:             * @param atom The token-reference to generate
305:             */
306:            public void gen(TokenRefElement atom) {
307:                if (atom.not) {
308:                    _print("~");
309:                }
310:                _print(atom.atomText);
311:                _print(" ");
312:            }
313:
314:            public void gen(TreeElement t) {
315:                print(t + " ");
316:            }
317:
318:            /** Generate the tree-walker HTML file */
319:            public void gen(TreeWalkerGrammar g) throws IOException {
320:                setGrammar(g);
321:                // Open the output stream for the parser and set the currentOutput
322:                antlrTool.reportProgress("Generating " + grammar.getClassName()
323:                        + ".html");
324:                currentOutput = antlrTool.openOutputFile(grammar.getClassName()
325:                        + ".html");
326:                //SAS: changed for proper text file io
327:
328:                tabs = 0;
329:
330:                // Generate the header common to all output files.
331:                genHeader();
332:
333:                // Output the user-defined parser preamble
334:                println("");
335:                //		println("*** Tree-walker Preamble Action.");
336:                //		println("This action will appear before the declaration of your tree-walker class:");
337:                //		tabs++;
338:                //		println(grammar.preambleAction.getText());
339:                //		tabs--;
340:                //		println("*** End of tree-walker Preamble Action");
341:
342:                // Generate tree-walker class definition
343:                println("");
344:
345:                // print javadoc comment if any
346:                if (grammar.comment != null) {
347:                    _println(HTMLEncode(grammar.comment));
348:                }
349:
350:                println("Definition of tree parser " + grammar.getClassName()
351:                        + ", which is a subclass of " + grammar.getSuperClass()
352:                        + ".");
353:
354:                // Generate user-defined tree-walker class members
355:                //		println("");
356:                //		println("*** User-defined tree-walker class members:");
357:                //		println("These are the member declarations that you defined for your class:");
358:                //		tabs++;
359:                //		printAction(grammar.classMemberAction.getText());
360:                //		tabs--;
361:                //		println("*** End of user-defined tree-walker class members");
362:
363:                // Generate code for each rule in the grammar
364:                println("");
365:                //		println("*** tree-walker rules:");
366:                tabs++;
367:
368:                // Enumerate the tree-walker rules
369:                Enumeration rules = grammar.rules.elements();
370:                while (rules.hasMoreElements()) {
371:                    println("");
372:                    // Get the rule from the list and downcast it to the proper type
373:                    GrammarSymbol sym = (GrammarSymbol) rules.nextElement();
374:                    // Only process tree-walker rules
375:                    if (sym instanceof  RuleSymbol) {
376:                        genRule((RuleSymbol) sym);
377:                    }
378:                }
379:                tabs--;
380:                println("");
381:                //		println("*** End of tree-walker rules");
382:
383:                //		println("");
384:                //		println("*** End of tree-walker");
385:
386:                // Close the tree-walker output stream
387:                currentOutput.close();
388:                currentOutput = null;
389:            }
390:
391:            /** Generate a wildcard element */
392:            public void gen(WildcardElement wc) {
393:                /*
394:                if ( wc.getLabel()!=null ) {
395:                	_print(wc.getLabel()+"=");
396:                }
397:                 */
398:                _print(". ");
399:            }
400:
401:            /** Generate code for the given grammar element.
402:             * @param blk The (...)* block to generate
403:             */
404:            public void gen(ZeroOrMoreBlock blk) {
405:                genGenericBlock(blk, "*");
406:            }
407:
408:            protected void genAlt(Alternative alt) {
409:                if (alt.getTreeSpecifier() != null) {
410:                    _print(alt.getTreeSpecifier().getText());
411:                }
412:                prevAltElem = null;
413:                for (AlternativeElement elem = alt.head; !(elem instanceof  BlockEndElement); elem = elem.next) {
414:                    elem.generate();
415:                    firstElementInAlt = false;
416:                    prevAltElem = elem;
417:                }
418:            }
419:
420:            /** Generate the header for a block, which may be a RuleBlock or a
421:             * plain AlternativeBlock.  This generates any variable declarations,
422:             * init-actions, and syntactic-predicate-testing variables.
423:             * @param blk The block for which the preamble is to be generated.
424:             */
425:            //	protected void genBlockPreamble(AlternativeBlock blk) {
426:            // RK: don't dump out init actions
427:            // dump out init action
428:            //		if ( blk.initAction!=null ) {
429:            //			printAction("{" + blk.initAction + "}");
430:            //		}
431:            //	}
432:            /**Generate common code for a block of alternatives; return a postscript
433:             * that needs to be generated at the end of the block.  Other routines
434:             * may append else-clauses and such for error checking before the postfix
435:             * is generated.
436:             */
437:            public void genCommonBlock(AlternativeBlock blk) {
438:                for (int i = 0; i < blk.alternatives.size(); i++) {
439:                    Alternative alt = blk.getAlternativeAt(i);
440:                    AlternativeElement elem = alt.head;
441:
442:                    // dump alt operator |
443:                    if (i > 0 && blk.alternatives.size() > 1) {
444:                        _println("");
445:                        print("|\t");
446:                    }
447:
448:                    // Dump the alternative, starting with predicates
449:                    //
450:                    boolean save = firstElementInAlt;
451:                    firstElementInAlt = true;
452:                    tabs++; // in case we do a newline in alt, increase the tab indent
453:
454:                    // RK: don't dump semantic/syntactic predicates
455:                    // only obscures grammar.
456:                    //
457:                    // Dump semantic predicates
458:                    //
459:                    //	if (alt.semPred != null) {
460:                    //		println("{" + alt.semPred + "}?");
461:                    //	}
462:                    // Dump syntactic predicate
463:                    //	if (alt.synPred != null) {
464:                    //		genSynPred(alt.synPred);
465:                    //	}
466:                    genAlt(alt);
467:                    tabs--;
468:                    firstElementInAlt = save;
469:                }
470:            }
471:
472:            /** Generate a textual representation of the follow set
473:             * for a block.
474:             * @param blk  The rule block of interest
475:             */
476:            public void genFollowSetForRuleBlock(RuleBlock blk) {
477:                Lookahead follow = grammar.theLLkAnalyzer
478:                        .FOLLOW(1, blk.endNode);
479:                printSet(grammar.maxk, 1, follow);
480:            }
481:
482:            protected void genGenericBlock(AlternativeBlock blk, String blkOp) {
483:                if (blk.alternatives.size() > 1) {
484:                    // make sure we start on a new line
485:                    if (!firstElementInAlt) {
486:                        // only do newline if the last element wasn't a multi-line block
487:                        if (prevAltElem == null
488:                                || !(prevAltElem instanceof  AlternativeBlock)
489:                                || ((AlternativeBlock) prevAltElem).alternatives
490:                                        .size() == 1) {
491:                            _println("");
492:                            print("(\t");
493:                        } else {
494:                            _print("(\t");
495:                        }
496:                        // _println("");
497:                        // print("(\t");
498:                    } else {
499:                        _print("(\t");
500:                    }
501:                } else {
502:                    _print("( ");
503:                }
504:                // RK: don't dump init actions
505:                //	genBlockPreamble(blk);
506:                genCommonBlock(blk);
507:                if (blk.alternatives.size() > 1) {
508:                    _println("");
509:                    print(")" + blkOp + " ");
510:                    // if not last element of alt, need newline & to indent
511:                    if (!(blk.next instanceof  BlockEndElement)) {
512:                        _println("");
513:                        print("");
514:                    }
515:                } else {
516:                    _print(")" + blkOp + " ");
517:                }
518:            }
519:
520:            /** Generate a header that is common to all generated HTML files */
521:            protected void genHeader() {
522:                println("<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\">");
523:                println("<HTML>");
524:                println("<HEAD>");
525:                println("<TITLE>Grammar " + antlrTool.grammarFile + "</TITLE>");
526:                println("</HEAD>");
527:                println("<BODY>");
528:                println("<table summary=\"\" border=\"1\" cellpadding=\"5\">");
529:                println("<tr>");
530:                println("<td>");
531:                println("<font size=\"+2\">Grammar " + grammar.getClassName()
532:                        + "</font><br>");
533:                println("<a href=\"http://www.ANTLR.org\">ANTLR</a>-generated HTML file from "
534:                        + antlrTool.grammarFile);
535:                println("<p>");
536:                println("Terence Parr, <a href=\"http://www.magelang.com\">MageLang Institute</a>");
537:                println("<br>ANTLR Version " + antlrTool.version
538:                        + "; 1989-1999");
539:                println("</td>");
540:                println("</tr>");
541:                println("</table>");
542:                println("<PRE>");
543:                // RK: see no reason for printing include files and stuff...
544:                //		tabs++;
545:                //		printAction(behavior.getHeaderAction(""));
546:                //		tabs--;
547:            }
548:
549:            /**Generate the lookahead set for an alternate. */
550:            protected void genLookaheadSetForAlt(Alternative alt) {
551:                if (doingLexRules && alt.cache[1].containsEpsilon()) {
552:                    println("MATCHES ALL");
553:                    return;
554:                }
555:                int depth = alt.lookaheadDepth;
556:                if (depth == GrammarAnalyzer.NONDETERMINISTIC) {
557:                    // if the decision is nondeterministic, do the best we can: LL(k)
558:                    // any predicates that are around will be generated later.
559:                    depth = grammar.maxk;
560:                }
561:                for (int i = 1; i <= depth; i++) {
562:                    Lookahead lookahead = alt.cache[i];
563:                    printSet(depth, i, lookahead);
564:                }
565:            }
566:
567:            /** Generate a textual representation of the lookahead set
568:             * for a block.
569:             * @param blk  The block of interest
570:             */
571:            public void genLookaheadSetForBlock(AlternativeBlock blk) {
572:                // Find the maximal lookahead depth over all alternatives
573:                int depth = 0;
574:                for (int i = 0; i < blk.alternatives.size(); i++) {
575:                    Alternative alt = blk.getAlternativeAt(i);
576:                    if (alt.lookaheadDepth == GrammarAnalyzer.NONDETERMINISTIC) {
577:                        depth = grammar.maxk;
578:                        break;
579:                    } else if (depth < alt.lookaheadDepth) {
580:                        depth = alt.lookaheadDepth;
581:                    }
582:                }
583:
584:                for (int i = 1; i <= depth; i++) {
585:                    Lookahead lookahead = grammar.theLLkAnalyzer.look(i, blk);
586:                    printSet(depth, i, lookahead);
587:                }
588:            }
589:
590:            /** Generate the nextToken rule.
591:             * nextToken is a synthetic lexer rule that is the implicit OR of all
592:             * user-defined lexer rules.
593:             */
594:            public void genNextToken() {
595:                println("");
596:                println("/** Lexer nextToken rule:");
597:                println(" *  The lexer nextToken rule is synthesized from all of the user-defined");
598:                println(" *  lexer rules.  It logically consists of one big alternative block with");
599:                println(" *  each user-defined rule being an alternative.");
600:                println(" */");
601:
602:                // Create the synthesized rule block for nextToken consisting
603:                // of an alternate block containing all the user-defined lexer rules.
604:                RuleBlock blk = MakeGrammar.createNextTokenRule(grammar,
605:                        grammar.rules, "nextToken");
606:
607:                // Define the nextToken rule symbol
608:                RuleSymbol nextTokenRs = new RuleSymbol("mnextToken");
609:                nextTokenRs.setDefined();
610:                nextTokenRs.setBlock(blk);
611:                nextTokenRs.access = "private";
612:                grammar.define(nextTokenRs);
613:
614:                /*
615:                // Analyze the synthesized block
616:                if (!grammar.theLLkAnalyzer.deterministic(blk))
617:                {
618:                	println("The grammar analyzer has determined that the synthesized");
619:                	println("nextToken rule is non-deterministic (i.e., it has ambiguities)");
620:                	println("This means that there is some overlap of the character");
621:                	println("lookahead for two or more of your lexer rules.");
622:                }
623:                 */
624:
625:                genCommonBlock(blk);
626:            }
627:
628:            /** Generate code for a named rule block
629:             * @param s The RuleSymbol describing the rule to generate
630:             */
631:            public void genRule(RuleSymbol s) {
632:                if (s == null || !s.isDefined())
633:                    return; // undefined rule
634:                println("");
635:                if (s.comment != null) {
636:                    _println(HTMLEncode(s.comment));
637:                }
638:                if (s.access.length() != 0) {
639:                    if (!s.access.equals("public")) {
640:                        _print(s.access + " ");
641:                    }
642:                }
643:                _print("<a name=\"" + s.getId() + "\">");
644:                _print(s.getId());
645:                _print("</a>");
646:
647:                // Get rule return type and arguments
648:                RuleBlock rblk = s.getBlock();
649:
650:                // RK: for HTML output not of much value...
651:                // Gen method return value(s)
652:                //		if (rblk.returnAction != null) {
653:                //			_print("["+rblk.returnAction+"]");
654:                //		}
655:                // Gen arguments
656:                //		if (rblk.argAction != null)
657:                //		{
658:                //				_print(" returns [" + rblk.argAction+"]");
659:                //		}
660:                _println("");
661:                tabs++;
662:                print(":\t");
663:
664:                // Dump any init-action
665:                // genBlockPreamble(rblk);
666:
667:                // Dump the alternates of the rule
668:                genCommonBlock(rblk);
669:
670:                _println("");
671:                println(";");
672:                tabs--;
673:            }
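            // Illustrative sketch, not part of the original source: for a hypothetical
            // parser rule
            //   expr : term (PLUS term)* ;
            // genRule would emit roughly the following into MyParser.html:
            //   <a name="expr">expr</a>
            //   :    <a href="MyParser.html#term">term</a> ( PLUS <a href="MyParser.html#term">term</a> )*
            //   ;
            // where "MyParser" and the rule names are placeholders for this example.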
674:
675:            /** Generate the syntactic predicate.  This basically generates
676:             * the alternative block, but tracks whether we are inside a synPred
677:             * @param blk  The syntactic predicate block
678:             */
679:            protected void genSynPred(SynPredBlock blk) {
680:                syntacticPredLevel++;
681:                genGenericBlock(blk, " =>");
682:                syntacticPredLevel--;
683:            }
684:
685:            public void genTail() {
686:                println("</PRE>");
687:                println("</BODY>");
688:                println("</HTML>");
689:            }
690:
691:            /** Generate the token types TXT file */
692:            protected void genTokenTypes(TokenManager tm) throws IOException {
693:                // Open the token output TXT file and set the currentOutput stream
694:                antlrTool.reportProgress("Generating " + tm.getName()
695:                        + TokenTypesFileSuffix + TokenTypesFileExt);
696:                currentOutput = antlrTool.openOutputFile(tm.getName()
697:                        + TokenTypesFileSuffix + TokenTypesFileExt);
698:                //SAS: changed for proper text file io
699:                tabs = 0;
700:
701:                // Generate the header common to all output files
702:                genHeader();
703:
704:                // Generate a string for each token.  This creates a static
705:                // array of Strings indexed by token type.
706:                println("");
707:                println("*** Tokens used by the parser");
708:                println("This is a list of the token numeric values and the corresponding");
709:                println("token identifiers.  Some tokens are literals, and because of that");
710:                println("they have no identifiers.  Literals are double-quoted.");
711:                tabs++;
712:
713:                // Enumerate all the valid token types
714:                Vector v = tm.getVocabulary();
715:                for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
716:                    String s = (String) v.elementAt(i);
717:                    if (s != null) {
718:                        println(s + " = " + i);
719:                    }
720:                }
721:
722:                // Close the interface
723:                tabs--;
724:                println("*** End of tokens used by the parser");
725:
726:                // Close the tokens output file
727:                currentOutput.close();
728:                currentOutput = null;
729:            }
730:
731:            /** Get a string for an expression to generate creation of an AST subtree.
732:             * @param v A Vector of String, where each element is an expression in the target language yielding an AST node.
733:             */
734:            public String getASTCreateString(Vector v) {
735:                return null;
736:            }
737:
738:            /** Get a string for an expression to generate creation of an AST node
739:             * @param str The arguments to the AST constructor
740:             */
741:            public String getASTCreateString(GrammarAtom atom, String str) {
742:                return null;
743:            }
744:
745:            /** Map an identifier to its corresponding tree-node variable.
746:             * This is context-sensitive, depending on the rule and alternative
747:             * being generated
748:             * @param id The identifier name to map
749:             * @param tInfo The action translation info (not used by this generator).
750:             */
751:            public String mapTreeId(String id, ActionTransInfo tInfo) {
752:                return id;
753:            }
754:
755:            /// unused.
756:            protected String processActionForSpecialSymbols(String actionStr,
757:                    int line, RuleBlock currentRule, ActionTransInfo tInfo) {
758:                return actionStr;
759:            }
760:
761:            /** Format a lookahead or follow set.
762:             * @param depth The depth of the entire lookahead/follow
763:             * @param k The lookahead level to print
764:             * @param lookahead  The lookahead/follow set to print
765:             */
766:            public void printSet(int depth, int k, Lookahead lookahead) {
767:                int numCols = 5;
768:
769:                int[] elems = lookahead.fset.toArray();
770:
771:                if (depth != 1) {
772:                    print("k==" + k + ": {");
773:                } else {
774:                    print("{ ");
775:                }
776:                if (elems.length > numCols) {
777:                    _println("");
778:                    tabs++;
779:                    print("");
780:                }
781:
782:                int column = 0;
783:                for (int i = 0; i < elems.length; i++) {
784:                    column++;
785:                    if (column > numCols) {
786:                        _println("");
787:                        print("");
788:                        column = 0;
789:                    }
790:                    if (doingLexRules) {
791:                        _print(charFormatter.literalChar(elems[i]));
792:                    } else {
793:                        _print((String) grammar.tokenManager.getVocabulary()
794:                                .elementAt(elems[i]));
795:                    }
796:                    if (i != elems.length - 1) {
797:                        _print(", ");
798:                    }
799:                }
800:
801:                if (elems.length > numCols) {
802:                    _println("");
803:                    tabs--;
804:                    print("");
805:                }
806:                _println(" }");
807:            }
808:        }
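
For context (this note and the sketch below are editorial additions, not part of the cross-referenced source): HTMLCodeGenerator is normally selected through the ANTLR tool's -html command-line switch rather than instantiated by hand, since the tool performs the setTool, setBehavior, and setAnalyzer wiring that the constructor comment mentions. A minimal sketch, assuming the repackaged persistence.antlr.Tool keeps the stock ANTLR 2.x entry point and options, and using a hypothetical grammar file name:

        // Hedged example: ask the (repackaged) ANTLR tool for HTML grammar documentation.
        // "MyParser.g" is a placeholder grammar file; the -html switch tells the tool to
        // install an HTMLCodeGenerator, which then writes MyParser.html, a cross-linked,
        // action-free view of the grammar.
        public class GenerateGrammarDocs {
            public static void main(String[] args) {
                persistence.antlr.Tool.main(new String[] { "-html", "MyParser.g" });
            }
        }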