package persistence.antlr;

/* ANTLR Translator Generator
 * Project led by Terence Parr at http://www.jGuru.com
 * Software rights: http://www.antlr.org/license.html
 *
 */

import java.util.Enumeration;

import persistence.antlr.collections.impl.BitSet;
import persistence.antlr.collections.impl.Vector;

import java.io.PrintWriter; //SAS: changed for proper text file io
import java.io.IOException;
import java.io.FileWriter;
/** Generate MyParser.txt, MyLexer.txt, and MyParserTokenTypes.txt */
public class DiagnosticCodeGenerator extends CodeGenerator {
    /** Non-zero if inside syntactic predicate generation */
    protected int syntacticPredLevel = 0;

    /** true during lexer generation, false during parser generation */
    protected boolean doingLexRules = false;

    /** Create a diagnostic code-generator using the given grammar.
     * The caller must still call setTool, setBehavior, and setAnalyzer
     * before generating code.
     */
    public DiagnosticCodeGenerator() {
        super();
        charFormatter = new JavaCharFormatter();
    }
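
    // A minimal usage sketch (illustrative only; `tool`, `behavior`, and
    // `analyzer` are assumed to come from the surrounding ANTLR driver, as
    // described in the constructor comment above):
    //
    //     DiagnosticCodeGenerator codegen = new DiagnosticCodeGenerator();
    //     codegen.setTool(tool);           // the ANTLR tool instance
    //     codegen.setBehavior(behavior);   // grammars and token managers to document
    //     codegen.setAnalyzer(analyzer);   // the grammar analyzer to use
    //     codegen.gen();                   // writes the diagnostic TXT files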

    /** Generate the parser, lexer, and token types documentation */
    public void gen() {

        // Do the code generation
        try {
            // Loop over all grammars
            Enumeration grammarIter = behavior.grammars.elements();
            while (grammarIter.hasMoreElements()) {
                Grammar g = (Grammar) grammarIter.nextElement();

                // Connect all the components to each other
                g.setGrammarAnalyzer(analyzer);
                g.setCodeGenerator(this);
                analyzer.setGrammar(g);

                // To get the right overloading behavior across heterogeneous grammars
                g.generate();

                if (antlrTool.hasError()) {
                    antlrTool.panic("Exiting due to errors.");
                }
            }

            // Loop over all token managers (some of which are lexers)
            Enumeration tmIter = behavior.tokenManagers.elements();
            while (tmIter.hasMoreElements()) {
                TokenManager tm = (TokenManager) tmIter.nextElement();
                if (!tm.isReadOnly()) {
                    // Write the token manager tokens as Java
                    genTokenTypes(tm);
                }
            }
        } catch (IOException e) {
            antlrTool.reportException(e, null);
        }
    }

    /** Generate code for the given grammar element.
     * @param action The {...} action to generate
     */
    public void gen(ActionElement action) {
        if (action.isSemPred) {
            // handled elsewhere
        } else {
            print("ACTION: ");
            _printAction(action.actionText);
        }
    }

    /** Generate code for the given grammar element.
     * @param blk The "x|y|z|..." block to generate
     */
    public void gen(AlternativeBlock blk) {
        println("Start of alternative block.");
        tabs++;
        genBlockPreamble(blk);

        boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
        if (!ok) {
            println("Warning: This alternative block is non-deterministic");
        }
        genCommonBlock(blk);
        tabs--;
    }

    /** Generate code for the given grammar element.
     * @param end The block-end element to generate. Block-end
     * elements are synthesized by the grammar parser to represent
     * the end of a block.
     */
    public void gen(BlockEndElement end) {
        // no-op
    }

    /** Generate code for the given grammar element.
     * @param atom The character literal reference to generate
     */
    public void gen(CharLiteralElement atom) {
        print("Match character ");
        if (atom.not) {
            _print("NOT ");
        }
        _print(atom.atomText);
        if (atom.label != null) {
            _print(", label=" + atom.label);
        }
        _println("");
    }

    /** Generate code for the given grammar element.
     * @param r The character-range reference to generate
     */
    public void gen(CharRangeElement r) {
        print("Match character range: " + r.beginText + ".." + r.endText);
        if (r.label != null) {
            _print(", label = " + r.label);
        }
        _println("");
    }

    /** Generate the lexer TXT file */
    public void gen(LexerGrammar g) throws IOException {
        setGrammar(g);
        antlrTool.reportProgress("Generating " + grammar.getClassName() + TokenTypesFileExt);
        currentOutput = antlrTool.openOutputFile(grammar.getClassName() + TokenTypesFileExt);
        //SAS: changed for proper text file io

        tabs = 0;
        doingLexRules = true;
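
        // The generated diagnostic file walks through the lexer in order:
        // the common header, the lexer preamble action, the lexer class
        // definition, user-defined class members, the string literals table,
        // the synthesized nextToken rule, and finally each user-defined
        // lexer rule.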

        // Generate header common to all TXT output files
        genHeader();

        // Output the user-defined lexer preamble
        println("");
        println("*** Lexer Preamble Action.");
        println("This action will appear before the declaration of your lexer class:");
        tabs++;
        println(grammar.preambleAction.getText());
        tabs--;
        println("*** End of Lexer Preamble Action");

        // Generate lexer class definition
        println("");
        println("*** Your lexer class is called '" + grammar.getClassName() + "' and is a subclass of '" + grammar.getSuperClass() + "'.");

        // Generate user-defined lexer class members
        println("");
        println("*** User-defined lexer class members:");
        println("These are the member declarations that you defined for your class:");
        tabs++;
        printAction(grammar.classMemberAction.getText());
        tabs--;
        println("*** End of user-defined lexer class members");

        // Generate string literals
        println("");
        println("*** String literals used in the parser");
        println("The following string literals were used in the parser.");
        println("An actual code generator would arrange to place these literals");
        println("into a table in the generated lexer, so that actions in the");
        println("generated lexer could match token text against the literals.");
        println("String literals used in the lexer are not listed here, as they");
        println("are incorporated into the mainstream lexer processing.");
        tabs++;
        // Enumerate all of the symbols and look for string literal symbols
        Enumeration ids = grammar.getSymbols();
        while (ids.hasMoreElements()) {
            GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
            // Only process string literals -- reject other symbol entries
            if (sym instanceof StringLiteralSymbol) {
                StringLiteralSymbol s = (StringLiteralSymbol) sym;
                println(s.getId() + " = " + s.getTokenType());
            }
        }
        tabs--;
        println("*** End of string literals used by the parser");

        // Generate nextToken() rule.
        // nextToken() is a synthetic lexer rule that is the implicit OR of all
        // user-defined lexer rules.
        genNextToken();

        // Generate code for each rule in the lexer
        println("");
        println("*** User-defined Lexer rules:");
        tabs++;

        ids = grammar.rules.elements();
        while (ids.hasMoreElements()) {
            RuleSymbol rs = (RuleSymbol) ids.nextElement();
            // Skip the synthesized nextToken rule; it was documented above
            if (!rs.id.equals("mnextToken")) {
                genRule(rs);
            }
        }

        tabs--;
        println("");
        println("*** End of user-defined Lexer rules.");

        // Close the lexer output file
        currentOutput.close();
        currentOutput = null;
        doingLexRules = false;
    }

    /** Generate code for the given grammar element.
     * @param blk The (...)+ block to generate
     */
    public void gen(OneOrMoreBlock blk) {
        println("Start ONE-OR-MORE (...)+ block:");
        tabs++;
        genBlockPreamble(blk);
        boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
        if (!ok) {
            println("Warning: This one-or-more block is non-deterministic");
        }
        genCommonBlock(blk);
        tabs--;
        println("End ONE-OR-MORE block.");
    }

    /** Generate the parser TXT file */
    public void gen(ParserGrammar g) throws IOException {
        setGrammar(g);
        // Open the output stream for the parser and set the currentOutput
        antlrTool.reportProgress("Generating " + grammar.getClassName() + TokenTypesFileExt);
        currentOutput = antlrTool.openOutputFile(grammar.getClassName() + TokenTypesFileExt);
        //SAS: changed for proper text file io

        tabs = 0;

        // Generate the header common to all output files.
        genHeader();

        // Output the user-defined parser preamble
        println("");
        println("*** Parser Preamble Action.");
        println("This action will appear before the declaration of your parser class:");
        tabs++;
        println(grammar.preambleAction.getText());
        tabs--;
        println("*** End of Parser Preamble Action");

        // Generate parser class definition
        println("");
        println("*** Your parser class is called '" + grammar.getClassName() + "' and is a subclass of '" + grammar.getSuperClass() + "'.");

        // Generate user-defined parser class members
        println("");
        println("*** User-defined parser class members:");
        println("These are the member declarations that you defined for your class:");
        tabs++;
        printAction(grammar.classMemberAction.getText());
        tabs--;
        println("*** End of user-defined parser class members");

        // Generate code for each rule in the grammar
        println("");
        println("*** Parser rules:");
        tabs++;

        // Enumerate the parser rules
        Enumeration rules = grammar.rules.elements();
        while (rules.hasMoreElements()) {
            println("");
            // Get the rule from the list and downcast it to the proper type
            GrammarSymbol sym = (GrammarSymbol) rules.nextElement();
            // Only process parser rules
            if (sym instanceof RuleSymbol) {
                genRule((RuleSymbol) sym);
            }
        }
        tabs--;
        println("");
        println("*** End of parser rules");

        println("");
        println("*** End of parser");

        // Close the parser output stream
        currentOutput.close();
        currentOutput = null;
    }

    /** Generate code for the given grammar element.
     * @param rr The rule-reference to generate
     */
    public void gen(RuleRefElement rr) {
        // Look the symbol up before casting, so a reference to something
        // other than a rule is reported instead of throwing a ClassCastException.
        GrammarSymbol sym = grammar.getSymbol(rr.targetRule);

        // Generate the actual rule description
        print("Rule Reference: " + rr.targetRule);
        if (rr.idAssign != null) {
            _print(", assigned to '" + rr.idAssign + "'");
        }
        if (rr.args != null) {
            _print(", arguments = " + rr.args);
        }
        _println("");

        // Perform diagnostics
        if (sym != null && !(sym instanceof RuleSymbol)) {
            // Should this ever happen?
            println("Rule '" + rr.targetRule + "' is referenced, but that is not a grammar rule.");
            return;
        }
        RuleSymbol rs = (RuleSymbol) sym; // safe: sym is null or a RuleSymbol
        if (rs == null || !rs.isDefined()) {
            println("Rule '" + rr.targetRule + "' is referenced, but that rule is not defined.");
            println("\tPerhaps the rule is misspelled, or you forgot to define it.");
            return;
        }
        if (rr.idAssign != null) {
            // Warn if the rule has no return type
            if (rs.block.returnAction == null) {
                println("Error: You assigned from Rule '" + rr.targetRule + "', but that rule has no return type.");
            }
        } else {
            // Warn about a return value, if any, but not inside a syntactic predicate
            if (!(grammar instanceof LexerGrammar) && syntacticPredLevel == 0 && rs.block.returnAction != null) {
                println("Warning: Rule '" + rr.targetRule + "' returns a value");
            }
        }
        if (rr.args != null && rs.block.argAction == null) {
            println("Error: Rule '" + rr.targetRule + "' accepts no arguments.");
        }
    }

    /** Generate code for the given grammar element.
     * @param atom The string-literal reference to generate
     */
    public void gen(StringLiteralElement atom) {
        print("Match string literal ");
        _print(atom.atomText);
        if (atom.label != null) {
            _print(", label=" + atom.label);
        }
        _println("");
    }

    /** Generate code for the given grammar element.
     * @param r The token-range reference to generate
     */
    public void gen(TokenRangeElement r) {
        print("Match token range: " + r.beginText + ".." + r.endText);
        if (r.label != null) {
            _print(", label = " + r.label);
        }
        _println("");
    }

    /** Generate code for the given grammar element.
     * @param atom The token-reference to generate
     */
    public void gen(TokenRefElement atom) {
        print("Match token ");
        if (atom.not) {
            _print("NOT ");
        }
        _print(atom.atomText);
        if (atom.label != null) {
            _print(", label=" + atom.label);
        }
        _println("");
    }

    /** Generate code for the given grammar element.
     * @param t The tree-element reference to generate
     */
    public void gen(TreeElement t) {
        print("Tree reference: " + t);
    }

    /** Generate the tree-walker TXT file */
    public void gen(TreeWalkerGrammar g) throws IOException {
        setGrammar(g);
        // Open the output stream for the tree-walker and set the currentOutput
        antlrTool.reportProgress("Generating " + grammar.getClassName() + TokenTypesFileExt);
        currentOutput = antlrTool.openOutputFile(grammar.getClassName() + TokenTypesFileExt);
        //SAS: changed for proper text file io

        tabs = 0;

        // Generate the header common to all output files.
        genHeader();

        // Output the user-defined tree-walker preamble
        println("");
        println("*** Tree-walker Preamble Action.");
        println("This action will appear before the declaration of your tree-walker class:");
        tabs++;
        println(grammar.preambleAction.getText());
        tabs--;
        println("*** End of tree-walker Preamble Action");

        // Generate tree-walker class definition
        println("");
        println("*** Your tree-walker class is called '" + grammar.getClassName() + "' and is a subclass of '" + grammar.getSuperClass() + "'.");

        // Generate user-defined tree-walker class members
        println("");
        println("*** User-defined tree-walker class members:");
        println("These are the member declarations that you defined for your class:");
        tabs++;
        printAction(grammar.classMemberAction.getText());
        tabs--;
        println("*** End of user-defined tree-walker class members");

        // Generate code for each rule in the grammar
        println("");
        println("*** tree-walker rules:");
        tabs++;

        // Enumerate the tree-walker rules
        Enumeration rules = grammar.rules.elements();
        while (rules.hasMoreElements()) {
            println("");
            // Get the rule from the list and downcast it to the proper type
            GrammarSymbol sym = (GrammarSymbol) rules.nextElement();
            // Only process tree-walker rules
            if (sym instanceof RuleSymbol) {
                genRule((RuleSymbol) sym);
            }
        }
        tabs--;
        println("");
        println("*** End of tree-walker rules");

        println("");
        println("*** End of tree-walker");

        // Close the tree-walker output stream
        currentOutput.close();
        currentOutput = null;
    }

    /** Generate a wildcard element */
    public void gen(WildcardElement wc) {
        print("Match wildcard");
        if (wc.getLabel() != null) {
            _print(", label = " + wc.getLabel());
        }
        _println("");
    }

    /** Generate code for the given grammar element.
     * @param blk The (...)* block to generate
     */
    public void gen(ZeroOrMoreBlock blk) {
        println("Start ZERO-OR-MORE (...)* block:");
        tabs++;
        genBlockPreamble(blk);
        boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
        if (!ok) {
            println("Warning: This zero-or-more block is non-deterministic");
        }
        genCommonBlock(blk);
        tabs--;
        println("End ZERO-OR-MORE block.");
    }

    protected void genAlt(Alternative alt) {
        for (AlternativeElement elem = alt.head; !(elem instanceof BlockEndElement); elem = elem.next) {
            elem.generate();
        }
        if (alt.getTreeSpecifier() != null) {
            println("AST will be built as: " + alt.getTreeSpecifier().getText());
        }
    }

    /** Generate the header for a block, which may be a RuleBlock or a
     * plain AlternativeBlock. This generates any variable declarations,
     * init-actions, and syntactic-predicate-testing variables.
     * @param blk The block for which the preamble is to be generated.
     */
    protected void genBlockPreamble(AlternativeBlock blk) {
        // dump out the init action, if any
        if (blk.initAction != null) {
            printAction("Init action: " + blk.initAction);
        }
    }

    /** Generate common code for a block of alternatives: the lookahead
     * set for the block as a whole, then, for each alternative, the
     * conditions (lookahead, semantic predicate, syntactic predicate)
     * under which it is taken, followed by the alternative itself.
     */
    public void genCommonBlock(AlternativeBlock blk) {
        boolean singleAlt = (blk.alternatives.size() == 1);
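
        // Illustrative shape of the diagnostic text emitted below (the exact
        // sets depend on the grammar being analyzed):
        //
        //     Start of an alternative block.
        //         The lookahead set for this block is:
        //             { ID, INT }
        //         This block has multiple alternatives:
        //             Alternate(1) will be taken IF:
        //             The lookahead set: ...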

        println("Start of an alternative block.");
        tabs++;
        println("The lookahead set for this block is:");
        tabs++;
        genLookaheadSetForBlock(blk);
        tabs--;

        if (singleAlt) {
            println("This block has a single alternative");
            if (blk.getAlternativeAt(0).synPred != null) {
                // Generate a warning if there is one alt and it has a synPred
                println("Warning: you specified a syntactic predicate for this alternative,");
                println("but it is the only alternative of a block, so it will be ignored.");
            }
        } else {
            println("This block has multiple alternatives:");
            tabs++;
        }

        for (int i = 0; i < blk.alternatives.size(); i++) {
            Alternative alt = blk.getAlternativeAt(i);

            // Print the lookahead set for this alternative
            println("");
            if (i != 0) {
                print("Otherwise, ");
            } else {
                print("");
            }
            _println("Alternate(" + (i + 1) + ") will be taken IF:");
            println("The lookahead set: ");
            tabs++;
            genLookaheadSetForAlt(alt);
            tabs--;
            if (alt.semPred != null || alt.synPred != null) {
                print("is matched, AND ");
            } else {
                println("is matched.");
            }

            // Dump any semantic predicate
            if (alt.semPred != null) {
                _println("the semantic predicate:");
                tabs++;
                println(alt.semPred);
                tabs--; // balance the indent around the predicate text
                if (alt.synPred != null) {
                    print("is true, AND ");
                } else {
                    println("is true.");
                }
            }

            // Dump the syntactic predicate
            if (alt.synPred != null) {
                _println("the syntactic predicate:");
                tabs++;
                genSynPred(alt.synPred);
                tabs--;
                println("is matched.");
            }

            // Dump the alternative
            genAlt(alt);
        }
        println("");
        println("OTHERWISE, a NoViableAlt exception will be thrown");
        println("");

        if (!singleAlt) {
            tabs--;
            println("End of alternatives");
        }
        tabs--;
        println("End of alternative block.");
    }

    /** Generate a textual representation of the follow set
     * for a block.
     * @param blk The rule block of interest
     */
    public void genFollowSetForRuleBlock(RuleBlock blk) {
        Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, blk.endNode);
        printSet(grammar.maxk, 1, follow);
    }

    /** Generate a header that is common to all TXT files */
    protected void genHeader() {
        println("ANTLR-generated file resulting from grammar " + antlrTool.grammarFile);
        println("Diagnostic output");
        println("");
        println("Terence Parr, MageLang Institute");
        println("with John Lilley, Empathy Software");
        println("ANTLR Version " + antlrTool.version + "; 1996,1997");
        println("");
        println("*** Header Action.");
        println("This action will appear at the top of all generated files.");
        tabs++;
        printAction(behavior.getHeaderAction(""));
        tabs--;
        println("*** End of Header Action");
        println("");
    }

    /** Generate the lookahead set for an alternate. */
    protected void genLookaheadSetForAlt(Alternative alt) {
        if (doingLexRules && alt.cache[1].containsEpsilon()) {
            println("MATCHES ALL");
            return;
        }
        int depth = alt.lookaheadDepth;
        if (depth == GrammarAnalyzer.NONDETERMINISTIC) {
            // if the decision is nondeterministic, do the best we can: LL(k).
            // Any predicates that are around will be generated later.
            depth = grammar.maxk;
        }
        for (int i = 1; i <= depth; i++) {
            Lookahead lookahead = alt.cache[i];
            printSet(depth, i, lookahead);
        }
    }

    /** Generate a textual representation of the lookahead set
     * for a block.
     * @param blk The block of interest
     */
    public void genLookaheadSetForBlock(AlternativeBlock blk) {
        // Find the maximal lookahead depth over all alternatives
        int depth = 0;
        for (int i = 0; i < blk.alternatives.size(); i++) {
            Alternative alt = blk.getAlternativeAt(i);
            if (alt.lookaheadDepth == GrammarAnalyzer.NONDETERMINISTIC) {
                depth = grammar.maxk;
                break;
            } else if (depth < alt.lookaheadDepth) {
                depth = alt.lookaheadDepth;
            }
        }

        for (int i = 1; i <= depth; i++) {
            Lookahead lookahead = grammar.theLLkAnalyzer.look(i, blk);
            printSet(depth, i, lookahead);
        }
    }

    /** Generate the nextToken rule.
     * nextToken is a synthetic lexer rule that is the implicit OR of all
     * user-defined lexer rules.
     */
    public void genNextToken() {
        println("");
        println("*** Lexer nextToken rule:");
        println("The lexer nextToken rule is synthesized from all of the user-defined");
        println("lexer rules. It logically consists of one big alternative block with");
        println("each user-defined rule being an alternative.");
        println("");
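
        // For example (illustrative only): for a lexer with the user-defined
        // rules ID and WS, the synthesized rule behaves roughly like
        //
        //     nextToken : ( mID | mWS ) ;
        //
        // where the "m" prefix is ANTLR's internal naming for lexer rule
        // methods (the same prefix checked for in gen(LexerGrammar) above).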

        // Create the synthesized rule block for nextToken consisting
        // of an alternate block containing all the user-defined lexer rules.
        RuleBlock blk = MakeGrammar.createNextTokenRule(grammar, grammar.rules, "nextToken");

        // Define the nextToken rule symbol
        RuleSymbol nextTokenRs = new RuleSymbol("mnextToken");
        nextTokenRs.setDefined();
        nextTokenRs.setBlock(blk);
        nextTokenRs.access = "private";
        grammar.define(nextTokenRs);

        // Analyze the synthesized block
        if (!grammar.theLLkAnalyzer.deterministic(blk)) {
            println("The grammar analyzer has determined that the synthesized");
            println("nextToken rule is non-deterministic (i.e., it has ambiguities)");
            println("This means that there is some overlap of the character");
            println("lookahead for two or more of your lexer rules.");
        }

        genCommonBlock(blk);

        println("*** End of nextToken lexer rule.");
    }

    /** Generate code for a named rule block
     * @param s The RuleSymbol describing the rule to generate
     */
    public void genRule(RuleSymbol s) {
        println("");
        String ruleType = (doingLexRules ? "Lexer" : "Parser");
        println("*** " + ruleType + " Rule: " + s.getId());
        if (!s.isDefined()) {
            println("This rule is undefined.");
            println("This means that the rule was referenced somewhere in the grammar,");
            println("but a definition for the rule was not encountered.");
            println("It is also possible that syntax errors during the parse of");
            println("your grammar file prevented correct processing of the rule.");
            println("*** End " + ruleType + " Rule: " + s.getId());
            return;
        }
        tabs++;

        if (s.access.length() != 0) {
            println("Access: " + s.access);
        }

        // Get rule return type and arguments
        RuleBlock rblk = s.getBlock();

        // Gen method return value(s)
        if (rblk.returnAction != null) {
            println("Return value(s): " + rblk.returnAction);
            if (doingLexRules) {
                println("Error: you specified return value(s) for a lexical rule.");
                println("\tLexical rules have an implicit return type of 'int'.");
            }
        } else {
            if (doingLexRules) {
                println("Return value: lexical rule returns an implicit token type");
            } else {
                println("Return value: none");
            }
        }

        // Gen arguments
        if (rblk.argAction != null) {
            println("Arguments: " + rblk.argAction);
        }

        // Dump any init-action
        genBlockPreamble(rblk);

        // Analyze the rule
        boolean ok = grammar.theLLkAnalyzer.deterministic(rblk);
        if (!ok) {
            println("Error: This rule is non-deterministic");
        }

        // Dump the alternates of the rule
        genCommonBlock(rblk);

        // Search for an unlabeled exception specification attached to the rule
        ExceptionSpec unlabeledUserSpec = rblk.findExceptionSpec("");

        // Generate user-defined or default catch phrases
        if (unlabeledUserSpec != null) {
            println("You specified error-handler(s) for this rule:");
            tabs++;
            for (int i = 0; i < unlabeledUserSpec.handlers.size(); i++) {
                if (i != 0) {
                    println("");
                }

                ExceptionHandler handler = (ExceptionHandler) unlabeledUserSpec.handlers.elementAt(i);
                println("Error-handler(" + (i + 1) + ") catches [" + handler.exceptionTypeAndName.getText() + "] and executes:");
                printAction(handler.action.getText());
            }
            tabs--;
            println("End error-handlers.");
        } else if (!doingLexRules) {
            println("Default error-handling will be generated, which catches all");
            println("parser exceptions and consumes tokens until the follow-set is seen.");
        }

        // Dump the follow set
        // Doesn't seem to work for lexical rules...
        if (!doingLexRules) {
            println("The follow set for this rule is:");
            tabs++;
            genFollowSetForRuleBlock(rblk);
            tabs--;
        }

        tabs--;
        println("*** End " + ruleType + " Rule: " + s.getId());
    }

    /** Generate the syntactic predicate. This basically generates
     * the alternative block, but tracks whether we are inside a synPred.
     * @param blk The syntactic predicate block
     */
    protected void genSynPred(SynPredBlock blk) {
        syntacticPredLevel++;
        gen((AlternativeBlock) blk);
        syntacticPredLevel--;
    }

    /** Generate the token types TXT file */
    protected void genTokenTypes(TokenManager tm) throws IOException {
        // Open the token output TXT file and set the currentOutput stream
        antlrTool.reportProgress("Generating " + tm.getName() + TokenTypesFileSuffix + TokenTypesFileExt);
        currentOutput = antlrTool.openOutputFile(tm.getName() + TokenTypesFileSuffix + TokenTypesFileExt);
        //SAS: changed for proper text file io
        tabs = 0;

        // Generate the header common to all diagnostic files
        genHeader();

        // Generate a string for each token. This creates a static
        // array of Strings indexed by token type.
        println("");
        println("*** Tokens used by the parser");
        println("This is a list of the token numeric values and the corresponding");
        println("token identifiers. Some tokens are literals, and because of that");
        println("they have no identifiers. Literals are double-quoted.");
        tabs++;

        // Enumerate all the valid token types
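        // Token types below Token.MIN_USER_TYPE are reserved by ANTLR for
        // internal use (e.g. EOF), so the listing starts at the first
        // user-defined type; null slots are unassigned type numbers.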
        Vector v = tm.getVocabulary();
        for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
            String s = (String) v.elementAt(i);
            if (s != null) {
                println(s + " = " + i);
            }
        }

        // Close the token list
        tabs--;
        println("*** End of tokens used by the parser");

        // Close the tokens output file
        currentOutput.close();
        currentOutput = null;
    }

    /** Get a string for an expression to generate creation of an AST subtree.
     * @param v A Vector of String, where each element is an expression in the target language yielding an AST node.
     */
    public String getASTCreateString(Vector v) {
        return "***Create an AST from a vector here***" + System.getProperty("line.separator");
    }

    /** Get a string for an expression to generate creation of an AST node.
     * @param atom The grammar atom for which the node is created
     * @param str The arguments to the AST constructor
     */
    public String getASTCreateString(GrammarAtom atom, String str) {
        return "[" + str + "]";
    }

    /** Unused by the diagnostic generator; actions are emitted verbatim. */
    protected String processActionForSpecialSymbols(String actionStr,
                                                    int line, RuleBlock currentRule, ActionTransInfo tInfo) {
        return actionStr;
    }

    /** Map an identifier to its corresponding tree-node variable.
     * This is context-sensitive, depending on the rule and alternative
     * being generated.
     * @param id The identifier name to map
     * @param tInfo The action translation information (unused here; the
     *        identifier is returned unchanged)
     */
    public String mapTreeId(String id, ActionTransInfo tInfo) {
        return id;
    }

    /** Format a lookahead or follow set.
     * @param depth The depth of the entire lookahead/follow
     * @param k The lookahead level to print
     * @param lookahead The lookahead/follow set to print
     */
    public void printSet(int depth, int k, Lookahead lookahead) {
        int numCols = 5;
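
        // Example of the emitted shape (illustrative only): for k==2 of an
        // LL(3) decision with a small set, a single line such as
        //
        //     k==2: { ID, INT, WS }
        //
        // Larger sets are wrapped at numCols elements per line.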

        int[] elems = lookahead.fset.toArray();

        if (depth != 1) {
            print("k==" + k + ": {");
        } else {
            print("{ ");
        }
        if (elems.length > numCols) {
            _println("");
            tabs++;
            print("");
        }

        int column = 0;
        for (int i = 0; i < elems.length; i++) {
            column++;
            if (column > numCols) {
                _println("");
                print("");
                column = 1; // the element printed below is the first in the new row
            }
            if (doingLexRules) {
                _print(charFormatter.literalChar(elems[i]));
            } else {
                _print((String) grammar.tokenManager.getVocabulary().elementAt(elems[i]));
            }
            if (i != elems.length - 1) {
                _print(", ");
            }
        }

        if (elems.length > numCols) {
            _println("");
            tabs--;
            print("");
        }
        _println(" }");
    }
}