001: package antlr;
002:
003: /* ANTLR Translator Generator
004: * Project led by Terence Parr at http://www.cs.usfca.edu
005: * Software rights: http://www.antlr.org/license.html
006: */
007:
008: import java.util.Enumeration;
009:
010: import antlr.collections.impl.BitSet;
011: import antlr.collections.impl.Vector;
012:
013: import java.io.PrintWriter; //SAS: changed for proper text file io
014: import java.io.IOException;
015: import java.io.FileWriter;
016:
017: /**Generate MyParser.txt, MyLexer.txt and MyParserTokenTypes.txt */
018: public class DiagnosticCodeGenerator extends CodeGenerator {
019: /** non-zero if inside syntactic predicate generation */
020: protected int syntacticPredLevel = 0;
021:
022: /** true during lexer generation, false during parser generation */
023: protected boolean doingLexRules = false;
024:
025: /** Create a Diagnostic code-generator using the given Grammar
026: * The caller must still call setTool, setBehavior, and setAnalyzer
027: * before generating code.
028: */
029: public DiagnosticCodeGenerator() {
030: super ();
031: charFormatter = new JavaCharFormatter();
032: }
033:
034: /**Generate the parser, lexer, and token types documentation */
035: public void gen() {
036:
037: // Do the code generation
038: try {
039: // Loop over all grammars
040: Enumeration grammarIter = behavior.grammars.elements();
041: while (grammarIter.hasMoreElements()) {
042: Grammar g = (Grammar) grammarIter.nextElement();
043:
044: // Connect all the components to each other
045: g.setGrammarAnalyzer(analyzer);
046: g.setCodeGenerator(this );
047: analyzer.setGrammar(g);
048:
049: // To get right overloading behavior across hetrogeneous grammars
050: g.generate();
051:
052: if (antlrTool.hasError()) {
053: antlrTool.fatalError("Exiting due to errors.");
054: }
055:
056: }
057:
058: // Loop over all token managers (some of which are lexers)
059: Enumeration tmIter = behavior.tokenManagers.elements();
060: while (tmIter.hasMoreElements()) {
061: TokenManager tm = (TokenManager) tmIter.nextElement();
062: if (!tm.isReadOnly()) {
063: // Write the token manager tokens as Java
064: genTokenTypes(tm);
065: }
066: }
067: } catch (IOException e) {
068: antlrTool.reportException(e, null);
069: }
070: }
071:
072: /** Generate code for the given grammar element.
073: * @param blk The {...} action to generate
074: */
075: public void gen(ActionElement action, Context context) {
076: if (action.isSemPred) {
077: // handled elsewhere
078: } else {
079: print("ACTION: ");
080: _printAction(action.actionText);
081: }
082: }
083:
084: /** Generate code for the given grammar element.
085: * @param blk The "x|y|z|..." block to generate
086: */
087: public void gen(AlternativeBlock blk, Context context) {
088: println("Start of alternative block.");
089: tabs++;
090: genBlockPreamble(blk);
091:
092: boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
093: if (!ok) {
094: println("Warning: This alternative block is non-deterministic");
095: }
096: genCommonBlock(blk);
097: tabs--;
098: }
099:
100: /** Generate code for the given grammar element.
101: * @param blk The block-end element to generate. Block-end
102: * elements are synthesized by the grammar parser to represent
103: * the end of a block.
104: */
105: public void gen(BlockEndElement end, Context context) {
106: // no-op
107: }
108:
109: /** Generate code for the given grammar element.
110: * @param blk The character literal reference to generate
111: */
112: public void gen(CharLiteralElement atom, Context context) {
113: print("Match character ");
114: if (atom.not) {
115: _print("NOT ");
116: }
117: _print(atom.atomText);
118: if (atom.label != null) {
119: _print(", label=" + atom.label);
120: }
121: _println("");
122: }
123:
124: /** Generate code for the given grammar element.
125: * @param blk The character-range reference to generate
126: */
127: public void gen(CharRangeElement r, Context context) {
128: print("Match character range: " + r.beginText + ".."
129: + r.endText);
130: if (r.label != null) {
131: _print(", label = " + r.label);
132: }
133: _println("");
134: }
135:
136: /** Generate the lexer TXT file */
137: public void gen(LexerGrammar g) throws IOException {
138: setGrammar(g);
139: antlrTool.reportProgress("Generating " + grammar.getClassName()
140: + TokenTypesFileExt);
141: currentOutput = antlrTool.openOutputFile(grammar.getClassName()
142: + TokenTypesFileExt);
143: //SAS: changed for proper text file io
144:
145: tabs = 0;
146: doingLexRules = true;
147:
148: // Generate header common to all TXT output files
149: genHeader();
150:
151: // Output the user-defined lexer premamble
152: println("");
153: println("*** Lexer Preamble Action.");
154: println("This action will appear before the declaration of your lexer class:");
155: tabs++;
156: println(grammar.preambleAction.getText());
157: tabs--;
158: println("*** End of Lexer Preamble Action");
159:
160: // Generate lexer class definition
161: println("");
162: println("*** Your lexer class is called '"
163: + grammar.getClassName() + "' and is a subclass of '"
164: + grammar.getSuperClass() + "'.");
165:
166: // Generate user-defined parser class members
167: println("");
168: println("*** User-defined lexer class members:");
169: println("These are the member declarations that you defined for your class:");
170: tabs++;
171: printAction(grammar.classMemberAction.getText());
172: tabs--;
173: println("*** End of user-defined lexer class members");
174:
175: // Generate string literals
176: println("");
177: println("*** String literals used in the parser");
178: println("The following string literals were used in the parser.");
179: println("An actual code generator would arrange to place these literals");
180: println("into a table in the generated lexer, so that actions in the");
181: println("generated lexer could match token text against the literals.");
182: println("String literals used in the lexer are not listed here, as they");
183: println("are incorporated into the mainstream lexer processing.");
184: tabs++;
185: // Enumerate all of the symbols and look for string literal symbols
186: Enumeration ids = grammar.getSymbols();
187: while (ids.hasMoreElements()) {
188: GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
189: // Only processing string literals -- reject other symbol entries
190: if (sym instanceof StringLiteralSymbol) {
191: StringLiteralSymbol s = (StringLiteralSymbol) sym;
192: println(s.getId() + " = " + s.getTokenType());
193: }
194: }
195: tabs--;
196: println("*** End of string literals used by the parser");
197:
198: // Generate nextToken() rule.
199: // nextToken() is a synthetic lexer rule that is the implicit OR of all
200: // user-defined lexer rules.
201: genNextToken();
202:
203: // Generate code for each rule in the lexer
204: println("");
205: println("*** User-defined Lexer rules:");
206: tabs++;
207:
208: ids = grammar.rules.elements();
209: while (ids.hasMoreElements()) {
210: RuleSymbol rs = (RuleSymbol) ids.nextElement();
211: if (!rs.id.equals("mnextToken")) {
212: genRule(rs);
213: }
214: }
215:
216: tabs--;
217: println("");
218: println("*** End User-defined Lexer rules:");
219:
220: // Close the lexer output file
221: currentOutput.close();
222: currentOutput = null;
223: doingLexRules = false;
224: }
225:
226: /** Generate code for the given grammar element.
227: * @param blk The (...)+ block to generate
228: */
229: public void gen(OneOrMoreBlock blk, Context context) {
230: println("Start ONE-OR-MORE (...)+ block:");
231: tabs++;
232: genBlockPreamble(blk);
233: boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
234: if (!ok) {
235: println("Warning: This one-or-more block is non-deterministic");
236: }
237: genCommonBlock(blk);
238: tabs--;
239: println("End ONE-OR-MORE block.");
240: }
241:
242: /** Generate the parser TXT file */
243: public void gen(ParserGrammar g) throws IOException {
244: setGrammar(g);
245: // Open the output stream for the parser and set the currentOutput
246: antlrTool.reportProgress("Generating " + grammar.getClassName()
247: + TokenTypesFileExt);
248: currentOutput = antlrTool.openOutputFile(grammar.getClassName()
249: + TokenTypesFileExt);
250: //SAS: changed for proper text file io
251:
252: tabs = 0;
253:
254: // Generate the header common to all output files.
255: genHeader();
256:
257: // Output the user-defined parser premamble
258: println("");
259: println("*** Parser Preamble Action.");
260: println("This action will appear before the declaration of your parser class:");
261: tabs++;
262: println(grammar.preambleAction.getText());
263: tabs--;
264: println("*** End of Parser Preamble Action");
265:
266: // Generate parser class definition
267: println("");
268: println("*** Your parser class is called '"
269: + grammar.getClassName() + "' and is a subclass of '"
270: + grammar.getSuperClass() + "'.");
271:
272: // Generate user-defined parser class members
273: println("");
274: println("*** User-defined parser class members:");
275: println("These are the member declarations that you defined for your class:");
276: tabs++;
277: printAction(grammar.classMemberAction.getText());
278: tabs--;
279: println("*** End of user-defined parser class members");
280:
281: // Generate code for each rule in the grammar
282: println("");
283: println("*** Parser rules:");
284: tabs++;
285:
286: // Enumerate the parser rules
287: Enumeration rules = grammar.rules.elements();
288: while (rules.hasMoreElements()) {
289: println("");
290: // Get the rules from the list and downcast it to proper type
291: GrammarSymbol sym = (GrammarSymbol) rules.nextElement();
292: // Only process parser rules
293: if (sym instanceof RuleSymbol) {
294: genRule((RuleSymbol) sym);
295: }
296: }
297: tabs--;
298: println("");
299: println("*** End of parser rules");
300:
301: println("");
302: println("*** End of parser");
303:
304: // Close the parser output stream
305: currentOutput.close();
306: currentOutput = null;
307: }
308:
309: /** Generate code for the given grammar element.
310: * @param blk The rule-reference to generate
311: */
312: public void gen(RuleRefElement rr, Context context) {
313: RuleSymbol rs = (RuleSymbol) grammar.getSymbol(rr.targetRule);
314:
315: // Generate the actual rule description
316: print("Rule Reference: " + rr.targetRule);
317: if (rr.idAssign != null) {
318: _print(", assigned to '" + rr.idAssign + "'");
319: }
320: if (rr.args != null) {
321: _print(", arguments = " + rr.args);
322: }
323: _println("");
324:
325: // Perform diagnostics
326: if (rs == null || !rs.isDefined()) {
327: println("Rule '" + rr.targetRule
328: + "' is referenced, but that rule is not defined.");
329: println("\tPerhaps the rule is misspelled, or you forgot to define it.");
330: return;
331: }
332: if (!(rs instanceof RuleSymbol)) {
333: // Should this ever happen??
334: println("Rule '"
335: + rr.targetRule
336: + "' is referenced, but that is not a grammar rule.");
337: return;
338: }
339: if (rr.idAssign != null) {
340: // Warn if the rule has no return type
341: if (rs.block.returnAction == null) {
342: println("Error: You assigned from Rule '"
343: + rr.targetRule
344: + "', but that rule has no return type.");
345: }
346: } else {
347: // Warn about return value if any, but not inside syntactic predicate
348: if (!(grammar instanceof LexerGrammar)
349: && syntacticPredLevel == 0
350: && rs.block.returnAction != null) {
351: println("Warning: Rule '" + rr.targetRule
352: + "' returns a value");
353: }
354: }
355: if (rr.args != null && rs.block.argAction == null) {
356: println("Error: Rule '" + rr.targetRule
357: + "' accepts no arguments.");
358: }
359: }
360:
361: /** Generate code for the given grammar element.
362: * @param blk The string-literal reference to generate
363: */
364: public void gen(StringLiteralElement atom, Context context) {
365: print("Match string literal ");
366: _print(atom.atomText);
367: if (atom.label != null) {
368: _print(", label=" + atom.label);
369: }
370: _println("");
371: }
372:
373: /** Generate code for the given grammar element.
374: * @param blk The token-range reference to generate
375: */
376: public void gen(TokenRangeElement r, Context context) {
377: print("Match token range: " + r.beginText + ".." + r.endText);
378: if (r.label != null) {
379: _print(", label = " + r.label);
380: }
381: _println("");
382: }
383:
384: /** Generate code for the given grammar element.
385: * @param blk The token-reference to generate
386: */
387: public void gen(TokenRefElement atom, Context context) {
388: print("Match token ");
389: if (atom.not) {
390: _print("NOT ");
391: }
392: _print(atom.atomText);
393: if (atom.label != null) {
394: _print(", label=" + atom.label);
395: }
396: _println("");
397: }
398:
399: public void gen(TreeElement t, Context context) {
400: print("Tree reference: " + t);
401: }
402:
403: /** Generate the tree-walker TXT file */
404: public void gen(TreeWalkerGrammar g) throws IOException {
405: setGrammar(g);
406: // Open the output stream for the parser and set the currentOutput
407: antlrTool.reportProgress("Generating " + grammar.getClassName()
408: + TokenTypesFileExt);
409: currentOutput = antlrTool.openOutputFile(grammar.getClassName()
410: + TokenTypesFileExt);
411: //SAS: changed for proper text file io
412:
413: tabs = 0;
414:
415: // Generate the header common to all output files.
416: genHeader();
417:
418: // Output the user-defined parser premamble
419: println("");
420: println("*** Tree-walker Preamble Action.");
421: println("This action will appear before the declaration of your tree-walker class:");
422: tabs++;
423: println(grammar.preambleAction.getText());
424: tabs--;
425: println("*** End of tree-walker Preamble Action");
426:
427: // Generate tree-walker class definition
428: println("");
429: println("*** Your tree-walker class is called '"
430: + grammar.getClassName() + "' and is a subclass of '"
431: + grammar.getSuperClass() + "'.");
432:
433: // Generate user-defined tree-walker class members
434: println("");
435: println("*** User-defined tree-walker class members:");
436: println("These are the member declarations that you defined for your class:");
437: tabs++;
438: printAction(grammar.classMemberAction.getText());
439: tabs--;
440: println("*** End of user-defined tree-walker class members");
441:
442: // Generate code for each rule in the grammar
443: println("");
444: println("*** tree-walker rules:");
445: tabs++;
446:
447: // Enumerate the tree-walker rules
448: Enumeration rules = grammar.rules.elements();
449: while (rules.hasMoreElements()) {
450: println("");
451: // Get the rules from the list and downcast it to proper type
452: GrammarSymbol sym = (GrammarSymbol) rules.nextElement();
453: // Only process tree-walker rules
454: if (sym instanceof RuleSymbol) {
455: genRule((RuleSymbol) sym);
456: }
457: }
458: tabs--;
459: println("");
460: println("*** End of tree-walker rules");
461:
462: println("");
463: println("*** End of tree-walker");
464:
465: // Close the tree-walker output stream
466: currentOutput.close();
467: currentOutput = null;
468: }
469:
470: /** Generate a wildcard element */
471: public void gen(WildcardElement wc, Context context) {
472: print("Match wildcard");
473: if (wc.getLabel() != null) {
474: _print(", label = " + wc.getLabel());
475: }
476: _println("");
477: }
478:
479: /** Generate code for the given grammar element.
480: * @param blk The (...)* block to generate
481: */
482: public void gen(ZeroOrMoreBlock blk, Context context) {
483: println("Start ZERO-OR-MORE (...)+ block:");
484: tabs++;
485: genBlockPreamble(blk);
486: boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
487: if (!ok) {
488: println("Warning: This zero-or-more block is non-deterministic");
489: }
490: genCommonBlock(blk);
491: tabs--;
492: println("End ZERO-OR-MORE block.");
493: }
494:
495: protected void genAlt(Alternative alt) {
496: for (AlternativeElement elem = alt.head; !(elem instanceof BlockEndElement); elem = elem.next) {
497: elem.generate(null);
498: }
499: if (alt.getTreeSpecifier() != null) {
500: println("AST will be built as: "
501: + alt.getTreeSpecifier().getText());
502: }
503: }
504:
505: /** Generate the header for a block, which may be a RuleBlock or a
506: * plain AlternativeBLock. This generates any variable declarations,
507: * init-actions, and syntactic-predicate-testing variables.
508: * @blk The block for which the preamble is to be generated.
509: */
510: protected void genBlockPreamble(AlternativeBlock blk) {
511: // dump out init action
512: if (blk.initAction != null) {
513: printAction("Init action: " + blk.initAction);
514: }
515: }
516:
517: /**Generate common code for a block of alternatives; return a postscript
518: * that needs to be generated at the end of the block. Other routines
519: * may append else-clauses and such for error checking before the postfix
520: * is generated.
521: */
522: public void genCommonBlock(AlternativeBlock blk) {
523: boolean singleAlt = (blk.alternatives.size() == 1);
524:
525: println("Start of an alternative block.");
526: tabs++;
527: println("The lookahead set for this block is:");
528: tabs++;
529: genLookaheadSetForBlock(blk);
530: tabs--;
531:
532: if (singleAlt) {
533: println("This block has a single alternative");
534: if (blk.getAlternativeAt(0).synPred != null) {
535: // Generate a warning if there is one alt and it has a synPred
536: println("Warning: you specified a syntactic predicate for this alternative,");
537: println("and it is the only alternative of a block and will be ignored.");
538: }
539: } else {
540: println("This block has multiple alternatives:");
541: tabs++;
542: }
543:
544: for (int i = 0; i < blk.alternatives.size(); i++) {
545: Alternative alt = blk.getAlternativeAt(i);
546: AlternativeElement elem = alt.head;
547:
548: // Print lookahead set for alternate
549: println("");
550: if (i != 0) {
551: print("Otherwise, ");
552: } else {
553: print("");
554: }
555: _println("Alternate(" + (i + 1) + ") will be taken IF:");
556: println("The lookahead set: ");
557: tabs++;
558: genLookaheadSetForAlt(alt);
559: tabs--;
560: if (alt.semPred != null || alt.synPred != null) {
561: print("is matched, AND ");
562: } else {
563: println("is matched.");
564: }
565:
566: // Dump semantic predicates
567: if (alt.semPred != null) {
568: _println("the semantic predicate:");
569: tabs++;
570: println(alt.semPred);
571: if (alt.synPred != null) {
572: print("is true, AND ");
573: } else {
574: println("is true.");
575: }
576: }
577:
578: // Dump syntactic predicate
579: if (alt.synPred != null) {
580: _println("the syntactic predicate:");
581: tabs++;
582: genSynPred(alt.synPred);
583: tabs--;
584: println("is matched.");
585: }
586:
587: // Dump the alternative
588: genAlt(alt);
589: }
590: println("");
591: println("OTHERWISE, a NoViableAlt exception will be thrown");
592: println("");
593:
594: if (!singleAlt) {
595: tabs--;
596: println("End of alternatives");
597: }
598: tabs--;
599: println("End of alternative block.");
600: }
601:
602: /** Generate a textual representation of the follow set
603: * for a block.
604: * @param blk The rule block of interest
605: */
606: public void genFollowSetForRuleBlock(RuleBlock blk) {
607: Lookahead follow = grammar.theLLkAnalyzer
608: .FOLLOW(1, blk.endNode);
609: printSet(grammar.maxk, 1, follow);
610: }
611:
612: /** Generate a header that is common to all TXT files */
613: protected void genHeader() {
614: println("ANTLR-generated file resulting from grammar "
615: + antlrTool.grammarFile);
616: println("Diagnostic output");
617: println("");
618: println("Terence Parr, MageLang Institute");
619: println("with John Lilley, Empathy Software");
620: println("ANTLR Version " + antlrTool.version + "; 1989-2005");
621: println("");
622: println("*** Header Action.");
623: println("This action will appear at the top of all generated files.");
624: tabs++;
625: printAction(behavior.getHeaderAction(""));
626: tabs--;
627: println("*** End of Header Action");
628: println("");
629: }
630:
631: /**Generate the lookahead set for an alternate. */
632: protected void genLookaheadSetForAlt(Alternative alt) {
633: if (doingLexRules && alt.cache[1].containsEpsilon()) {
634: println("MATCHES ALL");
635: return;
636: }
637: int depth = alt.lookaheadDepth;
638: if (depth == GrammarAnalyzer.NONDETERMINISTIC) {
639: // if the decision is nondeterministic, do the best we can: LL(k)
640: // any predicates that are around will be generated later.
641: depth = grammar.maxk;
642: }
643: for (int i = 1; i <= depth; i++) {
644: Lookahead lookahead = alt.cache[i];
645: printSet(depth, i, lookahead);
646: }
647: }
648:
649: /** Generate a textual representation of the lookahead set
650: * for a block.
651: * @param blk The block of interest
652: */
653: public void genLookaheadSetForBlock(AlternativeBlock blk) {
654: // Find the maximal lookahead depth over all alternatives
655: int depth = 0;
656: for (int i = 0; i < blk.alternatives.size(); i++) {
657: Alternative alt = blk.getAlternativeAt(i);
658: if (alt.lookaheadDepth == GrammarAnalyzer.NONDETERMINISTIC) {
659: depth = grammar.maxk;
660: break;
661: } else if (depth < alt.lookaheadDepth) {
662: depth = alt.lookaheadDepth;
663: }
664: }
665:
666: for (int i = 1; i <= depth; i++) {
667: Lookahead lookahead = grammar.theLLkAnalyzer.look(i, blk);
668: printSet(depth, i, lookahead);
669: }
670: }
671:
672: /** Generate the nextToken rule.
673: * nextToken is a synthetic lexer rule that is the implicit OR of all
674: * user-defined lexer rules.
675: */
676: public void genNextToken() {
677: println("");
678: println("*** Lexer nextToken rule:");
679: println("The lexer nextToken rule is synthesized from all of the user-defined");
680: println("lexer rules. It logically consists of one big alternative block with");
681: println("each user-defined rule being an alternative.");
682: println("");
683:
684: // Create the synthesized rule block for nextToken consisting
685: // of an alternate block containing all the user-defined lexer rules.
686: RuleBlock blk = MakeGrammar.createNextTokenRule(grammar,
687: grammar.rules, "nextToken");
688:
689: // Define the nextToken rule symbol
690: RuleSymbol nextTokenRs = new RuleSymbol("mnextToken");
691: nextTokenRs.setDefined();
692: nextTokenRs.setBlock(blk);
693: nextTokenRs.access = "private";
694: grammar.define(nextTokenRs);
695:
696: // Analyze the synthesized block
697: if (!grammar.theLLkAnalyzer.deterministic(blk)) {
698: println("The grammar analyzer has determined that the synthesized");
699: println("nextToken rule is non-deterministic (i.e., it has ambiguities)");
700: println("This means that there is some overlap of the character");
701: println("lookahead for two or more of your lexer rules.");
702: }
703:
704: genCommonBlock(blk);
705:
706: println("*** End of nextToken lexer rule.");
707: }
708:
709: /** Generate code for a named rule block
710: * @param s The RuleSymbol describing the rule to generate
711: */
712: public void genRule(RuleSymbol s) {
713: println("");
714: String ruleType = (doingLexRules ? "Lexer" : "Parser");
715: println("*** " + ruleType + " Rule: " + s.getId());
716: if (!s.isDefined()) {
717: println("This rule is undefined.");
718: println("This means that the rule was referenced somewhere in the grammar,");
719: println("but a definition for the rule was not encountered.");
720: println("It is also possible that syntax errors during the parse of");
721: println("your grammar file prevented correct processing of the rule.");
722: println("*** End " + ruleType + " Rule: " + s.getId());
723: return;
724: }
725: tabs++;
726:
727: if (s.access.length() != 0) {
728: println("Access: " + s.access);
729: }
730:
731: // Get rule return type and arguments
732: RuleBlock rblk = s.getBlock();
733:
734: // Gen method return value(s)
735: if (rblk.returnAction != null) {
736: println("Return value(s): " + rblk.returnAction);
737: if (doingLexRules) {
738: println("Error: you specified return value(s) for a lexical rule.");
739: println("\tLexical rules have an implicit return type of 'int'.");
740: }
741: } else {
742: if (doingLexRules) {
743: println("Return value: lexical rule returns an implicit token type");
744: } else {
745: println("Return value: none");
746: }
747: }
748:
749: // Gen arguments
750: if (rblk.argAction != null) {
751: println("Arguments: " + rblk.argAction);
752: }
753:
754: // Dump any init-action
755: genBlockPreamble(rblk);
756:
757: // Analyze the rule
758: boolean ok = grammar.theLLkAnalyzer.deterministic(rblk);
759: if (!ok) {
760: println("Error: This rule is non-deterministic");
761: }
762:
763: // Dump the alternates of the rule
764: genCommonBlock(rblk);
765:
766: // Search for an unlabeled exception specification attached to the rule
767: ExceptionSpec unlabeledUserSpec = rblk.findExceptionSpec("");
768:
769: // Generate user-defined or default catch phrases
770: if (unlabeledUserSpec != null) {
771: println("You specified error-handler(s) for this rule:");
772: tabs++;
773: for (int i = 0; i < unlabeledUserSpec.handlers.size(); i++) {
774: if (i != 0) {
775: println("");
776: }
777:
778: ExceptionHandler handler = (ExceptionHandler) unlabeledUserSpec.handlers
779: .elementAt(i);
780: println("Error-handler(" + (i + 1) + ") catches ["
781: + handler.exceptionTypeAndName.getText()
782: + "] and executes:");
783: printAction(handler.action.getText());
784: }
785: tabs--;
786: println("End error-handlers.");
787: } else if (!doingLexRules) {
788: println("Default error-handling will be generated, which catches all");
789: println("parser exceptions and consumes tokens until the follow-set is seen.");
790: }
791:
792: // Dump the follow set
793: // Doesn't seem to work for lexical rules...
794: if (!doingLexRules) {
795: println("The follow set for this rule is:");
796: tabs++;
797: genFollowSetForRuleBlock(rblk);
798: tabs--;
799: }
800:
801: tabs--;
802: println("*** End " + ruleType + " Rule: " + s.getId());
803: }
804:
805: /** Generate the syntactic predicate. This basically generates
806: * the alternative block, buts tracks if we are inside a synPred
807: * @param blk The syntactic predicate block
808: */
809: protected void genSynPred(SynPredBlock blk) {
810: syntacticPredLevel++;
811: gen((AlternativeBlock) blk, null);
812: syntacticPredLevel--;
813: }
814:
815: /** Generate the token types TXT file */
816: protected void genTokenTypes(TokenManager tm) throws IOException {
817: // Open the token output TXT file and set the currentOutput stream
818: antlrTool.reportProgress("Generating " + tm.getName()
819: + TokenTypesFileSuffix + TokenTypesFileExt);
820: currentOutput = antlrTool.openOutputFile(tm.getName()
821: + TokenTypesFileSuffix + TokenTypesFileExt);
822: //SAS: changed for proper text file io
823: tabs = 0;
824:
825: // Generate the header common to all diagnostic files
826: genHeader();
827:
828: // Generate a string for each token. This creates a static
829: // array of Strings indexed by token type.
830: println("");
831: println("*** Tokens used by the parser");
832: println("This is a list of the token numeric values and the corresponding");
833: println("token identifiers. Some tokens are literals, and because of that");
834: println("they have no identifiers. Literals are double-quoted.");
835: tabs++;
836:
837: // Enumerate all the valid token types
838: Vector v = tm.getVocabulary();
839: for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
840: String s = (String) v.elementAt(i);
841: if (s != null) {
842: println(s + " = " + i);
843: }
844: }
845:
846: // Close the interface
847: tabs--;
848: println("*** End of tokens used by the parser");
849:
850: // Close the tokens output file
851: currentOutput.close();
852: currentOutput = null;
853: }
854:
855: /** Get a string for an expression to generate creation of an AST subtree.
856: * @param v A Vector of String, where each element is an expression in the target language yielding an AST node.
857: */
858: public String getASTCreateString(Vector v) {
859: return "***Create an AST from a vector here***"
860: + System.getProperty("line.separator");
861: }
862:
863: /** Get a string for an expression to generate creating of an AST node
864: * @param str The arguments to the AST constructor
865: */
866: public String getASTCreateString(GrammarAtom atom, String str) {
867: return "[" + str + "]";
868: }
869:
870: /// unused.
871: protected String processActionForSpecialSymbols(String actionStr,
872: int line, RuleBlock currentRule, ActionTransInfo tInfo) {
873: return actionStr;
874: }
875:
876: /** Map an identifier to it's corresponding tree-node variable.
877: * This is context-sensitive, depending on the rule and alternative
878: * being generated
879: * @param id The identifier name to map
880: * @param forInput true if the input tree node variable is to be returned, otherwise the output variable is returned.
881: */
882: public String mapTreeId(String id, ActionTransInfo tInfo) {
883: return id;
884: }
885:
886: /** Format a lookahead or follow set.
887: * @param depth The depth of the entire lookahead/follow
888: * @param k The lookahead level to print
889: * @param lookahead The lookahead/follow set to print
890: */
891: public void printSet(int depth, int k, Lookahead lookahead) {
892: int numCols = 5;
893:
894: int[] elems = lookahead.fset.toArray();
895:
896: if (depth != 1) {
897: print("k==" + k + ": {");
898: } else {
899: print("{ ");
900: }
901: if (elems.length > numCols) {
902: _println("");
903: tabs++;
904: print("");
905: }
906:
907: int column = 0;
908: for (int i = 0; i < elems.length; i++) {
909: column++;
910: if (column > numCols) {
911: _println("");
912: print("");
913: column = 0;
914: }
915: if (doingLexRules) {
916: _print(charFormatter.literalChar(elems[i]));
917: } else {
918: _print((String) grammar.tokenManager.getVocabulary()
919: .elementAt(elems[i]));
920: }
921: if (i != elems.length - 1) {
922: _print(", ");
923: }
924: }
925:
926: if (elems.length > numCols) {
927: _println("");
928: tabs--;
929: print("");
930: }
931: _println(" }");
932: }
933: }
|