001: package antlr;
002:
003: /* ANTLR Translator Generator
004: * Project led by Terence Parr at http://www.cs.usfca.edu
005: * Software rights: http://www.antlr.org/license.html
006: */
007:
008: import java.util.Enumeration;
009:
010: import antlr.collections.impl.BitSet;
011: import antlr.collections.impl.Vector;
012:
013: import java.io.PrintWriter; //SAS: changed for proper text file io
014: import java.io.IOException;
015: import java.io.FileWriter;
016:
017: /**Generate P.html, a cross-linked representation of P with or without actions */
018: public class HTMLCodeGenerator extends CodeGenerator {
/** Nesting depth of syntactic-predicate generation; non-zero while
 *  rendering the body of a (...)=> predicate block. */
protected int syntacticPredLevel = 0;

/** True while lexer rules are being generated, false during parser/tree
 *  generation; printSet() uses this to render chars vs. token names. */
protected boolean doingLexRules = false;

// True when the element about to print is the first of its alternative;
// genGenericBlock() uses it to decide whether a sub-block needs a newline.
protected boolean firstElementInAlt;

// The element generated most recently (null at the start of each alt);
// lets layout code avoid redundant newlines after multi-line sub-blocks.
protected AlternativeElement prevAltElem = null; // what was generated last?
028:
/** Create an HTML code-generator.
 * The caller must still call setTool, setBehavior, and setAnalyzer
 * before generating code.
 */
public HTMLCodeGenerator() {
    super();
    // Reuse the Java character formatter for rendering character literals.
    charFormatter = new JavaCharFormatter();
}
037:
038: /** Encode a string for printing in a HTML document..
039: * e.g. encode '<' '>' and similar stuff
040: * @param s the string to encode
041: */
042: static String HTMLEncode(String s) {
043: StringBuffer buf = new StringBuffer();
044:
045: for (int i = 0, len = s.length(); i < len; i++) {
046: char c = s.charAt(i);
047: if (c == '&')
048: buf.append("&");
049: else if (c == '\"')
050: buf.append(""");
051: else if (c == '\'')
052: buf.append("'");
053: else if (c == '<')
054: buf.append("<");
055: else if (c == '>')
056: buf.append(">");
057: else
058: buf.append(c);
059: }
060: return buf.toString();
061: }
062:
063: public void gen() {
064: // Do the code generation
065: try {
066: // Loop over all grammars
067: Enumeration grammarIter = behavior.grammars.elements();
068: while (grammarIter.hasMoreElements()) {
069: Grammar g = (Grammar) grammarIter.nextElement();
070:
071: // Connect all the components to each other
072: /*
073: g.setGrammarAnalyzer(analyzer);
074: analyzer.setGrammar(g);
075: */
076: g.setCodeGenerator(this );
077:
078: // To get right overloading behavior across hetrogeneous grammars
079: g.generate();
080:
081: if (antlrTool.hasError()) {
082: antlrTool.fatalError("Exiting due to errors.");
083: }
084:
085: }
086:
087: } catch (IOException e) {
088: antlrTool.reportException(e, null);
089: }
090: }
091:
/** Generate code for the given grammar element.
 * Actions are deliberately omitted from the HTML rendering; they would
 * only obscure the grammar structure.
 * @param action The {...} action element (ignored)
 * @param context generation context (unused)
 */
public void gen(ActionElement action, Context context) {
    // no-op
}
098:
/** Generate code for the given grammar element.
 * A plain subrule: rendered as a parenthesized block with no
 * suffix operator.
 * @param blk The "x|y|z|..." block to generate
 * @param context generation context (unused)
 */
public void gen(AlternativeBlock blk, Context context) {
    genGenericBlock(blk, "");
}
105:
/** Generate code for the given grammar element.
 * Block ends produce no output of their own; the enclosing block
 * prints the closing ")" itself.
 * @param end The block-end element to generate. Block-end
 * elements are synthesized by the grammar parser to represent
 * the end of a block.
 * @param context generation context (unused)
 */
public void gen(BlockEndElement end, Context context) {
    // no-op
}
114:
115: /** Generate code for the given grammar element.
116: * @param blk The character literal reference to generate
117: */
118: public void gen(CharLiteralElement atom, Context context) {
119: if (atom.not) {
120: _print("~");
121: }
122: _print(HTMLEncode(atom.atomText) + " ");
123: }
124:
125: /** Generate code for the given grammar element.
126: * @param blk The character-range reference to generate
127: */
128: public void gen(CharRangeElement r, Context context) {
129: print(r.beginText + ".." + r.endText + " ");
130: }
131:
/** Generate the lexer HTML file: a single page named
 *  &lt;ClassName&gt;.html documenting every lexer rule plus the
 *  synthesized nextToken rule.
 *  @param g the lexer grammar to document
 *  @throws IOException if the output file cannot be opened or written
 */
public void gen(LexerGrammar g) throws IOException {
    setGrammar(g);
    antlrTool.reportProgress("Generating " + grammar.getClassName()
            + ".html");
    currentOutput = antlrTool.openOutputFile(grammar.getClassName()
            + ".html");
    //SAS: changed for proper text file io

    tabs = 0;
    // Tell printSet() to render lookahead as characters, not token names.
    doingLexRules = true;

    // Generate header common to all HTML output files
    genHeader();

    // Output the user-defined lexer preamble
    // RK: guess not..
    // println(grammar.preambleAction.getText());

    // Generate lexer class definition
    println("");

    // print javadoc comment if any
    if (grammar.comment != null) {
        _println(HTMLEncode(grammar.comment));
    }

    println("Definition of lexer " + grammar.getClassName()
            + ", which is a subclass of " + grammar.getSuperClass()
            + ".");

    // Generate user-defined parser class members
    // printAction(grammar.classMemberAction.getText());

    /*
    // Generate string literals
    println("");
    println("*** String literals used in the parser");
    println("The following string literals were used in the parser.");
    println("An actual code generator would arrange to place these literals");
    println("into a table in the generated lexer, so that actions in the");
    println("generated lexer could match token text against the literals.");
    println("String literals used in the lexer are not listed here, as they");
    println("are incorporated into the mainstream lexer processing.");
    tabs++;
    // Enumerate all of the symbols and look for string literal symbols
    Enumeration ids = grammar.getSymbols();
    while ( ids.hasMoreElements() ) {
        GrammarSymbol sym = (GrammarSymbol)ids.nextElement();
        // Only processing string literals -- reject other symbol entries
        if ( sym instanceof StringLiteralSymbol ) {
            StringLiteralSymbol s = (StringLiteralSymbol)sym;
            println(s.getId() + " = " + s.getTokenType());
        }
    }
    tabs--;
    println("*** End of string literals used by the parser");
    */

    // Generate nextToken() rule.
    // nextToken() is a synthetic lexer rule that is the implicit OR of all
    // user-defined lexer rules.
    genNextToken();

    // Generate code for each rule in the lexer

    Enumeration ids = grammar.rules.elements();
    while (ids.hasMoreElements()) {
        RuleSymbol rs = (RuleSymbol) ids.nextElement();
        // Skip the synthesized rule registered by genNextToken(); it was
        // already printed above.
        if (!rs.id.equals("mnextToken")) {
            genRule(rs);
        }
    }

    // Close the lexer output file
    currentOutput.close();
    currentOutput = null;
    doingLexRules = false;
}
211:
/** Generate code for the given grammar element.
 * Rendered as a parenthesized block with a "+" suffix.
 * @param blk The (...)+ block to generate
 * @param context generation context (unused)
 */
public void gen(OneOrMoreBlock blk, Context context) {
    genGenericBlock(blk, "+");
}
218:
/** Generate the parser HTML file: a single page named
 *  &lt;ClassName&gt;.html documenting every parser rule.
 *  @param g the parser grammar to document
 *  @throws IOException if the output file cannot be opened or written
 */
public void gen(ParserGrammar g) throws IOException {
    setGrammar(g);
    // Open the output stream for the parser and set the currentOutput
    antlrTool.reportProgress("Generating " + grammar.getClassName()
            + ".html");
    currentOutput = antlrTool.openOutputFile(grammar.getClassName()
            + ".html");

    tabs = 0;

    // Generate the header common to all output files.
    genHeader();

    // Generate parser class definition
    println("");

    // print javadoc comment if any
    if (grammar.comment != null) {
        _println(HTMLEncode(grammar.comment));
    }

    println("Definition of parser " + grammar.getClassName()
            + ", which is a subclass of " + grammar.getSuperClass()
            + ".");

    // Enumerate the parser rules
    Enumeration rules = grammar.rules.elements();
    while (rules.hasMoreElements()) {
        println("");
        // Get the rules from the list and downcast it to proper type
        GrammarSymbol sym = (GrammarSymbol) rules.nextElement();
        // Only process parser rules
        if (sym instanceof RuleSymbol) {
            genRule((RuleSymbol) sym);
        }
    }
    // NOTE(review): tabs is decremented here with no matching increment in
    // this method (compare gen(TreeWalkerGrammar), which does tabs++ before
    // its rule loop). Looks like an oversight, but it only affects the
    // indentation of the trailing blank line -- confirm before changing.
    tabs--;
    println("");

    genTail();

    // Close the parser output stream
    currentOutput.close();
    currentOutput = null;
}
265:
266: /** Generate code for the given grammar element.
267: * @param blk The rule-reference to generate
268: */
269: public void gen(RuleRefElement rr, Context context) {
270: RuleSymbol rs = (RuleSymbol) grammar.getSymbol(rr.targetRule);
271:
272: // Generate the actual rule description
273: _print("<a href=\"" + grammar.getClassName() + ".html#"
274: + rr.targetRule + "\">");
275: _print(rr.targetRule);
276: _print("</a>");
277: // RK: Leave out args..
278: // if (rr.args != null) {
279: // _print("["+rr.args+"]");
280: // }
281: _print(" ");
282: }
283:
284: /** Generate code for the given grammar element.
285: * @param blk The string-literal reference to generate
286: */
287: public void gen(StringLiteralElement atom, Context context) {
288: if (atom.not) {
289: _print("~");
290: }
291: _print(HTMLEncode(atom.atomText));
292: _print(" ");
293: }
294:
295: /** Generate code for the given grammar element.
296: * @param blk The token-range reference to generate
297: */
298: public void gen(TokenRangeElement r, Context context) {
299: print(r.beginText + ".." + r.endText + " ");
300: }
301:
302: /** Generate code for the given grammar element.
303: * @param blk The token-reference to generate
304: */
305: public void gen(TokenRefElement atom, Context context) {
306: if (atom.not) {
307: _print("~");
308: }
309: _print(atom.atomText);
310: _print(" ");
311: }
312:
/** Print a tree element spec using its toString() rendering, followed
 *  by a space.
 *  @param t The tree element to generate
 *  @param context generation context (unused)
 */
public void gen(TreeElement t, Context context) {
    print(t + " ");
}
316:
/** Generate the tree-walker HTML file: a single page named
 *  &lt;ClassName&gt;.html documenting every tree-walker rule.
 *  @param g the tree-walker grammar to document
 *  @throws IOException if the output file cannot be opened or written
 */
public void gen(TreeWalkerGrammar g) throws IOException {
    setGrammar(g);
    // Open the output stream for the parser and set the currentOutput
    antlrTool.reportProgress("Generating " + grammar.getClassName()
            + ".html");
    currentOutput = antlrTool.openOutputFile(grammar.getClassName()
            + ".html");
    //SAS: changed for proper text file io

    tabs = 0;

    // Generate the header common to all output files.
    genHeader();

    // Output the user-defined parser preamble
    println("");
    // println("*** Tree-walker Preamble Action.");
    // println("This action will appear before the declaration of your tree-walker class:");
    // tabs++;
    // println(grammar.preambleAction.getText());
    // tabs--;
    // println("*** End of tree-walker Preamble Action");

    // Generate tree-walker class definition
    println("");

    // print javadoc comment if any
    if (grammar.comment != null) {
        _println(HTMLEncode(grammar.comment));
    }

    println("Definition of tree parser " + grammar.getClassName()
            + ", which is a subclass of " + grammar.getSuperClass()
            + ".");

    // Generate user-defined tree-walker class members
    // println("");
    // println("*** User-defined tree-walker class members:");
    // println("These are the member declarations that you defined for your class:");
    // tabs++;
    // printAction(grammar.classMemberAction.getText());
    // tabs--;
    // println("*** End of user-defined tree-walker class members");

    // Generate code for each rule in the grammar
    println("");
    // println("*** tree-walker rules:");
    tabs++;

    // Enumerate the tree-walker rules
    Enumeration rules = grammar.rules.elements();
    while (rules.hasMoreElements()) {
        println("");
        // Get the rules from the list and downcast it to proper type
        GrammarSymbol sym = (GrammarSymbol) rules.nextElement();
        // Only process tree-walker rules
        if (sym instanceof RuleSymbol) {
            genRule((RuleSymbol) sym);
        }
    }
    tabs--;
    println("");
    // println("*** End of tree-walker rules");

    // println("");
    // println("*** End of tree-walker");

    // Close the tree-walker output stream
    currentOutput.close();
    currentOutput = null;
}
389:
/** Generate a wildcard element: printed as a bare ".".
 *  Labels are deliberately not shown (see commented code below).
 *  @param wc the wildcard element to generate
 *  @param context generation context (unused)
 */
public void gen(WildcardElement wc, Context context) {
    /*
    if ( wc.getLabel()!=null ) {
        _print(wc.getLabel()+"=");
    }
    */
    _print(". ");
}
399:
/** Generate code for the given grammar element.
 * Rendered as a parenthesized block with a "*" suffix.
 * @param blk The (...)* block to generate
 * @param context generation context (unused)
 */
public void gen(ZeroOrMoreBlock blk, Context context) {
    genGenericBlock(blk, "*");
}
406:
407: protected void genAlt(Alternative alt) {
408: if (alt.getTreeSpecifier() != null) {
409: _print(alt.getTreeSpecifier().getText());
410: }
411: prevAltElem = null;
412: for (AlternativeElement elem = alt.head; !(elem instanceof BlockEndElement); elem = elem.next) {
413: elem.generate(null);
414: firstElementInAlt = false;
415: prevAltElem = elem;
416: }
417: }
418:
/** Generate the header for a block, which may be a RuleBlock or a
 * plain AlternativeBlock. This would generate any variable declarations,
 * init-actions, and syntactic-predicate-testing variables.
 * @blk The block for which the preamble is to be generated.
 */
// protected void genBlockPreamble(AlternativeBlock blk) {
// RK: don't dump out init actions
// dump out init action
// if ( blk.initAction!=null ) {
// printAction("{" + blk.initAction + "}");
// }
// }
/** Render a block of alternatives.  Each alternative is printed via
 * genAlt(), with a "|" separator line before every alternative after
 * the first.  Unlike the code-emitting generators, the HTML generator
 * produces no postscript or error-checking code, and semantic/syntactic
 * predicates are deliberately not shown.
 * @param blk the block whose alternatives are to be printed
 */
public void genCommonBlock(AlternativeBlock blk) {
    for (int i = 0; i < blk.alternatives.size(); i++) {
        Alternative alt = blk.getAlternativeAt(i);
        // NOTE(review): 'elem' is assigned but never used below.
        AlternativeElement elem = alt.head;

        // dump alt operator |
        if (i > 0 && blk.alternatives.size() > 1) {
            _println("");
            print("|\t");
        }

        // Dump the alternative, starting with predicates
        //
        boolean save = firstElementInAlt;
        firstElementInAlt = true;
        tabs++; // in case we do a newline in alt, increase the tab indent

        // RK: don't dump semantic/syntactic predicates
        // only obscures grammar.
        //
        // Dump semantic predicates
        //
        // if (alt.semPred != null) {
        // println("{" + alt.semPred + "}?");
        // }
        // Dump syntactic predicate
        // if (alt.synPred != null) {
        // genSynPred(alt.synPred);
        // }
        genAlt(alt);
        tabs--;
        firstElementInAlt = save;
    }
}
470:
471: /** Generate a textual representation of the follow set
472: * for a block.
473: * @param blk The rule block of interest
474: */
475: public void genFollowSetForRuleBlock(RuleBlock blk) {
476: Lookahead follow = grammar.theLLkAnalyzer
477: .FOLLOW(1, blk.endNode);
478: printSet(grammar.maxk, 1, follow);
479: }
480:
/** Render any parenthesized subrule: "( alts )" followed by the given
 *  suffix operator ("", "+", "*", or " =>" for syntactic predicates).
 *  Multi-alternative blocks are broken onto their own lines; the
 *  firstElementInAlt / prevAltElem state decides whether the opening
 *  paren needs a fresh line first.
 *  @param blk the block to render
 *  @param blkOp the suffix operator to print after the closing paren
 */
protected void genGenericBlock(AlternativeBlock blk, String blkOp) {
    if (blk.alternatives.size() > 1) {
        // make sure we start on a new line
        if (!firstElementInAlt) {
            // only do newline if the last element wasn't a multi-line block
            if (prevAltElem == null
                    || !(prevAltElem instanceof AlternativeBlock)
                    || ((AlternativeBlock) prevAltElem).alternatives
                            .size() == 1) {
                _println("");
                print("(\t");
            } else {
                _print("(\t");
            }
            // _println("");
            // print("(\t");
        } else {
            _print("(\t");
        }
    } else {
        // single-alternative block: keep it inline
        _print("( ");
    }
    // RK: don't dump init actions
    // genBlockPreamble(blk);
    genCommonBlock(blk);
    if (blk.alternatives.size() > 1) {
        _println("");
        print(")" + blkOp + " ");
        // if not last element of alt, need newline & to indent
        if (!(blk.next instanceof BlockEndElement)) {
            _println("");
            print("");
        }
    } else {
        _print(")" + blkOp + " ");
    }
}
518:
/** Generate the HTML prologue common to all generated pages: doctype,
 *  head/title, a banner table naming the grammar and the source file,
 *  and the opening &lt;PRE&gt; that the grammar body is printed inside
 *  (genTail() closes it).
 */
protected void genHeader() {
    println("<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\">");
    println("<HTML>");
    println("<HEAD>");
    println("<TITLE>Grammar " + antlrTool.grammarFile + "</TITLE>");
    println("</HEAD>");
    println("<BODY>");
    println("<table summary=\"\" border=\"1\" cellpadding=\"5\">");
    println("<tr>");
    println("<td>");
    println("<font size=\"+2\">Grammar " + grammar.getClassName()
            + "</font><br>");
    println("<a href=\"http://www.ANTLR.org\">ANTLR</a>-generated HTML file from "
            + antlrTool.grammarFile);
    println("<p>");
    println("Terence Parr, <a href=\"http://www.magelang.com\">MageLang Institute</a>");
    println("<br>ANTLR Version " + antlrTool.version
            + "; 1989-2005");
    println("</td>");
    println("</tr>");
    println("</table>");
    println("<PRE>");
    // RK: see no reason for printing include files and stuff...
    // tabs++;
    // printAction(behavior.getHeaderAction(""));
    // tabs--;
}
547:
/** Print the lookahead set(s) for one alternative, one line per depth
 *  k=1..depth.  A lexer alternative whose k=1 cache contains epsilon can
 *  match any character, so it is printed as "MATCHES ALL" instead.
 *  @param alt the alternative whose lookahead is printed
 */
protected void genLookaheadSetForAlt(Alternative alt) {
    if (doingLexRules && alt.cache[1].containsEpsilon()) {
        println("MATCHES ALL");
        return;
    }
    int depth = alt.lookaheadDepth;
    if (depth == GrammarAnalyzer.NONDETERMINISTIC) {
        // if the decision is nondeterministic, do the best we can: LL(k)
        // any predicates that are around will be generated later.
        depth = grammar.maxk;
    }
    for (int i = 1; i <= depth; i++) {
        Lookahead lookahead = alt.cache[i];
        printSet(depth, i, lookahead);
    }
}
565:
566: /** Generate a textual representation of the lookahead set
567: * for a block.
568: * @param blk The block of interest
569: */
570: public void genLookaheadSetForBlock(AlternativeBlock blk) {
571: // Find the maximal lookahead depth over all alternatives
572: int depth = 0;
573: for (int i = 0; i < blk.alternatives.size(); i++) {
574: Alternative alt = blk.getAlternativeAt(i);
575: if (alt.lookaheadDepth == GrammarAnalyzer.NONDETERMINISTIC) {
576: depth = grammar.maxk;
577: break;
578: } else if (depth < alt.lookaheadDepth) {
579: depth = alt.lookaheadDepth;
580: }
581: }
582:
583: for (int i = 1; i <= depth; i++) {
584: Lookahead lookahead = grammar.theLLkAnalyzer.look(i, blk);
585: printSet(depth, i, lookahead);
586: }
587: }
588:
/** Generate the nextToken rule.
 * nextToken is a synthetic lexer rule that is the implicit OR of all
 * user-defined lexer rules.  The synthesized RuleBlock is printed via
 * genCommonBlock(), and the rule symbol is registered in the grammar
 * as "mnextToken" so that gen(LexerGrammar) can skip it when looping
 * over the user's rules.
 */
public void genNextToken() {
    println("");
    println("/** Lexer nextToken rule:");
    println(" * The lexer nextToken rule is synthesized from all of the user-defined");
    println(" * lexer rules. It logically consists of one big alternative block with");
    println(" * each user-defined rule being an alternative.");
    println(" */");

    // Create the synthesized rule block for nextToken consisting
    // of an alternate block containing all the user-defined lexer rules.
    RuleBlock blk = MakeGrammar.createNextTokenRule(grammar,
            grammar.rules, "nextToken");

    // Define the nextToken rule symbol
    RuleSymbol nextTokenRs = new RuleSymbol("mnextToken");
    nextTokenRs.setDefined();
    nextTokenRs.setBlock(blk);
    nextTokenRs.access = "private";
    grammar.define(nextTokenRs);

    /*
    // Analyze the synthesized block
    if (!grammar.theLLkAnalyzer.deterministic(blk))
    {
        println("The grammar analyzer has determined that the synthesized");
        println("nextToken rule is non-deterministic (i.e., it has ambiguities)");
        println("This means that there is some overlap of the character");
        println("lookahead for two or more of your lexer rules.");
    }
    */

    genCommonBlock(blk);
}
626:
/** Generate the HTML rendering of a named rule: its javadoc comment (if
 * any), its access modifier (only when not public), a named anchor that
 * rule references link to, and the ":\t alts ;" body.
 * Rule arguments and return values are deliberately omitted.
 * @param s The RuleSymbol describing the rule to generate
 */
public void genRule(RuleSymbol s) {
    if (s == null || !s.isDefined())
        return; // undefined rule
    println("");
    if (s.comment != null) {
        _println(HTMLEncode(s.comment));
    }
    // Only show the access modifier when it is something other than the
    // default "public".
    if (s.access.length() != 0) {
        if (!s.access.equals("public")) {
            _print(s.access + " ");
        }
    }
    // Named anchor so gen(RuleRefElement) can hyperlink to this rule.
    _print("<a name=\"" + s.getId() + "\">");
    _print(s.getId());
    _print("</a>");

    // Get rule return type and arguments
    RuleBlock rblk = s.getBlock();

    // RK: for HTML output not of much value...
    // Gen method return value(s)
    // if (rblk.returnAction != null) {
    // _print("["+rblk.returnAction+"]");
    // }
    // Gen arguments
    // if (rblk.argAction != null)
    // {
    // _print(" returns [" + rblk.argAction+"]");
    // }
    _println("");
    tabs++;
    print(":\t");

    // Dump any init-action
    // genBlockPreamble(rblk);

    // Dump the alternates of the rule
    genCommonBlock(rblk);

    _println("");
    println(";");
    tabs--;
}
673:
/** Generate the syntactic predicate. This basically generates
 * the alternative block (with a " =>" suffix), but tracks via
 * syntacticPredLevel that we are inside a synPred.
 * @param blk The syntactic predicate block
 */
protected void genSynPred(SynPredBlock blk) {
    syntacticPredLevel++;
    genGenericBlock(blk, " =>");
    syntacticPredLevel--;
}
683:
684: public void genTail() {
685: println("</PRE>");
686: println("</BODY>");
687: println("</HTML>");
688: }
689:
/** Generate the token-types file: a listing of every user-defined token
 * identifier and its numeric type, written to a separate output file
 * named from the token manager plus the standard suffix/extension.
 * @param tm the token manager holding the vocabulary to dump
 * @throws IOException if the output file cannot be opened or written
 */
protected void genTokenTypes(TokenManager tm) throws IOException {
    // Open the token output TXT file and set the currentOutput stream
    antlrTool.reportProgress("Generating " + tm.getName()
            + TokenTypesFileSuffix + TokenTypesFileExt);
    currentOutput = antlrTool.openOutputFile(tm.getName()
            + TokenTypesFileSuffix + TokenTypesFileExt);
    //SAS: changed for proper text file io
    tabs = 0;

    // Generate the header common to all diagnostic files
    genHeader();

    // Generate a string for each token. This creates a static
    // array of Strings indexed by token type.
    println("");
    println("*** Tokens used by the parser");
    println("This is a list of the token numeric values and the corresponding");
    println("token identifiers. Some tokens are literals, and because of that");
    println("they have no identifiers. Literals are double-quoted.");
    tabs++;

    // Enumerate all the valid token types, skipping the predefined
    // internal types below Token.MIN_USER_TYPE.
    Vector v = tm.getVocabulary();
    for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
        String s = (String) v.elementAt(i);
        if (s != null) {
            println(s + " = " + i);
        }
    }

    // Close the interface
    tabs--;
    println("*** End of tokens used by the parser");

    // Close the tokens output file
    currentOutput.close();
    currentOutput = null;
}
729:
/** Get a string for an expression to generate creation of an AST subtree.
 * The HTML generator emits no executable code, so there is no AST
 * construction expression.
 * @param v A Vector of String, where each element is an expression in the target language yielding an AST node.
 * @return always null for this generator
 */
public String getASTCreateString(Vector v) {
    return null;
}
736:
/** Get a string for an expression to generate creating of an AST node.
 * The HTML generator emits no executable code, so there is no AST
 * construction expression.
 * @param atom the grammar atom the node would be created for
 * @param str The arguments to the AST constructor
 * @return always null for this generator
 */
public String getASTCreateString(GrammarAtom atom, String str) {
    return null;
}
743:
/** Map an identifier to it's corresponding tree-node variable.
 * This is context-sensitive, depending on the rule and alternative
 * being generated.  The HTML generator performs no action translation,
 * so the identifier is returned unchanged.
 * @param id The identifier name to map
 * @param tInfo translation bookkeeping (unused here)
 * @return the identifier, unchanged
 */
public String mapTreeId(String id, ActionTransInfo tInfo) {
    return id;
}
753:
/** Unused by the HTML generator: actions are not emitted, so no
 * special-symbol translation is needed; the action text is returned
 * verbatim.
 */
protected String processActionForSpecialSymbols(String actionStr,
        int line, RuleBlock currentRule, ActionTransInfo tInfo) {
    return actionStr;
}
759:
/** Format a lookahead or follow set.
 * Printed as "{ a, b, ... }" when the total depth is 1, otherwise as
 * "k==K: { ... }"; long sets wrap onto indented continuation lines.
 * During lexer generation the elements are rendered as character
 * literals, otherwise as token names from the vocabulary.
 * @param depth The depth of the entire lookahead/follow
 * @param k The lookahead level to print
 * @param lookahead The lookahead/follow set to print
 */
public void printSet(int depth, int k, Lookahead lookahead) {
    // elements per output line before wrapping
    int numCols = 5;

    int[] elems = lookahead.fset.toArray();

    if (depth != 1) {
        print("k==" + k + ": {");
    } else {
        print("{ ");
    }
    if (elems.length > numCols) {
        // start the set body on its own, more deeply indented line
        _println("");
        tabs++;
        print("");
    }

    int column = 0;
    for (int i = 0; i < elems.length; i++) {
        column++;
        if (column > numCols) {
            // NOTE(review): resetting column to 0 (not 1) lets wrapped
            // lines hold numCols+1 items; cosmetic only -- confirm
            // before changing.
            _println("");
            print("");
            column = 0;
        }
        if (doingLexRules) {
            _print(charFormatter.literalChar(elems[i]));
        } else {
            _print((String) grammar.tokenManager.getVocabulary()
                    .elementAt(elems[i]));
        }
        if (i != elems.length - 1) {
            _print(", ");
        }
    }

    if (elems.length > numCols) {
        _println("");
        tabs--;
        print("");
    }
    _println(" }");
}
807: }
|