001: package antlr;
002:
003: /* ANTLR Translator Generator
004: * Project led by Terence Parr at http://www.jGuru.com
005: * Software rights: http://www.antlr.org/RIGHTS.html
006: *
007: * $Id: HTMLCodeGenerator.java,v 1.1 2004/01/21 19:18:31 rgrimm Exp $
008: */
009:
010: import java.util.Enumeration;
011:
012: import antlr.collections.impl.BitSet;
013: import antlr.collections.impl.Vector;
014:
015: import java.io.PrintWriter; //SAS: changed for proper text file io
016: import java.io.IOException;
017: import java.io.FileWriter;
018:
019: /**Generate P.html, a cross-linked representation of P with or without actions */
020: public class HTMLCodeGenerator extends CodeGenerator {
    /** Nesting depth of syntactic predicate generation; non-zero while
     *  inside a (...)=> predicate (incremented/decremented by genSynPred). */
    protected int syntacticPredLevel = 0;

    /** True during lexer generation, false during parser generation;
     *  switches printSet between character and token vocabularies. */
    protected boolean doingLexRules = false;

    // True when the next element printed is the first in its alternative;
    // consulted by genGenericBlock to decide whether to start a new line.
    protected boolean firstElementInAlt;

    // The element most recently generated in the current alternative,
    // or null at the start of an alternative (maintained by genAlt).
    protected AlternativeElement prevAltElem = null; // what was generated last?
030:
031: /** Create a Diagnostic code-generator using the given Grammar
032: * The caller must still call setTool, setBehavior, and setAnalyzer
033: * before generating code.
034: */
035: public HTMLCodeGenerator() {
036: super ();
037: charFormatter = new JavaCharFormatter();
038: }
039:
040: /** Encode a string for printing in a HTML document..
041: * e.g. encode '<' '>' and similar stuff
042: * @param s the string to encode
043: */
044: static String HTMLEncode(String s) {
045: StringBuffer buf = new StringBuffer();
046:
047: for (int i = 0, len = s.length(); i < len; i++) {
048: char c = s.charAt(i);
049: if (c == '&')
050: buf.append("&");
051: else if (c == '\"')
052: buf.append(""");
053: else if (c == '\'')
054: buf.append("'");
055: else if (c == '<')
056: buf.append("<");
057: else if (c == '>')
058: buf.append(">");
059: else
060: buf.append(c);
061: }
062: return buf.toString();
063: }
064:
065: public void gen() {
066: // Do the code generation
067: try {
068: // Loop over all grammars
069: Enumeration grammarIter = behavior.grammars.elements();
070: while (grammarIter.hasMoreElements()) {
071: Grammar g = (Grammar) grammarIter.nextElement();
072:
073: // Connect all the components to each other
074: /*
075: g.setGrammarAnalyzer(analyzer);
076: analyzer.setGrammar(g);
077: */
078: g.setCodeGenerator(this );
079:
080: // To get right overloading behavior across hetrogeneous grammars
081: g.generate();
082:
083: if (antlrTool.hasError()) {
084: antlrTool.fatalError("Exiting due to errors.");
085: }
086:
087: }
088:
089: } catch (IOException e) {
090: antlrTool.reportException(e, null);
091: }
092: }
093:
    /** Generate code for the given grammar element.
     *  Actions are omitted from the HTML rendering, so this is a no-op.
     * @param action The {...} action to generate
     */
    public void gen(ActionElement action) {
        // no-op
    }

    /** Generate code for the given grammar element.
     *  Rendered as a parenthesized block with no operator suffix.
     * @param blk The "x|y|z|..." block to generate
     */
    public void gen(AlternativeBlock blk) {
        genGenericBlock(blk, "");
    }

    /** Generate code for the given grammar element.
     *  Block-end elements are synthesized by the grammar parser to
     *  represent the end of a block; they produce no HTML output.
     * @param end The block-end element to generate
     */
    public void gen(BlockEndElement end) {
        // no-op
    }
116:
    /** Generate code for the given grammar element.
     *  Prints the HTML-encoded character literal, preceded by '~'
     *  when the element is an inverted (~c) match.
     * @param atom The character literal reference to generate
     */
    public void gen(CharLiteralElement atom) {
        if (atom.not) {
            _print("~");
        }
        _print(HTMLEncode(atom.atomText) + " ");
    }

    /** Generate code for the given grammar element.
     *  NOTE(review): unlike CharLiteralElement above, the endpoints are
     *  not HTML-encoded here — confirm range text can never contain
     *  markup-significant characters.
     * @param r The character-range reference to generate
     */
    public void gen(CharRangeElement r) {
        print(r.beginText + ".." + r.endText + " ");
    }
133:
    /** Generate the lexer HTML file: the lexer's comment, its definition
     *  line, the synthesized nextToken rule, and every user-defined rule.
     *  NOTE(review): the output file name uses TokenTypesFileExt rather
     *  than ".html" as gen(ParserGrammar)/gen(TreeWalkerGrammar) do —
     *  confirm this extension is intended for the lexer page.
     * @param g the lexer grammar to render
     * @throws IOException if the output file cannot be written
     */
    public void gen(LexerGrammar g) throws IOException {
        setGrammar(g);
        antlrTool.reportProgress("Generating " + grammar.getClassName()
                + TokenTypesFileExt);
        currentOutput = antlrTool.openOutputFile(grammar.getClassName()
                + TokenTypesFileExt);
        //SAS: changed for proper text file io

        tabs = 0;
        doingLexRules = true;

        // Generate header common to all TXT output files
        genHeader();

        // Output the user-defined lexer preamble
        // RK: guess not..
        // println(grammar.preambleAction.getText());

        // Generate lexer class definition
        println("");

        // print javadoc comment if any
        if (grammar.comment != null) {
            _println(HTMLEncode(grammar.comment));
        }

        println("Definition of lexer " + grammar.getClassName()
                + ", which is a subclass of " + grammar.getSuperClass()
                + ".");

        // Generate user-defined parser class members
        // printAction(grammar.classMemberAction.getText());

        /*
        // Generate string literals
        println("");
        println("*** String literals used in the parser");
        println("The following string literals were used in the parser.");
        println("An actual code generator would arrange to place these literals");
        println("into a table in the generated lexer, so that actions in the");
        println("generated lexer could match token text against the literals.");
        println("String literals used in the lexer are not listed here, as they");
        println("are incorporated into the mainstream lexer processing.");
        tabs++;
        // Enumerate all of the symbols and look for string literal symbols
        Enumeration ids = grammar.getSymbols();
        while ( ids.hasMoreElements() ) {
            GrammarSymbol sym = (GrammarSymbol)ids.nextElement();
            // Only processing string literals -- reject other symbol entries
            if ( sym instanceof StringLiteralSymbol ) {
                StringLiteralSymbol s = (StringLiteralSymbol)sym;
                println(s.getId() + " = " + s.getTokenType());
            }
        }
        tabs--;
        println("*** End of string literals used by the parser");
        */

        // Generate nextToken() rule.
        // nextToken() is a synthetic lexer rule that is the implicit OR of all
        // user-defined lexer rules.
        genNextToken();

        // Generate code for each rule in the lexer, skipping the
        // synthesized nextToken rule ("mnextToken") handled above.
        Enumeration ids = grammar.rules.elements();
        while (ids.hasMoreElements()) {
            RuleSymbol rs = (RuleSymbol) ids.nextElement();
            if (!rs.id.equals("mnextToken")) {
                genRule(rs);
            }
        }

        // Close the lexer output file and reset generator state.
        currentOutput.close();
        currentOutput = null;
        doingLexRules = false;
    }
213:
    /** Generate code for the given grammar element.
     *  Rendered as a parenthesized block suffixed with "+".
     * @param blk The (...)+ block to generate
     */
    public void gen(OneOrMoreBlock blk) {
        genGenericBlock(blk, "+");
    }
220:
    /** Generate the parser HTML file: header, parser definition line,
     *  every rule, and the closing markup from genTail().
     * @param g the parser grammar to render
     * @throws IOException if the output file cannot be written
     */
    public void gen(ParserGrammar g) throws IOException {
        setGrammar(g);
        // Open the output stream for the parser and set the currentOutput
        antlrTool.reportProgress("Generating " + grammar.getClassName()
                + ".html");
        currentOutput = antlrTool.openOutputFile(grammar.getClassName()
                + ".html");

        tabs = 0;

        // Generate the header common to all output files.
        genHeader();

        // Generate parser class definition
        println("");

        // print javadoc comment if any
        if (grammar.comment != null) {
            _println(HTMLEncode(grammar.comment));
        }

        println("Definition of parser " + grammar.getClassName()
                + ", which is a subclass of " + grammar.getSuperClass()
                + ".");

        // Enumerate the parser rules
        Enumeration rules = grammar.rules.elements();
        while (rules.hasMoreElements()) {
            println("");
            // Get the rules from the list and downcast it to proper type
            GrammarSymbol sym = (GrammarSymbol) rules.nextElement();
            // Only process parser rules
            if (sym instanceof RuleSymbol) {
                genRule((RuleSymbol) sym);
            }
        }
        // NOTE(review): tabs is decremented here with no matching tabs++
        // in this method (cf. gen(TreeWalkerGrammar)) — confirm intended.
        tabs--;
        println("");

        genTail();

        // Close the parser output stream
        currentOutput.close();
        currentOutput = null;
    }
267:
268: /** Generate code for the given grammar element.
269: * @param blk The rule-reference to generate
270: */
271: public void gen(RuleRefElement rr) {
272: RuleSymbol rs = (RuleSymbol) grammar.getSymbol(rr.targetRule);
273:
274: // Generate the actual rule description
275: _print("<a href=\"" + grammar.getClassName() + ".html#"
276: + rr.targetRule + "\">");
277: _print(rr.targetRule);
278: _print("</a>");
279: // RK: Leave out args..
280: // if (rr.args != null) {
281: // _print("["+rr.args+"]");
282: // }
283: _print(" ");
284: }
285:
286: /** Generate code for the given grammar element.
287: * @param blk The string-literal reference to generate
288: */
289: public void gen(StringLiteralElement atom) {
290: if (atom.not) {
291: _print("~");
292: }
293: _print(HTMLEncode(atom.atomText));
294: _print(" ");
295: }
296:
    /** Generate code for the given grammar element.
     * @param r The token-range reference to generate
     */
    public void gen(TokenRangeElement r) {
        print(r.beginText + ".." + r.endText + " ");
    }

    /** Generate code for the given grammar element.
     *  Prints the token name, preceded by '~' for inverted matches.
     *  Token names are not HTML-encoded (identifiers contain no markup
     *  characters).
     * @param atom The token-reference to generate
     */
    public void gen(TokenRefElement atom) {
        if (atom.not) {
            _print("~");
        }
        _print(atom.atomText);
        _print(" ");
    }

    /** Print a tree element; relies on TreeElement's string conversion. */
    public void gen(TreeElement t) {
        print(t + " ");
    }
318:
    /** Generate the tree-walker HTML file: header, tree-parser definition
     *  line, and every rule.
     *  NOTE(review): unlike gen(ParserGrammar), this method does not call
     *  genTail() before closing the file — confirm the closing markup is
     *  intentionally omitted here.
     * @param g the tree-walker grammar to render
     * @throws IOException if the output file cannot be written
     */
    public void gen(TreeWalkerGrammar g) throws IOException {
        setGrammar(g);
        // Open the output stream for the parser and set the currentOutput
        antlrTool.reportProgress("Generating " + grammar.getClassName()
                + ".html");
        currentOutput = antlrTool.openOutputFile(grammar.getClassName()
                + ".html");
        //SAS: changed for proper text file io

        tabs = 0;

        // Generate the header common to all output files.
        genHeader();

        // Output the user-defined parser preamble
        println("");
        // println("*** Tree-walker Preamble Action.");
        // println("This action will appear before the declaration of your tree-walker class:");
        // tabs++;
        // println(grammar.preambleAction.getText());
        // tabs--;
        // println("*** End of tree-walker Preamble Action");

        // Generate tree-walker class definition
        println("");

        // print javadoc comment if any
        if (grammar.comment != null) {
            _println(HTMLEncode(grammar.comment));
        }

        println("Definition of tree parser " + grammar.getClassName()
                + ", which is a subclass of " + grammar.getSuperClass()
                + ".");

        // Generate user-defined tree-walker class members
        // println("");
        // println("*** User-defined tree-walker class members:");
        // println("These are the member declarations that you defined for your class:");
        // tabs++;
        // printAction(grammar.classMemberAction.getText());
        // tabs--;
        // println("*** End of user-defined tree-walker class members");

        // Generate code for each rule in the grammar
        println("");
        // println("*** tree-walker rules:");
        tabs++;

        // Enumerate the tree-walker rules
        Enumeration rules = grammar.rules.elements();
        while (rules.hasMoreElements()) {
            println("");
            // Get the rules from the list and downcast it to proper type
            GrammarSymbol sym = (GrammarSymbol) rules.nextElement();
            // Only process tree-walker rules
            if (sym instanceof RuleSymbol) {
                genRule((RuleSymbol) sym);
            }
        }
        tabs--;
        println("");
        // println("*** End of tree-walker rules");

        // println("");
        // println("*** End of tree-walker");

        // Close the tree-walker output stream
        currentOutput.close();
        currentOutput = null;
    }
391:
    /** Generate a wildcard element: rendered as a bare ".". */
    public void gen(WildcardElement wc) {
        /*
        if ( wc.getLabel()!=null ) {
            _print(wc.getLabel()+"=");
        }
        */
        _print(". ");
    }

    /** Generate code for the given grammar element.
     *  Rendered as a parenthesized block suffixed with "*".
     * @param blk The (...)* block to generate
     */
    public void gen(ZeroOrMoreBlock blk) {
        genGenericBlock(blk, "*");
    }
408:
409: protected void genAlt(Alternative alt) {
410: if (alt.getTreeSpecifier() != null) {
411: _print(alt.getTreeSpecifier().getText());
412: }
413: prevAltElem = null;
414: for (AlternativeElement elem = alt.head; !(elem instanceof BlockEndElement); elem = elem.next) {
415: elem.generate();
416: firstElementInAlt = false;
417: prevAltElem = elem;
418: }
419: }
420:
    /** Generate the header for a block, which may be a RuleBlock or a
     * plain AlternativeBlock. This generates any variable declarations,
     * init-actions, and syntactic-predicate-testing variables.
     * @blk The block for which the preamble is to be generated.
     */
    // protected void genBlockPreamble(AlternativeBlock blk) {
    // RK: don't dump out init actions
    // dump out init action
    // if ( blk.initAction!=null ) {
    //     printAction("{" + blk.initAction + "}");
    // }
    // }
    /**Generate common code for a block of alternatives; return a postscript
     * that needs to be generated at the end of the block. Other routines
     * may append else-clauses and such for error checking before the postfix
     * is generated.
     * NOTE(review): the wording above appears inherited from another code
     * generator — this implementation returns nothing; it simply prints
     * each alternative, separated by "|".
     */
    public void genCommonBlock(AlternativeBlock blk) {
        for (int i = 0; i < blk.alternatives.size(); i++) {
            Alternative alt = blk.getAlternativeAt(i);
            // NOTE(review): elem is never used in this method.
            AlternativeElement elem = alt.head;

            // dump alt operator |
            if (i > 0 && blk.alternatives.size() > 1) {
                _println("");
                print("|\t");
            }

            // Dump the alternative, starting with predicates
            //
            boolean save = firstElementInAlt;
            firstElementInAlt = true;
            tabs++; // in case we do a newline in alt, increase the tab indent

            // RK: don't dump semantic/syntactic predicates
            // only obscures grammar.
            //
            // Dump semantic predicates
            //
            // if (alt.semPred != null) {
            //     println("{" + alt.semPred + "}?");
            // }
            // Dump syntactic predicate
            // if (alt.synPred != null) {
            //     genSynPred(alt.synPred);
            // }
            genAlt(alt);
            tabs--;
            firstElementInAlt = save;
        }
    }
472:
473: /** Generate a textual representation of the follow set
474: * for a block.
475: * @param blk The rule block of interest
476: */
477: public void genFollowSetForRuleBlock(RuleBlock blk) {
478: Lookahead follow = grammar.theLLkAnalyzer
479: .FOLLOW(1, blk.endNode);
480: printSet(grammar.maxk, 1, follow);
481: }
482:
    /** Render an alternative block wrapped in parentheses and followed by
     *  the operator suffix blkOp ("", "*", "+", or " =>").
     *  Multi-alternative blocks are broken across lines, with the newline
     *  suppressed when the previous element was itself a multi-line block;
     *  single-alternative blocks are kept inline.
     * @param blk   the block to render
     * @param blkOp suffix printed after the closing parenthesis
     */
    protected void genGenericBlock(AlternativeBlock blk, String blkOp) {
        if (blk.alternatives.size() > 1) {
            // make sure we start on a new line
            if (!firstElementInAlt) {
                // only do newline if the last element wasn't a multi-line block
                if (prevAltElem == null
                        || !(prevAltElem instanceof AlternativeBlock)
                        || ((AlternativeBlock) prevAltElem).alternatives
                                .size() == 1) {
                    _println("");
                    print("(\t");
                } else {
                    _print("(\t");
                }
                // _println("");
                // print("(\t");
            } else {
                _print("(\t");
            }
        } else {
            _print("( ");
        }
        // RK: don't dump init actions
        // genBlockPreamble(blk);
        genCommonBlock(blk);
        if (blk.alternatives.size() > 1) {
            _println("");
            print(")" + blkOp + " ");
            // if not last element of alt, need newline & to indent
            if (!(blk.next instanceof BlockEndElement)) {
                _println("");
                print("");
            }
        } else {
            _print(")" + blkOp + " ");
        }
    }
520:
    /** Generate the HTML prologue common to all generated pages:
     *  doctype, head/title, a banner table naming the grammar, and an
     *  opening PRE tag (closed later by genTail()).
     */
    protected void genHeader() {
        println("<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\">");
        println("<HTML>");
        println("<HEAD>");
        println("<TITLE>Grammar " + antlrTool.grammarFile + "</TITLE>");
        println("</HEAD>");
        println("<BODY>");
        println("<table summary=\"\" border=\"1\" cellpadding=\"5\">");
        println("<tr>");
        println("<td>");
        println("<font size=\"+2\">Grammar " + grammar.getClassName()
                + "</font><br>");
        println("<a href=\"http://www.ANTLR.org\">ANTLR</a>-generated HTML file from "
                + antlrTool.grammarFile);
        println("<p>");
        println("Terence Parr, <a href=\"http://www.magelang.com\">MageLang Institute</a>");
        println("<br>ANTLR Version " + antlrTool.version
                + "; 1989-1999");
        println("</td>");
        println("</tr>");
        println("</table>");
        println("<PRE>");
        // RK: see no reason for printing include files and stuff...
        // tabs++;
        // printAction(behavior.getHeaderAction(""));
        // tabs--;
    }
549:
550: /**Generate the lookahead set for an alternate. */
551: protected void genLookaheadSetForAlt(Alternative alt) {
552: if (doingLexRules && alt.cache[1].containsEpsilon()) {
553: println("MATCHES ALL");
554: return;
555: }
556: int depth = alt.lookaheadDepth;
557: if (depth == GrammarAnalyzer.NONDETERMINISTIC) {
558: // if the decision is nondeterministic, do the best we can: LL(k)
559: // any predicates that are around will be generated later.
560: depth = grammar.maxk;
561: }
562: for (int i = 1; i <= depth; i++) {
563: Lookahead lookahead = alt.cache[i];
564: printSet(depth, i, lookahead);
565: }
566: }
567:
568: /** Generate a textual representation of the lookahead set
569: * for a block.
570: * @param blk The block of interest
571: */
572: public void genLookaheadSetForBlock(AlternativeBlock blk) {
573: // Find the maximal lookahead depth over all alternatives
574: int depth = 0;
575: for (int i = 0; i < blk.alternatives.size(); i++) {
576: Alternative alt = blk.getAlternativeAt(i);
577: if (alt.lookaheadDepth == GrammarAnalyzer.NONDETERMINISTIC) {
578: depth = grammar.maxk;
579: break;
580: } else if (depth < alt.lookaheadDepth) {
581: depth = alt.lookaheadDepth;
582: }
583: }
584:
585: for (int i = 1; i <= depth; i++) {
586: Lookahead lookahead = grammar.theLLkAnalyzer.look(i, blk);
587: printSet(depth, i, lookahead);
588: }
589: }
590:
    /** Generate the nextToken rule.
     * nextToken is a synthetic lexer rule that is the implicit OR of all
     * user-defined lexer rules.  The synthesized rule is registered under
     * the symbol "mnextToken" (skipped later by gen(LexerGrammar)) and
     * rendered like any other alternative block.
     */
    public void genNextToken() {
        println("");
        println("/** Lexer nextToken rule:");
        println(" * The lexer nextToken rule is synthesized from all of the user-defined");
        println(" * lexer rules. It logically consists of one big alternative block with");
        println(" * each user-defined rule being an alternative.");
        println(" */");

        // Create the synthesized rule block for nextToken consisting
        // of an alternate block containing all the user-defined lexer rules.
        RuleBlock blk = MakeGrammar.createNextTokenRule(grammar,
                grammar.rules, "nextToken");

        // Define the nextToken rule symbol
        RuleSymbol nextTokenRs = new RuleSymbol("mnextToken");
        nextTokenRs.setDefined();
        nextTokenRs.setBlock(blk);
        nextTokenRs.access = "private";
        grammar.define(nextTokenRs);

        /*
        // Analyze the synthesized block
        if (!grammar.theLLkAnalyzer.deterministic(blk))
        {
            println("The grammar analyzer has determined that the synthesized");
            println("nextToken rule is non-deterministic (i.e., it has ambiguities)");
            println("This means that there is some overlap of the character");
            println("lookahead for two or more of your lexer rules.");
        }
        */

        // Render the synthesized block like any other alternative block.
        genCommonBlock(blk);
    }
628:
    /** Generate code for a named rule block.
     *  Emits the rule's comment (if any), its access modifier when it is
     *  not public, an HTML anchor for cross-references from
     *  gen(RuleRefElement), and then the rule's alternatives between
     *  ":" and ";".
     * @param s The RuleSymbol describing the rule to generate
     */
    public void genRule(RuleSymbol s) {
        if (s == null || !s.isDefined())
            return; // undefined rule
        println("");
        if (s.comment != null) {
            _println(HTMLEncode(s.comment));
        }
        if (s.access.length() != 0) {
            if (!s.access.equals("public")) {
                _print(s.access + " ");
            }
        }
        // Anchor so rule references elsewhere in the page can link here.
        _print("<a name=\"" + s.getId() + "\">");
        _print(s.getId());
        _print("</a>");

        // Get rule return type and arguments
        RuleBlock rblk = s.getBlock();

        // RK: for HTML output not of much value...
        // Gen method return value(s)
        // if (rblk.returnAction != null) {
        //     _print("["+rblk.returnAction+"]");
        // }
        // Gen arguments
        // if (rblk.argAction != null)
        // {
        //     _print(" returns [" + rblk.argAction+"]");
        // }
        _println("");
        tabs++;
        print(":\t");

        // Dump any init-action
        // genBlockPreamble(rblk);

        // Dump the alternates of the rule
        genCommonBlock(rblk);

        _println("");
        println(";");
        tabs--;
    }
675:
    /** Generate the syntactic predicate. This basically generates
     * the alternative block, but tracks (via syntacticPredLevel) that we
     * are inside a synPred while doing so.
     * @param blk The syntactic predicate block
     */
    protected void genSynPred(SynPredBlock blk) {
        syntacticPredLevel++;
        genGenericBlock(blk, " =>");
        syntacticPredLevel--;
    }

    /** Emit the closing HTML markup begun by genHeader(). */
    public void genTail() {
        println("</PRE>");
        println("</BODY>");
        println("</HTML>");
    }
691:
    /** Generate the token types file: a listing of every vocabulary
     *  entry at or above Token.MIN_USER_TYPE with its numeric value.
     * @param tm the token manager supplying the vocabulary
     * @throws IOException if the output file cannot be written
     */
    protected void genTokenTypes(TokenManager tm) throws IOException {
        // Open the token output TXT file and set the currentOutput stream
        antlrTool.reportProgress("Generating " + tm.getName()
                + TokenTypesFileSuffix + TokenTypesFileExt);
        currentOutput = antlrTool.openOutputFile(tm.getName()
                + TokenTypesFileSuffix + TokenTypesFileExt);
        //SAS: changed for proper text file io
        tabs = 0;

        // Generate the header common to all diagnostic files
        genHeader();

        // Generate a string for each token. This creates a static
        // array of Strings indexed by token type.
        println("");
        println("*** Tokens used by the parser");
        println("This is a list of the token numeric values and the corresponding");
        println("token identifiers. Some tokens are literals, and because of that");
        println("they have no identifiers. Literals are double-quoted.");
        tabs++;

        // Enumerate all the valid token types; null slots (gaps in the
        // vocabulary) are skipped.
        Vector v = tm.getVocabulary();
        for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
            String s = (String) v.elementAt(i);
            if (s != null) {
                println(s + " = " + i);
            }
        }

        // Close the interface
        tabs--;
        println("*** End of tokens used by the parser");

        // Close the tokens output file
        currentOutput.close();
        currentOutput = null;
    }
731:
    /** Get a string for an expression to generate creation of an AST subtree.
     *  The HTML generator emits no target code, so this returns null.
     * @param v A Vector of String, where each element is an expression in the target language yielding an AST node.
     */
    public String getASTCreateString(Vector v) {
        return null;
    }

    /** Get a string for an expression to generate creating of an AST node.
     *  The HTML generator emits no target code, so this returns null.
     * @param atom the grammar atom the node would be created for
     * @param str The arguments to the AST constructor
     */
    public String getASTCreateString(GrammarAtom atom, String str) {
        return null;
    }

    /** Map an identifier to its corresponding tree-node variable.
     *  Identity mapping here: no translation is performed for HTML output.
     * @param id The identifier name to map
     * @param tInfo translation info (unused by this generator)
     * @return the identifier unchanged
     */
    public String mapTreeId(String id, ActionTransInfo tInfo) {
        return id;
    }

    /// unused: actions are not transformed for HTML output, so the
    /// action string is returned untouched.
    protected String processActionForSpecialSymbols(String actionStr,
            int line, RuleBlock currentRule, ActionTransInfo tInfo) {
        return actionStr;
    }
761:
    /** Format a lookahead or follow set.
     *  Sets larger than numCols elements are wrapped onto indented
     *  continuation lines; lexer sets print characters, parser sets
     *  print token names.
     * @param depth The depth of the entire lookahead/follow
     * @param k The lookahead level to print
     * @param lookahead The lookahead/follow set to print
     */
    public void printSet(int depth, int k, Lookahead lookahead) {
        // Maximum number of set elements per output line.
        int numCols = 5;

        int[] elems = lookahead.fset.toArray();

        // Only label the line with "k==n:" when printing a multi-depth set.
        if (depth != 1) {
            print("k==" + k + ": {");
        } else {
            print("{ ");
        }
        // Large sets start their elements on an indented new line.
        if (elems.length > numCols) {
            _println("");
            tabs++;
            print("");
        }

        int column = 0;
        for (int i = 0; i < elems.length; i++) {
            column++;
            // NOTE(review): column resets to 0 (not 1) after wrapping, so
            // continuation lines hold numCols+1 entries while the first
            // line holds numCols — confirm this asymmetry is intended.
            if (column > numCols) {
                _println("");
                print("");
                column = 0;
            }
            // Lexer sets hold character codes; parser sets hold token types.
            if (doingLexRules) {
                _print(charFormatter.literalChar(elems[i]));
            } else {
                _print((String) grammar.tokenManager.getVocabulary()
                        .elementAt(elems[i]));
            }
            if (i != elems.length - 1) {
                _print(", ");
            }
        }

        if (elems.length > numCols) {
            _println("");
            tabs--;
            print("");
        }
        _println(" }");
    }
809: }
|