001: package persistence.antlr;
002:
003: /* ANTLR Translator Generator
004: * Project led by Terence Parr at http://www.jGuru.com
005: * Software rights: http://www.antlr.org/license.html
006: *
007: */
008:
009: import persistence.antlr.collections.Stack;
010: import persistence.antlr.collections.impl.LList;
011: import persistence.antlr.collections.impl.Vector;
012:
/**
 * Builds a grammar's internal representation (rule blocks, alternatives,
 * subrules and their elements) as the ANTLR grammar parser fires events,
 * layering structure construction on top of the symbol-definition pass.
 */
public class MakeGrammar extends DefineGrammarSymbols {

protected Stack blocks = new LList(); // track subrules--Stack<BlockContext>
// Most recently created rule reference; target of setArgOfRuleRef().
protected RuleRefElement lastRuleRef;

protected RuleEndElement ruleEnd; // used if not nested
protected RuleBlock ruleBlock; // points to block of current rule.
protected int nested = 0; // nesting inside a subrule
protected boolean grammarError = false;

// Exception spec being collected; reset to null when the spec ends.
ExceptionSpec currentExceptionSpec = null;

/** Create a grammar builder attached to the given tool and analyzer. */
public MakeGrammar(Tool tool_, String[] args_, LLkAnalyzer analyzer_) {
super (tool_, args_, analyzer_);
}
028:
029: /** Abort the processing of a grammar (due to syntax errors) */
030: public void abortGrammar() {
031: String s = "unknown grammar";
032: if (grammar != null) {
033: s = grammar.getClassName();
034: }
035: tool.error("aborting grammar '" + s + "' due to errors");
036: super .abortGrammar();
037: }
038:
/** Append element e to the current alternative, tagging it with the
 *  name of the rule being built. */
protected void addElementToCurrentAlt(AlternativeElement e) {
e.enclosingRuleName = ruleBlock.ruleName;
context().addAlternativeElement(e);
}

/** Start a new alternative in the current block.
 *  @param doAutoGen_ whether AST nodes are auto-generated for this alt. */
public void beginAlt(boolean doAutoGen_) {
super .beginAlt(doAutoGen_);
Alternative alt = new Alternative();
alt.setAutoGen(doAutoGen_);
context().block.addAlternative(alt);
}

/** Start the child list of a tree spec; modeled as a fresh alternative
 *  on the current (tree) block. */
public void beginChildList() {
super .beginChildList();
context().block.addAlternative(new Alternative());
}
055:
/** Add an exception group to a rule (currently a no-op).
 *  Exception groups are only legal directly on a rule block. */
public void beginExceptionGroup() {
super .beginExceptionGroup();
if (!(context().block instanceof RuleBlock)) {
tool
.panic("beginExceptionGroup called outside of rule block");
}
}

/** Add an exception spec to an exception group or rule block */
public void beginExceptionSpec(Token label) {
// Hack the label string a bit to remove leading/trailing space.
if (label != null) {
label.setText(StringUtils.stripFront(StringUtils.stripBack(
label.getText(), " \n\r\t"), " \n\r\t"));
}
super .beginExceptionSpec(label);
// Don't check for currentExceptionSpec!=null because syntax errors
// may leave it set to something.
currentExceptionSpec = new ExceptionSpec(label);
}
077:
/** Open a new subrule scope. The concrete subrule kind ((...)?, (...)*,
 *  (...)+, synpred) is not known yet; a plain AlternativeBlock collects
 *  alternatives until one of the *SubRule()/synPred() methods swaps it. */
public void beginSubRule(Token label, Token start, boolean not) {
super .beginSubRule(label, start, not);
// we don't know what kind of subrule it is yet.
// push a dummy one that will allow us to collect the
// alternatives. Later, we'll switch to real object.
blocks.push(new BlockContext());
context().block = new AlternativeBlock(grammar, start, not);
context().altNum = 0; // reset alternative number
nested++;
// create a final node to which the last element of each
// alternative will point.
context().blockEnd = new BlockEndElement(grammar);
// make sure end node points to start of block
context().blockEnd.block = context().block;
labelElement(context().block, label);
}

/** Open a tree spec #( ... ); only valid in a tree-walker grammar.
 *  @throws SemanticException if the grammar is not a TreeWalkerGrammar. */
public void beginTree(Token tok) throws SemanticException {
if (!(grammar instanceof TreeWalkerGrammar)) {
tool.error("Trees only allowed in TreeParser", grammar
.getFilename(), tok.getLine(), tok.getColumn());
throw new SemanticException(
"Trees only allowed in TreeParser");
}
super .beginTree(tok);
blocks.push(new TreeBlockContext());
context().block = new TreeElement(grammar, tok);
context().altNum = 0; // reset alternative number
}
107:
108: public BlockContext context() {
109: if (blocks.height() == 0) {
110: return null;
111: } else {
112: return (BlockContext) blocks.top();
113: }
114: }
115:
/**Used to build nextToken() for the lexer.
 * This builds a rule which has every "public" rule in the given Vector of
 * rules as it's alternate. Each rule ref generates a Token object.
 * @param g The Grammar that is being processed
 * @param lexRules A vector of lexer rules that will be used to create an alternate block.
 * @param rname The name of the resulting rule.
 */
public static RuleBlock createNextTokenRule(Grammar g,
Vector lexRules, String rname) {
// create actual rule data structure
RuleBlock rb = new RuleBlock(g, rname);
rb.setDefaultErrorHandler(g.getDefaultErrorHandler());
RuleEndElement ruleEnd = new RuleEndElement(g);
rb.setEndElement(ruleEnd);
ruleEnd.block = rb;
// Add an alternative for each element of the rules vector.
for (int i = 0; i < lexRules.size(); i++) {
RuleSymbol r = (RuleSymbol) lexRules.elementAt(i);
if (!r.isDefined()) {
// substring(1) strips the internal "m" rule-name prefix for the message
g.antlrTool.error("Lexer rule " + r.id.substring(1)
+ " is not defined");
} else {
// only public lexer rules participate in nextToken()
if (r.access.equals("public")) {
Alternative alt = new Alternative(); // create alt we'll add to ref rule
RuleBlock targetRuleBlock = r.getBlock();
Vector targetRuleAlts = targetRuleBlock
.getAlternatives();
// collect a sem pred if only one alt and it's at the start;
// simple, but faster to implement until real hoisting
if (targetRuleAlts != null
&& targetRuleAlts.size() == 1) {
Alternative onlyAlt = (Alternative) targetRuleAlts
.elementAt(0);
if (onlyAlt.semPred != null) {
// ok, has sem pred, make this rule ref alt have a pred
alt.semPred = onlyAlt.semPred;
// REMOVE predicate from target rule??? NOPE, another
// rule other than nextToken() might invoke it.
}
}

// create a rule ref to lexer rule
// the Token is a RULE_REF not a TOKEN_REF since the
// conversion to mRulename has already taken place
RuleRefElement rr = new RuleRefElement(g,
new CommonToken(ANTLRTokenTypes.RULE_REF, r
.getId()),
GrammarElement.AUTO_GEN_NONE);
rr.setLabel("theRetToken");
rr.enclosingRuleName = "nextToken";
rr.next = ruleEnd;
alt.addElement(rr); // add rule ref to alt
alt.setAutoGen(true); // keep text of elements
rb.addAlternative(alt); // add alt to rule block
r.addReference(rr); // track ref to this rule in rule blk
}
}
}

rb.setAutoGen(true); // keep text of elements
rb.prepareForAnalysis();
//System.out.println(rb);
return rb;
}
180:
181: /** Return block as if they had typed: "( rule )?" */
182: private AlternativeBlock createOptionalRuleRef(String rule,
183: Token start) {
184: // Make the subrule
185: AlternativeBlock blk = new AlternativeBlock(grammar, start,
186: false);
187:
188: // Make sure rule is defined
189: String mrule = CodeGenerator.encodeLexerRuleName(rule); // can only be a lexer rule!
190: if (!grammar.isDefined(mrule)) {
191: grammar.define(new RuleSymbol(mrule));
192: }
193:
194: // Make the rule ref element
195: // RK: fixme probably easier to abuse start token..
196: Token t = new CommonToken(ANTLRTokenTypes.TOKEN_REF, rule);
197: t.setLine(start.getLine());
198: t.setLine(start.getColumn());
199: RuleRefElement rref = new RuleRefElement(grammar, t,
200: GrammarElement.AUTO_GEN_NONE);
201:
202: rref.enclosingRuleName = ruleBlock.ruleName;
203:
204: // Make the end of block element
205: BlockEndElement end = new BlockEndElement(grammar);
206: end.block = blk; // end block points back to start of blk
207:
208: // Make an alternative, putting the rule ref into it
209: Alternative alt = new Alternative(rref);
210: alt.addElement(end); // last element in alt points to end of block
211:
212: // Add the alternative to this block
213: blk.addAlternative(alt);
214:
215: // create an empty (optional) alt and add to blk
216: Alternative optAlt = new Alternative();
217: optAlt.addElement(end); // points immediately to end of block
218:
219: blk.addAlternative(optAlt);
220:
221: blk.prepareForAnalysis();
222: return blk;
223: }
224:
/** Define a new rule and open its scope. Validates the rule-name case
 *  against the grammar kind (TOKEN_REF names belong to lexers), then
 *  creates the RuleBlock and pushes a fresh BlockContext for it. */
public void defineRuleName(Token r, String access,
boolean ruleAutoGen, String docComment)
throws SemanticException {
// if ( Character.isUpperCase(r.getText().charAt(0)) ) {
if (r.type == ANTLRTokenTypes.TOKEN_REF) {
if (!(grammar instanceof LexerGrammar)) {
tool.error("Lexical rule " + r.getText()
+ " defined outside of lexer", grammar
.getFilename(), r.getLine(), r.getColumn());
// recover by renaming to parser-rule case and continuing
r.setText(r.getText().toLowerCase());
}
} else {
if (grammar instanceof LexerGrammar) {
tool.error("Lexical rule names must be upper case, '"
+ r.getText() + "' is not", grammar
.getFilename(), r.getLine(), r.getColumn());
// recover by renaming to lexer-rule case and continuing
r.setText(r.getText().toUpperCase());
}
}

super .defineRuleName(r, access, ruleAutoGen, docComment);
String id = r.getText();
// if ( Character.isUpperCase(id.charAt(0)) ) { // lexer rule?
if (r.type == ANTLRTokenTypes.TOKEN_REF) { // lexer rule?
id = CodeGenerator.encodeLexerRuleName(id);
}
RuleSymbol rs = (RuleSymbol) grammar.getSymbol(id);
RuleBlock rb = new RuleBlock(grammar, r.getText(), r.getLine(),
ruleAutoGen);

// Lexer rules do not generate default error handling
rb.setDefaultErrorHandler(grammar.getDefaultErrorHandler());

ruleBlock = rb;
blocks.push(new BlockContext()); // enter new context
context().block = rb;
rs.setBlock(rb);
ruleEnd = new RuleEndElement(grammar);
rb.setEndElement(ruleEnd);
nested = 0;
}
266:
267: public void endAlt() {
268: super .endAlt();
269: if (nested == 0) { // all rule-level alts link to ruleEnd node
270: addElementToCurrentAlt(ruleEnd);
271: } else {
272: addElementToCurrentAlt(context().blockEnd);
273: }
274: context().altNum++;
275: }
276:
/** Close the child list of a tree spec. */
public void endChildList() {
super .endChildList();
// create a final node to which the last element of the single
// alternative will point. Done for compatibility with analyzer.
// Does NOT point to any block like alternative blocks because the
// TreeElement is not a block. This is used only as a placeholder.
BlockEndElement be = new BlockEndElement(grammar);
be.block = context().block;
addElementToCurrentAlt(be);
}

/** Close an exception group (no extra work beyond the superclass). */
public void endExceptionGroup() {
super .endExceptionGroup();
}
291:
292: public void endExceptionSpec() {
293: super .endExceptionSpec();
294: if (currentExceptionSpec == null) {
295: tool
296: .panic("exception processing internal error -- no active exception spec");
297: }
298: if (context().block instanceof RuleBlock) {
299: // Named rule
300: ((RuleBlock) context().block)
301: .addExceptionSpec(currentExceptionSpec);
302: } else {
303: // It must be a plain-old alternative block
304: if (context().currentAlt().exceptionSpec != null) {
305: tool
306: .error(
307: "Alternative already has an exception specification",
308: grammar.getFilename(), context().block
309: .getLine(), context().block
310: .getColumn());
311: } else {
312: context().currentAlt().exceptionSpec = currentExceptionSpec;
313: }
314: }
315: currentExceptionSpec = null;
316: }
317:
318: /** Called at the end of processing a grammar */
319: public void endGrammar() {
320: if (grammarError) {
321: abortGrammar();
322: } else {
323: super .endGrammar();
324: }
325: }
326:
327: public void endRule(String rule) {
328: super .endRule(rule);
329: BlockContext ctx = (BlockContext) blocks.pop(); // remove scope
330: // record the start of this block in the ending node
331: ruleEnd.block = ctx.block;
332: ruleEnd.block.prepareForAnalysis();
333: //System.out.println(ctx.block);
334: }
335:
/** Close the current subrule scope: validate "~" inversion, then either
 *  record the block as a syntactic predicate on the enclosing alt or
 *  append it as an ordinary element of the enclosing alternative. */
public void endSubRule() {
super .endSubRule();
nested--;
// remove subrule context from scope stack
BlockContext ctx = (BlockContext) blocks.pop();
AlternativeBlock block = ctx.block;

// If the subrule is marked with ~, check that it is
// a valid candidate for analysis
if (block.not && !(block instanceof SynPredBlock)
&& !(block instanceof ZeroOrMoreBlock)
&& !(block instanceof OneOrMoreBlock)) {
if (!analyzer.subruleCanBeInverted(block,
grammar instanceof LexerGrammar)) {
String newline = System.getProperty("line.separator");
tool
.error(
"This subrule cannot be inverted. Only subrules of the form:"
+ newline
+ " (T1|T2|T3...) or"
+ newline
+ " ('c1'|'c2'|'c3'...)"
+ newline
+ "may be inverted (ranges are also allowed).",
grammar.getFilename(), block.getLine(),
block.getColumn());
}
}

// add the subrule as element if not a syn pred
if (block instanceof SynPredBlock) {
// record a reference to the recently-recognized syn pred in the
// enclosing block.
SynPredBlock synpred = (SynPredBlock) block;
context().block.hasASynPred = true;
context().currentAlt().synPred = synpred;
grammar.hasSyntacticPredicate = true;
synpred.removeTrackingOfRuleRefs(grammar);
} else {
addElementToCurrentAlt(block);
}
ctx.blockEnd.block.prepareForAnalysis();
}
379:
/** Close a tree spec scope and append the TreeElement to the enclosing alt. */
public void endTree() {
super .endTree();
BlockContext ctx = (BlockContext) blocks.pop();
addElementToCurrentAlt(ctx.block); // add new TreeElement to enclosing alt.
}

/** Remember that a major error occured in the grammar; endGrammar()
 *  will abort instead of finishing. */
public void hasError() {
grammarError = true;
}
390:
391: private void labelElement(AlternativeElement el, Token label) {
392: if (label != null) {
393: // Does this label already exist?
394: for (int i = 0; i < ruleBlock.labeledElements.size(); i++) {
395: AlternativeElement altEl = (AlternativeElement) ruleBlock.labeledElements
396: .elementAt(i);
397: String l = altEl.getLabel();
398: if (l != null && l.equals(label.getText())) {
399: tool.error("Label '" + label.getText()
400: + "' has already been defined", grammar
401: .getFilename(), label.getLine(), label
402: .getColumn());
403: return;
404: }
405: }
406: // add this node to the list of labeled elements
407: el.setLabel(label.getText());
408: ruleBlock.labeledElements.appendElement(el);
409: }
410: }
411:
/** Suppress AST auto-generation for the current subrule ("!" suffix). */
public void noAutoGenSubRule() {
context().block.setAutoGen(false);
}
415:
416: public void oneOrMoreSubRule() {
417: if (context().block.not) {
418: tool.error("'~' cannot be applied to (...)* subrule",
419: grammar.getFilename(), context().block.getLine(),
420: context().block.getColumn());
421: }
422: // create the right kind of object now that we know what that is
423: // and switch the list of alternatives. Adjust the stack of blocks.
424: // copy any init action also.
425: OneOrMoreBlock b = new OneOrMoreBlock(grammar);
426: setBlock(b, context().block);
427: BlockContext old = (BlockContext) blocks.pop(); // remove old scope; we want new type of subrule
428: blocks.push(new BlockContext());
429: context().block = b;
430: context().blockEnd = old.blockEnd;
431: context().blockEnd.block = b;
432: }
433:
434: public void optionalSubRule() {
435: if (context().block.not) {
436: tool.error("'~' cannot be applied to (...)? subrule",
437: grammar.getFilename(), context().block.getLine(),
438: context().block.getColumn());
439: }
440: // convert (X)? -> (X|) so that we can ignore optional blocks altogether!
441: // It already thinks that we have a simple subrule, just add option block.
442: beginAlt(false);
443: endAlt();
444: }
445:
/** Append a user action {...} as an element of the current alternative. */
public void refAction(Token action) {
super .refAction(action);
context().block.hasAnAction = true;
addElementToCurrentAlt(new ActionElement(grammar, action));
}

/** Record the rule's user-specified throws clause. Only called for rule blocks. */
public void setUserExceptions(String thr) {
((RuleBlock) context().block).throwsSpec = thr;
}

// Only called for rule blocks
/** Record the rule's parameter (argument) action text. */
public void refArgAction(Token action) {
((RuleBlock) context().block).argAction = action.getText();
}
460:
/** Reference a character literal; only valid in a lexer grammar.
 *  Warns about non-lowercase ASCII literals when caseSensitive=false,
 *  and appends the optional ignore-rule call when configured. */
public void refCharLiteral(Token lit, Token label,
boolean inverted, int autoGenType, boolean lastInRule) {
if (!(grammar instanceof LexerGrammar)) {
tool.error("Character literal only valid in lexer", grammar
.getFilename(), lit.getLine(), lit.getColumn());
return;
}
super .refCharLiteral(lit, label, inverted, autoGenType,
lastInRule);
CharLiteralElement cl = new CharLiteralElement(
(LexerGrammar) grammar, lit, inverted, autoGenType);

// Generate a warning for non-lowercase ASCII when case-insensitive
if (!((LexerGrammar) grammar).caseSensitive
&& cl.getType() < 128
&& Character.toLowerCase((char) cl.getType()) != (char) cl
.getType()) {
tool
.warning(
"Character literal must be lowercase when caseSensitive=false",
grammar.getFilename(), lit.getLine(), lit
.getColumn());
}

addElementToCurrentAlt(cl);
labelElement(cl, label);

// if ignore option is set, must add an optional call to the specified rule.
String ignore = ruleBlock.getIgnoreRule();
if (!lastInRule && ignore != null) {
addElementToCurrentAlt(createOptionalRuleRef(ignore, lit));
}
}
494:
/** Reference a character range 'a'..'z'; only valid in a lexer grammar.
 *  Validates range ordering, warns about non-lowercase ASCII endpoints
 *  when caseSensitive=false, and appends the ignore-rule call if set. */
public void refCharRange(Token t1, Token t2, Token label,
int autoGenType, boolean lastInRule) {
if (!(grammar instanceof LexerGrammar)) {
tool.error("Character range only valid in lexer", grammar
.getFilename(), t1.getLine(), t1.getColumn());
return;
}
int rangeMin = ANTLRLexer.tokenTypeForCharLiteral(t1.getText());
int rangeMax = ANTLRLexer.tokenTypeForCharLiteral(t2.getText());
if (rangeMax < rangeMin) {
tool.error("Malformed range.", grammar.getFilename(), t1
.getLine(), t1.getColumn());
return;
}

// Generate a warning for non-lowercase ASCII when case-insensitive
if (!((LexerGrammar) grammar).caseSensitive) {
if (rangeMin < 128
&& Character.toLowerCase((char) rangeMin) != (char) rangeMin) {
tool
.warning(
"Character literal must be lowercase when caseSensitive=false",
grammar.getFilename(), t1.getLine(), t1
.getColumn());
}
if (rangeMax < 128
&& Character.toLowerCase((char) rangeMax) != (char) rangeMax) {
tool
.warning(
"Character literal must be lowercase when caseSensitive=false",
grammar.getFilename(), t2.getLine(), t2
.getColumn());
}
}

super .refCharRange(t1, t2, label, autoGenType, lastInRule);
CharRangeElement cr = new CharRangeElement(
(LexerGrammar) grammar, t1, t2, autoGenType);
addElementToCurrentAlt(cr);
labelElement(cr, label);

// if ignore option is set, must add an optional call to the specified rule.
String ignore = ruleBlock.getIgnoreRule();
if (!lastInRule && ignore != null) {
addElementToCurrentAlt(createOptionalRuleRef(ignore, t1));
}
}
542:
543: public void refTokensSpecElementOption(Token tok, Token option,
544: Token value) {
545: /*
546: System.out.println("setting tokens spec option for "+tok.getText());
547: System.out.println(option.getText()+","+value.getText());
548: */
549: TokenSymbol ts = (TokenSymbol) grammar.tokenManager
550: .getTokenSymbol(tok.getText());
551: if (ts == null) {
552: tool.panic("cannot find " + tok.getText()
553: + "in tokens {...}");
554: }
555: if (option.getText().equals("AST")) {
556: ts.setASTNodeType(value.getText());
557: } else {
558: grammar.antlrTool.error(
559: "invalid tokens {...} element option:"
560: + option.getText(), grammar.getFilename(),
561: option.getLine(), option.getColumn());
562: }
563: }
564:
565: public void refElementOption(Token option, Token value) {
566: /*
567: System.out.println("setting option for "+context().currentElement());
568: System.out.println(option.getText()+","+value.getText());
569: */
570: AlternativeElement e = context().currentElement();
571: if (e instanceof StringLiteralElement
572: || e instanceof TokenRefElement
573: || e instanceof WildcardElement) {
574: ((GrammarAtom) e).setOption(option, value);
575: } else {
576: tool.error("cannot use element option (" + option.getText()
577: + ") for this kind of element", grammar
578: .getFilename(), option.getLine(), option
579: .getColumn());
580: }
581: }
582:
/** Add an exception handler to an exception spec */
public void refExceptionHandler(Token exTypeAndName, Token action) {
super .refExceptionHandler(exTypeAndName, action);
if (currentExceptionSpec == null) {
tool.panic("exception handler processing internal error");
}
currentExceptionSpec.addHandler(new ExceptionHandler(
exTypeAndName, action));
}

/** Record the current block's init action; note it deliberately
 *  delegates to super.refAction(), not a refInitAction() override. */
public void refInitAction(Token action) {
super .refAction(action);
context().block.setInitAction(action.getText());
}

/** Record the grammar's class-member action ({...} before the rules). */
public void refMemberAction(Token act) {
grammar.classMemberAction = act;
}

/** Record a preamble action (handled entirely by the superclass). */
public void refPreambleAction(Token act) {
super .refPreambleAction(act);
}
605:
606: // Only called for rule blocks
607: public void refReturnAction(Token returnAction) {
608: if (grammar instanceof LexerGrammar) {
609: String name = CodeGenerator
610: .encodeLexerRuleName(((RuleBlock) context().block)
611: .getRuleName());
612: RuleSymbol rs = (RuleSymbol) grammar.getSymbol(name);
613: if (rs.access.equals("public")) {
614: tool
615: .warning(
616: "public Lexical rules cannot specify return type",
617: grammar.getFilename(), returnAction
618: .getLine(), returnAction
619: .getColumn());
620: return;
621: }
622: }
623: ((RuleBlock) context().block).returnAction = returnAction
624: .getText();
625: }
626:
/** Reference another rule from the current alternative, recording args,
 *  an optional assignment target, and the label; updates the target
 *  rule's symbol so it knows about this reference. */
public void refRule(Token idAssign, Token r, Token label,
Token args, int autoGenType) {
// Disallow parser rule references in the lexer
if (grammar instanceof LexerGrammar) {
// if (!Character.isUpperCase(r.getText().charAt(0))) {
if (r.type != ANTLRTokenTypes.TOKEN_REF) {
tool.error("Parser rule " + r.getText()
+ " referenced in lexer");
return;
}
if (autoGenType == GrammarElement.AUTO_GEN_CARET) {
tool.error("AST specification ^ not allowed in lexer",
grammar.getFilename(), r.getLine(), r
.getColumn());
}
}

super .refRule(idAssign, r, label, args, autoGenType);
lastRuleRef = new RuleRefElement(grammar, r, autoGenType);
if (args != null) {
lastRuleRef.setArgs(args.getText());
}
if (idAssign != null) {
lastRuleRef.setIdAssign(idAssign.getText());
}
addElementToCurrentAlt(lastRuleRef);

String id = r.getText();
// if ( Character.isUpperCase(id.charAt(0)) ) { // lexer rule?
if (r.type == ANTLRTokenTypes.TOKEN_REF) { // lexer rule?
id = CodeGenerator.encodeLexerRuleName(id);
}
// update symbol table so it knows what nodes reference the rule.
RuleSymbol rs = (RuleSymbol) grammar.getSymbol(id);
rs.addReference(lastRuleRef);
labelElement(lastRuleRef, label);
}
664:
665: public void refSemPred(Token pred) {
666: //System.out.println("refSemPred "+pred.getText());
667: super .refSemPred(pred);
668: //System.out.println("context().block: "+context().block);
669: if (context().currentAlt().atStart()) {
670: context().currentAlt().semPred = pred.getText();
671: } else {
672: ActionElement a = new ActionElement(grammar, pred);
673: a.isSemPred = true;
674: addElementToCurrentAlt(a);
675: }
676: //System.out.println("DONE refSemPred "+pred.getText());
677: }
678:
/** Reference a string literal from the current alternative. Rejects ^
 *  in tree-walkers, warns about non-lowercase characters when the lexer
 *  is case-insensitive, and appends the ignore-rule call when set. */
public void refStringLiteral(Token lit, Token label,
int autoGenType, boolean lastInRule) {
super .refStringLiteral(lit, label, autoGenType, lastInRule);
if (grammar instanceof TreeWalkerGrammar
&& autoGenType == GrammarElement.AUTO_GEN_CARET) {
tool.error("^ not allowed in here for tree-walker", grammar
.getFilename(), lit.getLine(), lit.getColumn());
}
StringLiteralElement sl = new StringLiteralElement(grammar,
lit, autoGenType);

// If case-insensitive, then check each char of the string literal
// (skipping the surrounding quote characters).
if (grammar instanceof LexerGrammar
&& !((LexerGrammar) grammar).caseSensitive) {
for (int i = 1; i < lit.getText().length() - 1; i++) {
char c = lit.getText().charAt(i);
if (c < 128 && Character.toLowerCase(c) != c) {
tool
.warning(
"Characters of string literal must be lowercase when caseSensitive=false",
grammar.getFilename(), lit
.getLine(), lit.getColumn());
break;
}
}
}

addElementToCurrentAlt(sl);
labelElement(sl, label);

// if ignore option is set, must add an optional call to the specified rule.
String ignore = ruleBlock.getIgnoreRule();
if (!lastInRule && ignore != null) {
addElementToCurrentAlt(createOptionalRuleRef(ignore, lit));
}
}
715:
/** Reference a token. In a lexer, token references are really rule
 *  references (delegates to refRule); elsewhere a TokenRefElement is
 *  appended, and args/assignment are rejected as lexer-only features. */
public void refToken(Token idAssign, Token t, Token label,
Token args, boolean inverted, int autoGenType,
boolean lastInRule) {
if (grammar instanceof LexerGrammar) {
// In lexer, token references are really rule references
if (autoGenType == GrammarElement.AUTO_GEN_CARET) {
tool.error("AST specification ^ not allowed in lexer",
grammar.getFilename(), t.getLine(), t
.getColumn());
}
if (inverted) {
tool.error("~TOKEN is not allowed in lexer", grammar
.getFilename(), t.getLine(), t.getColumn());
}
refRule(idAssign, t, label, args, autoGenType);

// if ignore option is set, must add an optional call to the specified token rule.
String ignore = ruleBlock.getIgnoreRule();
if (!lastInRule && ignore != null) {
addElementToCurrentAlt(createOptionalRuleRef(ignore, t));
}
} else {
// Cannot have token ref args or assignment outside of lexer
if (idAssign != null) {
tool
.error(
"Assignment from token reference only allowed in lexer",
grammar.getFilename(), idAssign
.getLine(), idAssign
.getColumn());
}
if (args != null) {
tool
.error(
"Token reference arguments only allowed in lexer",
grammar.getFilename(), args.getLine(),
args.getColumn());
}
super .refToken(idAssign, t, label, args, inverted,
autoGenType, lastInRule);
TokenRefElement te = new TokenRefElement(grammar, t,
inverted, autoGenType);
addElementToCurrentAlt(te);
labelElement(te, label);
}
}
762:
/** Reference a token range T1..T2; not allowed in lexers (use char
 *  ranges there). Validates that the range is properly ordered. */
public void refTokenRange(Token t1, Token t2, Token label,
int autoGenType, boolean lastInRule) {
if (grammar instanceof LexerGrammar) {
tool.error("Token range not allowed in lexer", grammar
.getFilename(), t1.getLine(), t1.getColumn());
return;
}
super .refTokenRange(t1, t2, label, autoGenType, lastInRule);
TokenRangeElement tr = new TokenRangeElement(grammar, t1, t2,
autoGenType);
if (tr.end < tr.begin) {
tool.error("Malformed range.", grammar.getFilename(), t1
.getLine(), t1.getColumn());
return;
}
addElementToCurrentAlt(tr);
labelElement(tr, label);
}

/** Record the tree specifier token on the current alternative. */
public void refTreeSpecifier(Token treeSpec) {
context().currentAlt().treeSpecifier = treeSpec;
}
785:
/** Reference the wildcard "." from the current alternative. */
public void refWildcard(Token t, Token label, int autoGenType) {
super .refWildcard(t, label, autoGenType);
WildcardElement wc = new WildcardElement(grammar, t,
autoGenType);
addElementToCurrentAlt(wc);
labelElement(wc, label);
}

/** Get ready to process a new grammar */
public void reset() {
super .reset();
// Clear all per-grammar builder state.
blocks = new LList();
lastRuleRef = null;
ruleEnd = null;
ruleBlock = null;
nested = 0;
currentExceptionSpec = null;
grammarError = false;
}

/** Attach an argument action to the most recent rule reference. */
public void setArgOfRuleRef(Token argAction) {
super .setArgOfRuleRef(argAction);
lastRuleRef.setArgs(argAction.getText());
}
810:
/** Shallow-copy block state (alternatives, init action, label, flags,
 *  greedy settings, source line) from src into b. Used when swapping a
 *  generic collected subrule for its final concrete block type. */
public static void setBlock(AlternativeBlock b, AlternativeBlock src) {
b.setAlternatives(src.getAlternatives());
b.initAction = src.initAction;
//b.lookaheadDepth = src.lookaheadDepth;
b.label = src.label;
b.hasASynPred = src.hasASynPred;
b.hasAnAction = src.hasAnAction;
b.warnWhenFollowAmbig = src.warnWhenFollowAmbig;
b.generateAmbigWarnings = src.generateAmbigWarnings;
b.line = src.line;
b.greedy = src.greedy;
b.greedySet = src.greedySet;
}
824:
/** Set an options{} entry on the current rule. */
public void setRuleOption(Token key, Token value) {
//((RuleBlock)context().block).setOption(key, value);
ruleBlock.setOption(key, value);
}

/** Set an options{} entry on the current subrule block. */
public void setSubruleOption(Token key, Token value) {
((AlternativeBlock) context().block).setOption(key, value);
}
833:
834: public void synPred() {
835: if (context().block.not) {
836: tool.error("'~' cannot be applied to syntactic predicate",
837: grammar.getFilename(), context().block.getLine(),
838: context().block.getColumn());
839: }
840: // create the right kind of object now that we know what that is
841: // and switch the list of alternatives. Adjust the stack of blocks.
842: // copy any init action also.
843: SynPredBlock b = new SynPredBlock(grammar);
844: setBlock(b, context().block);
845: BlockContext old = (BlockContext) blocks.pop(); // remove old scope; we want new type of subrule
846: blocks.push(new BlockContext());
847: context().block = b;
848: context().blockEnd = old.blockEnd;
849: context().blockEnd.block = b;
850: }
851:
852: public void zeroOrMoreSubRule() {
853: if (context().block.not) {
854: tool.error("'~' cannot be applied to (...)+ subrule",
855: grammar.getFilename(), context().block.getLine(),
856: context().block.getColumn());
857: }
858: // create the right kind of object now that we know what that is
859: // and switch the list of alternatives. Adjust the stack of blocks.
860: // copy any init action also.
861: ZeroOrMoreBlock b = new ZeroOrMoreBlock(grammar);
862: setBlock(b, context().block);
863: BlockContext old = (BlockContext) blocks.pop(); // remove old scope; we want new type of subrule
864: blocks.push(new BlockContext());
865: context().block = b;
866: context().blockEnd = old.blockEnd;
867: context().blockEnd.block = b;
868: }
869: }
|