001: package antlr;
002:
003: /* ANTLR Translator Generator
004: * Project led by Terence Parr at http://www.cs.usfca.edu
005: * Software rights: http://www.antlr.org/license.html
006: */
007:
008: import antlr.collections.Stack;
009: import antlr.collections.impl.LList;
010: import antlr.collections.impl.Vector;
011:
public class MakeGrammar extends DefineGrammarSymbols {

    /** Stack of BlockContext objects tracking subrule nesting. */
    protected Stack blocks = new LList(); // track subrules--Stack<BlockContext>
    /** Most recently created rule reference; target of setArgOfRuleRef(). */
    protected RuleRefElement lastRuleRef;

    protected RuleEndElement ruleEnd; // used if not nested
    protected RuleBlock ruleBlock; // points to block of current rule.
    protected int nested = 0; // nesting inside a subrule
    protected boolean grammarError = false; // set by hasError(); checked in endGrammar()

    // Exception spec being built between beginExceptionSpec()/endExceptionSpec().
    ExceptionSpec currentExceptionSpec = null;

    /** Construct the grammar-building behavior on top of symbol definition. */
    public MakeGrammar(Tool tool_, String[] args_, LLkAnalyzer analyzer_) {
        super(tool_, args_, analyzer_);
    }
027:
028: /** Abort the processing of a grammar (due to syntax errors) */
029: public void abortGrammar() {
030: String s = "unknown grammar";
031: if (grammar != null) {
032: s = grammar.getClassName();
033: }
034: tool.error("aborting grammar '" + s + "' due to errors");
035: super .abortGrammar();
036: }
037:
/** Tag e with the enclosing rule's name and append it to the current alternative. */
protected void addElementToCurrentAlt(AlternativeElement e) {
    e.enclosingRuleName = ruleBlock.ruleName;
    context().addAlternativeElement(e);
}
042:
043: public void beginAlt(boolean doAutoGen_) {
044: super .beginAlt(doAutoGen_);
045: Alternative alt = new Alternative();
046: alt.setAutoGen(doAutoGen_);
047: context().block.addAlternative(alt);
048: }
049:
050: public void beginChildList() {
051: super .beginChildList();
052: context().block.addAlternative(new Alternative());
053: }
054:
055: /** Add an exception group to a rule (currently a no-op) */
056: public void beginExceptionGroup() {
057: super .beginExceptionGroup();
058: if (!(context().block instanceof RuleBlock)) {
059: tool
060: .fatalError("beginExceptionGroup called outside of rule block");
061: }
062: }
063:
064: /** Add an exception spec to an exception group or rule block */
065: public void beginExceptionSpec(Token label) {
066: // Hack the label string a bit to remove leading/trailing space.
067: if (label != null) {
068: label.setText(StringUtils.stripFront(StringUtils.stripBack(
069: label.getText(), " \n\r\t"), " \n\r\t"));
070: }
071: super .beginExceptionSpec(label);
072: // Don't check for currentExceptionSpec!=null because syntax errors
073: // may leave it set to something.
074: currentExceptionSpec = new ExceptionSpec(label);
075: }
076:
/**
 * Enter a subrule.  Pushes a placeholder AlternativeBlock context; the
 * placeholder may later be swapped for the real subrule type (see
 * oneOrMoreSubRule, zeroOrMoreSubRule, synPred) once the suffix is seen.
 */
public void beginSubRule(Token label, Token start, boolean not) {
    super.beginSubRule(label, start, not);
    // we don't know what kind of subrule it is yet.
    // push a dummy one that will allow us to collect the
    // alternatives. Later, we'll switch to real object.
    blocks.push(new BlockContext());
    context().block = new AlternativeBlock(grammar, start, not);
    context().altNum = 0; // reset alternative number
    nested++;
    // create a final node to which the last element of each
    // alternative will point.
    context().blockEnd = new BlockEndElement(grammar);
    // make sure end node points to start of block
    context().blockEnd.block = context().block;
    labelElement(context().block, label);
}
093:
094: public void beginTree(Token tok) throws SemanticException {
095: if (!(grammar instanceof TreeWalkerGrammar)) {
096: tool.error("Trees only allowed in TreeParser", grammar
097: .getFilename(), tok.getLine(), tok.getColumn());
098: throw new SemanticException(
099: "Trees only allowed in TreeParser");
100: }
101: super .beginTree(tok);
102: blocks.push(new TreeBlockContext());
103: context().block = new TreeElement(grammar, tok);
104: context().altNum = 0; // reset alternative number
105: }
106:
107: public BlockContext context() {
108: if (blocks.height() == 0) {
109: return null;
110: } else {
111: return (BlockContext) blocks.top();
112: }
113: }
114:
/** Used to build nextToken() for the lexer.
 * This builds a rule which has every "public" rule in the given Vector of
 * rules as its alternative.  Each rule ref generates a Token object.
 * @param g The Grammar that is being processed
 * @param lexRules A vector of lexer rules that will be used to create an alternate block.
 * @param rname The name of the resulting rule.
 */
public static RuleBlock createNextTokenRule(Grammar g,
        Vector lexRules, String rname) {
    // create actual rule data structure
    RuleBlock rb = new RuleBlock(g, rname);
    rb.setDefaultErrorHandler(g.getDefaultErrorHandler());
    RuleEndElement ruleEnd = new RuleEndElement(g);
    rb.setEndElement(ruleEnd);
    ruleEnd.block = rb;
    // Add an alternative for each element of the rules vector.
    for (int i = 0; i < lexRules.size(); i++) {
        RuleSymbol r = (RuleSymbol) lexRules.elementAt(i);
        if (!r.isDefined()) {
            // substring(1) strips the lexer-rule name-mangling prefix
            g.antlrTool.error("Lexer rule " + r.id.substring(1)
                    + " is not defined");
        } else {
            if (r.access.equals("public")) {
                Alternative alt = new Alternative(); // create alt we'll add to ref rule
                RuleBlock targetRuleBlock = r.getBlock();
                Vector targetRuleAlts = targetRuleBlock.getAlternatives();
                // collect a sem pred if only one alt and it's at the start;
                // simple, but faster to implement until real hoisting
                if (targetRuleAlts != null
                        && targetRuleAlts.size() == 1) {
                    Alternative onlyAlt = (Alternative) targetRuleAlts
                            .elementAt(0);
                    if (onlyAlt.semPred != null) {
                        // ok, has sem pred, make this rule ref alt have a pred
                        alt.semPred = onlyAlt.semPred;
                        // REMOVE predicate from target rule??? NOPE, another
                        // rule other than nextToken() might invoke it.
                    }
                }

                // create a rule ref to lexer rule
                // the Token is a RULE_REF not a TOKEN_REF since the
                // conversion to mRulename has already taken place
                RuleRefElement rr = new RuleRefElement(g,
                        new CommonToken(ANTLRTokenTypes.RULE_REF, r
                                .getId()),
                        GrammarElement.AUTO_GEN_NONE);
                rr.setLabel("theRetToken");
                rr.enclosingRuleName = "nextToken";
                rr.next = ruleEnd;
                alt.addElement(rr); // add rule ref to alt
                alt.setAutoGen(true); // keep text of elements
                rb.addAlternative(alt); // add alt to rule block
                r.addReference(rr); // track ref to this rule in rule blk
            }
        }
    }

    rb.setAutoGen(true); // keep text of elements
    rb.prepareForAnalysis();
    return rb;
}
179:
180: /** Return block as if they had typed: "( rule )?" */
181: private AlternativeBlock createOptionalRuleRef(String rule,
182: Token start) {
183: // Make the subrule
184: AlternativeBlock blk = new AlternativeBlock(grammar, start,
185: false);
186:
187: // Make sure rule is defined
188: String mrule = CodeGenerator.encodeLexerRuleName(rule); // can only be a lexer rule!
189: if (!grammar.isDefined(mrule)) {
190: grammar.define(new RuleSymbol(mrule));
191: }
192:
193: // Make the rule ref element
194: // RK: fixme probably easier to abuse start token..
195: Token t = new CommonToken(ANTLRTokenTypes.TOKEN_REF, rule);
196: t.setLine(start.getLine());
197: t.setLine(start.getColumn());
198: RuleRefElement rref = new RuleRefElement(grammar, t,
199: GrammarElement.AUTO_GEN_NONE);
200:
201: rref.enclosingRuleName = ruleBlock.ruleName;
202:
203: // Make the end of block element
204: BlockEndElement end = new BlockEndElement(grammar);
205: end.block = blk; // end block points back to start of blk
206:
207: // Make an alternative, putting the rule ref into it
208: Alternative alt = new Alternative(rref);
209: alt.addElement(end); // last element in alt points to end of block
210:
211: // Add the alternative to this block
212: blk.addAlternative(alt);
213:
214: // create an empty (optional) alt and add to blk
215: Alternative optAlt = new Alternative();
216: optAlt.addElement(end); // points immediately to end of block
217:
218: blk.addAlternative(optAlt);
219:
220: blk.prepareForAnalysis();
221: return blk;
222: }
223:
/**
 * Define a new rule and set up the structures used while its body is read.
 * Validates that TOKEN_REF-style (uppercase) names appear only in lexers
 * and vice versa, repairing the case on error so processing can continue.
 * @throws SemanticException propagated from the superclass definition step
 */
public void defineRuleName(Token r, String access,
        boolean ruleAutoGen, String docComment)
        throws SemanticException {
    // if ( Character.isUpperCase(r.getText().charAt(0)) ) {
    if (r.getType() == ANTLRTokenTypes.TOKEN_REF) {
        if (!(grammar instanceof LexerGrammar)) {
            tool.error("Lexical rule " + r.getText()
                    + " defined outside of lexer", grammar
                    .getFilename(), r.getLine(), r.getColumn());
            // recover: demote name so processing can continue as parser rule
            r.setText(r.getText().toLowerCase());
        }
    } else {
        if (grammar instanceof LexerGrammar) {
            tool.error("Lexical rule names must be upper case, '"
                    + r.getText() + "' is not", grammar
                    .getFilename(), r.getLine(), r.getColumn());
            // recover: promote name to the lexer convention
            r.setText(r.getText().toUpperCase());
        }
    }

    super.defineRuleName(r, access, ruleAutoGen, docComment);
    String id = r.getText();
    // if ( Character.isUpperCase(id.charAt(0)) ) { // lexer rule?
    if (r.getType() == ANTLRTokenTypes.TOKEN_REF) { // lexer rule?
        id = CodeGenerator.encodeLexerRuleName(id);
    }
    RuleSymbol rs = (RuleSymbol) grammar.getSymbol(id);
    RuleBlock rb = new RuleBlock(grammar, r.getText(), r.getLine(),
            ruleAutoGen);

    // Lexer rules do not generate default error handling
    rb.setDefaultErrorHandler(grammar.getDefaultErrorHandler());

    ruleBlock = rb;
    blocks.push(new BlockContext()); // enter new context
    context().block = rb;
    rs.setBlock(rb);
    ruleEnd = new RuleEndElement(grammar);
    rb.setEndElement(ruleEnd);
    nested = 0; // rule level: alternatives link straight to ruleEnd
}
265:
/** Finish the current alternative, linking it to the appropriate end node. */
public void endAlt() {
    super.endAlt();
    if (nested == 0) { // all rule-level alts link to ruleEnd node
        addElementToCurrentAlt(ruleEnd);
    } else {
        // subrule alts link to the enclosing block's end element
        addElementToCurrentAlt(context().blockEnd);
    }
    context().altNum++;
}
275:
276: public void endChildList() {
277: super .endChildList();
278: // create a final node to which the last elememt of the single
279: // alternative will point. Done for compatibility with analyzer.
280: // Does NOT point to any block like alternative blocks because the
281: // TreeElement is not a block. This is used only as a placeholder.
282: BlockEndElement be = new BlockEndElement(grammar);
283: be.block = context().block;
284: addElementToCurrentAlt(be);
285: }
286:
/** Close an exception group; no extra work beyond the superclass hook. */
public void endExceptionGroup() {
    super.endExceptionGroup();
}
290:
291: public void endExceptionSpec() {
292: super .endExceptionSpec();
293: if (currentExceptionSpec == null) {
294: tool
295: .fatalError("Exception processing internal error -- no active exception spec");
296: }
297: if (context().block instanceof RuleBlock) {
298: // Named rule
299: ((RuleBlock) context().block)
300: .addExceptionSpec(currentExceptionSpec);
301: } else {
302: // It must be a plain-old alternative block
303: if (context().currentAlt().exceptionSpec != null) {
304: tool
305: .error(
306: "Alternative already has an exception specification",
307: grammar.getFilename(), context().block
308: .getLine(), context().block
309: .getColumn());
310: } else {
311: context().currentAlt().exceptionSpec = currentExceptionSpec;
312: }
313: }
314: currentExceptionSpec = null;
315: }
316:
317: /** Called at the end of processing a grammar */
318: public void endGrammar() {
319: if (grammarError) {
320: abortGrammar();
321: } else {
322: super .endGrammar();
323: }
324: }
325:
326: public void endRule(String rule) {
327: super .endRule(rule);
328: BlockContext ctx = (BlockContext) blocks.pop(); // remove scope
329: // record the start of this block in the ending node
330: ruleEnd.block = ctx.block;
331: ruleEnd.block.prepareForAnalysis();
332: //System.out.println(ctx.block);
333: }
334:
/**
 * Finish a subrule: pop its context, validate ~ inversion, and either
 * record a syntactic predicate on the enclosing alt or add the block as
 * an ordinary element of the enclosing alternative.
 */
public void endSubRule() {
    super.endSubRule();
    nested--;
    // remove subrule context from scope stack
    BlockContext ctx = (BlockContext) blocks.pop();
    AlternativeBlock block = ctx.block;

    // If the subrule is marked with ~, check that it is
    // a valid candidate for analysis
    if (block.not && !(block instanceof SynPredBlock)
            && !(block instanceof ZeroOrMoreBlock)
            && !(block instanceof OneOrMoreBlock)) {
        if (!analyzer.subruleCanBeInverted(block,
                grammar instanceof LexerGrammar)) {
            String newline = System.getProperty("line.separator");
            tool
                    .error(
                            "This subrule cannot be inverted.  Only subrules of the form:"
                                    + newline
                                    + "    (T1|T2|T3...) or"
                                    + newline
                                    + "    ('c1'|'c2'|'c3'...)"
                                    + newline
                                    + "may be inverted (ranges are also allowed).",
                            grammar.getFilename(), block.getLine(),
                            block.getColumn());
        }
    }

    // add the subrule as element if not a syn pred
    if (block instanceof SynPredBlock) {
        // record a reference to the recently-recognized syn pred in the
        // enclosing block.
        SynPredBlock synpred = (SynPredBlock) block;
        context().block.hasASynPred = true;
        context().currentAlt().synPred = synpred;
        grammar.hasSyntacticPredicate = true;
        synpred.removeTrackingOfRuleRefs(grammar);
    } else {
        addElementToCurrentAlt(block);
    }
    ctx.blockEnd.block.prepareForAnalysis();
}
378:
379: public void endTree() {
380: super .endTree();
381: BlockContext ctx = (BlockContext) blocks.pop();
382: addElementToCurrentAlt(ctx.block); // add new TreeElement to enclosing alt.
383: }
384:
385: /** Remember that a major error occured in the grammar */
386: public void hasError() {
387: grammarError = true;
388: }
389:
390: private void labelElement(AlternativeElement el, Token label) {
391: if (label != null) {
392: // Does this label already exist?
393: for (int i = 0; i < ruleBlock.labeledElements.size(); i++) {
394: AlternativeElement altEl = (AlternativeElement) ruleBlock.labeledElements
395: .elementAt(i);
396: String l = altEl.getLabel();
397: if (l != null && l.equals(label.getText())) {
398: tool.error("Label '" + label.getText()
399: + "' has already been defined", grammar
400: .getFilename(), label.getLine(), label
401: .getColumn());
402: return;
403: }
404: }
405: // add this node to the list of labeled elements
406: el.setLabel(label.getText());
407: ruleBlock.labeledElements.appendElement(el);
408: }
409: }
410:
/** Suppress AST construction for the current subrule (the ! suffix). */
public void noASTSubRule() {
    context().block.setAutoGen(false);
}
414:
415: public void oneOrMoreSubRule() {
416: if (context().block.not) {
417: tool.error("'~' cannot be applied to (...)* subrule",
418: grammar.getFilename(), context().block.getLine(),
419: context().block.getColumn());
420: }
421: // create the right kind of object now that we know what that is
422: // and switch the list of alternatives. Adjust the stack of blocks.
423: // copy any init action also.
424: OneOrMoreBlock b = new OneOrMoreBlock(grammar);
425: setBlock(b, context().block);
426: BlockContext old = (BlockContext) blocks.pop(); // remove old scope; we want new type of subrule
427: blocks.push(new BlockContext());
428: context().block = b;
429: context().blockEnd = old.blockEnd;
430: context().blockEnd.block = b;
431: }
432:
433: public void optionalSubRule() {
434: if (context().block.not) {
435: tool.error("'~' cannot be applied to (...)? subrule",
436: grammar.getFilename(), context().block.getLine(),
437: context().block.getColumn());
438: }
439: // convert (X)? -> (X|) so that we can ignore optional blocks altogether!
440: // It already thinks that we have a simple subrule, just add option block.
441: beginAlt(false);
442: endAlt();
443: }
444:
/** Add an inline action {...} as an element of the current alternative. */
public void refAction(Token action) {
    super.refAction(action);
    context().block.hasAnAction = true;
    addElementToCurrentAlt(new ActionElement(grammar, action));
}
450:
/** Record the user-declared throws clause on the current rule block. */
public void setUserExceptions(String thr) {
    // only called for rule blocks, hence the unchecked cast
    ((RuleBlock) context().block).throwsSpec = thr;
}
454:
455: // Only called for rule blocks
// Only called for rule blocks
/** Record the rule's argument action (the [...] parameter declaration). */
public void refArgAction(Token action) {
    ((RuleBlock) context().block).argAction = action.getText();
}
459:
/**
 * Reference a character literal; only valid in the lexer.  Warns about
 * non-lowercase ASCII literals when caseSensitive=false and, when the
 * ignore option is set, appends an optional call to the ignore rule.
 */
public void refCharLiteral(Token lit, Token label,
        boolean inverted, int autoGenType, boolean lastInRule) {
    if (!(grammar instanceof LexerGrammar)) {
        tool.error("Character literal only valid in lexer", grammar
                .getFilename(), lit.getLine(), lit.getColumn());
        return;
    }
    super.refCharLiteral(lit, label, inverted, autoGenType,
            lastInRule);
    CharLiteralElement cl = new CharLiteralElement(
            (LexerGrammar) grammar, lit, inverted, autoGenType);

    // Generate a warning for non-lowercase ASCII when case-insensitive
    if (!((LexerGrammar) grammar).caseSensitive
            && cl.getType() < 128
            && Character.toLowerCase((char) cl.getType()) != (char) cl
                    .getType()) {
        tool.warning(
                "Character literal must be lowercase when caseSensitive=false",
                grammar.getFilename(), lit.getLine(), lit.getColumn());
    }

    addElementToCurrentAlt(cl);
    labelElement(cl, label);

    // if ignore option is set, must add an optional call to the specified rule.
    String ignore = ruleBlock.getIgnoreRule();
    if (!lastInRule && ignore != null) {
        addElementToCurrentAlt(createOptionalRuleRef(ignore, lit));
    }
}
493:
/**
 * Reference a character range 'a'..'z'; only valid in the lexer.
 * Rejects inverted ranges, warns about non-lowercase ASCII endpoints
 * when caseSensitive=false, and appends an optional ignore-rule call
 * when the ignore option is set.
 */
public void refCharRange(Token t1, Token t2, Token label,
        int autoGenType, boolean lastInRule) {
    if (!(grammar instanceof LexerGrammar)) {
        tool.error("Character range only valid in lexer", grammar
                .getFilename(), t1.getLine(), t1.getColumn());
        return;
    }
    int rangeMin = ANTLRLexer.tokenTypeForCharLiteral(t1.getText());
    int rangeMax = ANTLRLexer.tokenTypeForCharLiteral(t2.getText());
    if (rangeMax < rangeMin) {
        tool.error("Malformed range.", grammar.getFilename(), t1
                .getLine(), t1.getColumn());
        return;
    }

    // Generate a warning for non-lowercase ASCII when case-insensitive
    if (!((LexerGrammar) grammar).caseSensitive) {
        if (rangeMin < 128
                && Character.toLowerCase((char) rangeMin) != (char) rangeMin) {
            tool.warning(
                    "Character literal must be lowercase when caseSensitive=false",
                    grammar.getFilename(), t1.getLine(), t1.getColumn());
        }
        if (rangeMax < 128
                && Character.toLowerCase((char) rangeMax) != (char) rangeMax) {
            tool.warning(
                    "Character literal must be lowercase when caseSensitive=false",
                    grammar.getFilename(), t2.getLine(), t2.getColumn());
        }
    }

    super.refCharRange(t1, t2, label, autoGenType, lastInRule);
    CharRangeElement cr = new CharRangeElement(
            (LexerGrammar) grammar, t1, t2, autoGenType);
    addElementToCurrentAlt(cr);
    labelElement(cr, label);

    // if ignore option is set, must add an optional call to the specified rule.
    String ignore = ruleBlock.getIgnoreRule();
    if (!lastInRule && ignore != null) {
        addElementToCurrentAlt(createOptionalRuleRef(ignore, t1));
    }
}
541:
542: public void refTokensSpecElementOption(Token tok, Token option,
543: Token value) {
544: /*
545: System.out.println("setting tokens spec option for "+tok.getText());
546: System.out.println(option.getText()+","+value.getText());
547: */
548: TokenSymbol ts = (TokenSymbol) grammar.tokenManager
549: .getTokenSymbol(tok.getText());
550: if (ts == null) {
551: tool.fatalError("Cannot find " + tok.getText()
552: + "in tokens {...}");
553: }
554: if (option.getText().equals("AST")) {
555: ts.setASTNodeType(value.getText());
556: } else {
557: grammar.antlrTool.error(
558: "invalid tokens {...} element option:"
559: + option.getText(), grammar.getFilename(),
560: option.getLine(), option.getColumn());
561: }
562: }
563:
564: public void refElementOption(Token option, Token value) {
565: /*
566: System.out.println("setting option for "+context().currentElement());
567: System.out.println(option.getText()+","+value.getText());
568: */
569: AlternativeElement e = context().currentElement();
570: if (e instanceof StringLiteralElement
571: || e instanceof TokenRefElement
572: || e instanceof WildcardElement) {
573: ((GrammarAtom) e).setOption(option, value);
574: } else {
575: tool.error("cannot use element option (" + option.getText()
576: + ") for this kind of element", grammar
577: .getFilename(), option.getLine(), option
578: .getColumn());
579: }
580: }
581:
582: /** Add an exception handler to an exception spec */
583: public void refExceptionHandler(Token exTypeAndName, Token action) {
584: super .refExceptionHandler(exTypeAndName, action);
585: if (currentExceptionSpec == null) {
586: tool
587: .fatalError("Exception handler processing internal error");
588: }
589: currentExceptionSpec.addHandler(new ExceptionHandler(
590: exTypeAndName, action));
591: }
592:
/** Record an init action ({...} before a block) on the current block. */
public void refInitAction(Token action) {
    // NOTE(review): delegates to super.refAction, not a refInitAction hook --
    // confirm against DefineGrammarSymbols that this is intentional.
    super.refAction(action);
    context().block.setInitAction(action.getText());
}
597:
/** Record the class-member action ({...} in the class body) on the grammar. */
public void refMemberAction(Token act) {
    grammar.classMemberAction = act;
}
601:
/** Forward the file-preamble action to the superclass (no local handling). */
public void refPreambleAction(Token act) {
    super.refPreambleAction(act);
}
605:
606: // Only called for rule blocks
607: public void refReturnAction(Token returnAction) {
608: if (grammar instanceof LexerGrammar) {
609: String name = CodeGenerator
610: .encodeLexerRuleName(((RuleBlock) context().block)
611: .getRuleName());
612: RuleSymbol rs = (RuleSymbol) grammar.getSymbol(name);
613: if (rs.access.equals("public")) {
614: tool
615: .warning(
616: "public Lexical rules cannot specify return type",
617: grammar.getFilename(), returnAction
618: .getLine(), returnAction
619: .getColumn());
620: return;
621: }
622: }
623: ((RuleBlock) context().block).returnAction = returnAction
624: .getText();
625: }
626:
/**
 * Reference another rule from the current alternative.  In the lexer,
 * only TOKEN_REF-style names are legal and ^ is disallowed.  Records the
 * reference in the target rule's symbol so cross-references are tracked.
 */
public void refRule(Token idAssign, Token r, Token label,
        Token args, int autoGenType) {
    // Disallow parser rule references in the lexer
    if (grammar instanceof LexerGrammar) {
        // if (!Character.isUpperCase(r.getText().charAt(0))) {
        if (r.getType() != ANTLRTokenTypes.TOKEN_REF) {
            tool.error("Parser rule " + r.getText()
                    + " referenced in lexer");
            return;
        }
        if (autoGenType == GrammarElement.AUTO_GEN_CARET) {
            tool.error("AST specification ^ not allowed in lexer",
                    grammar.getFilename(), r.getLine(), r
                            .getColumn());
        }
    }

    super.refRule(idAssign, r, label, args, autoGenType);
    lastRuleRef = new RuleRefElement(grammar, r, autoGenType);
    if (args != null) {
        lastRuleRef.setArgs(args.getText());
    }
    if (idAssign != null) {
        lastRuleRef.setIdAssign(idAssign.getText());
    }
    addElementToCurrentAlt(lastRuleRef);

    String id = r.getText();
    // if ( Character.isUpperCase(id.charAt(0)) ) { // lexer rule?
    if (r.getType() == ANTLRTokenTypes.TOKEN_REF) { // lexer rule?
        id = CodeGenerator.encodeLexerRuleName(id);
    }
    // update symbol table so it knows what nodes reference the rule.
    RuleSymbol rs = (RuleSymbol) grammar.getSymbol(id);
    rs.addReference(lastRuleRef);
    labelElement(lastRuleRef, label);
}
664:
665: public void refSemPred(Token pred) {
666: //System.out.println("refSemPred "+pred.getText());
667: super .refSemPred(pred);
668: //System.out.println("context().block: "+context().block);
669: if (context().currentAlt().atStart()) {
670: context().currentAlt().semPred = pred.getText();
671: } else {
672: ActionElement a = new ActionElement(grammar, pred);
673: a.isSemPred = true;
674: addElementToCurrentAlt(a);
675: }
676: //System.out.println("DONE refSemPred "+pred.getText());
677: }
678:
/**
 * Reference a string literal.  Disallows ^ in tree walkers, warns about
 * non-lowercase characters when the lexer is case-insensitive, and
 * appends an optional ignore-rule call when the ignore option is set.
 */
public void refStringLiteral(Token lit, Token label,
        int autoGenType, boolean lastInRule) {
    super.refStringLiteral(lit, label, autoGenType, lastInRule);
    if (grammar instanceof TreeWalkerGrammar
            && autoGenType == GrammarElement.AUTO_GEN_CARET) {
        tool.error("^ not allowed in here for tree-walker", grammar
                .getFilename(), lit.getLine(), lit.getColumn());
    }
    StringLiteralElement sl = new StringLiteralElement(grammar,
            lit, autoGenType);

    // If case-insensitive, then check each char of the string literal
    // (indices 1..length-2 skip the surrounding quotes).
    if (grammar instanceof LexerGrammar
            && !((LexerGrammar) grammar).caseSensitive) {
        for (int i = 1; i < lit.getText().length() - 1; i++) {
            char c = lit.getText().charAt(i);
            if (c < 128 && Character.toLowerCase(c) != c) {
                tool.warning(
                        "Characters of string literal must be lowercase when caseSensitive=false",
                        grammar.getFilename(), lit.getLine(), lit.getColumn());
                break;
            }
        }
    }

    addElementToCurrentAlt(sl);
    labelElement(sl, label);

    // if ignore option is set, must add an optional call to the specified rule.
    String ignore = ruleBlock.getIgnoreRule();
    if (!lastInRule && ignore != null) {
        addElementToCurrentAlt(createOptionalRuleRef(ignore, lit));
    }
}
715:
/**
 * Reference a token.  Inside the lexer, a token reference is really a
 * rule reference (delegated to refRule); outside the lexer, assignments
 * and arguments on token references are rejected and a TokenRefElement
 * is added to the current alternative.
 */
public void refToken(Token idAssign, Token t, Token label,
        Token args, boolean inverted, int autoGenType,
        boolean lastInRule) {
    if (grammar instanceof LexerGrammar) {
        // In lexer, token references are really rule references
        if (autoGenType == GrammarElement.AUTO_GEN_CARET) {
            tool.error("AST specification ^ not allowed in lexer",
                    grammar.getFilename(), t.getLine(), t
                            .getColumn());
        }
        if (inverted) {
            tool.error("~TOKEN is not allowed in lexer", grammar
                    .getFilename(), t.getLine(), t.getColumn());
        }
        refRule(idAssign, t, label, args, autoGenType);

        // if ignore option is set, must add an optional call to the specified token rule.
        String ignore = ruleBlock.getIgnoreRule();
        if (!lastInRule && ignore != null) {
            addElementToCurrentAlt(createOptionalRuleRef(ignore, t));
        }
    } else {
        // Cannot have token ref args or assignment outside of lexer
        if (idAssign != null) {
            tool.error(
                    "Assignment from token reference only allowed in lexer",
                    grammar.getFilename(), idAssign
                            .getLine(), idAssign
                            .getColumn());
        }
        if (args != null) {
            tool.error(
                    "Token reference arguments only allowed in lexer",
                    grammar.getFilename(), args.getLine(),
                    args.getColumn());
        }
        super.refToken(idAssign, t, label, args, inverted,
                autoGenType, lastInRule);
        TokenRefElement te = new TokenRefElement(grammar, t,
                inverted, autoGenType);
        addElementToCurrentAlt(te);
        labelElement(te, label);
    }
}
762:
763: public void refTokenRange(Token t1, Token t2, Token label,
764: int autoGenType, boolean lastInRule) {
765: if (grammar instanceof LexerGrammar) {
766: tool.error("Token range not allowed in lexer", grammar
767: .getFilename(), t1.getLine(), t1.getColumn());
768: return;
769: }
770: super .refTokenRange(t1, t2, label, autoGenType, lastInRule);
771: TokenRangeElement tr = new TokenRangeElement(grammar, t1, t2,
772: autoGenType);
773: if (tr.end < tr.begin) {
774: tool.error("Malformed range.", grammar.getFilename(), t1
775: .getLine(), t1.getColumn());
776: return;
777: }
778: addElementToCurrentAlt(tr);
779: labelElement(tr, label);
780: }
781:
/** Record a tree specifier action on the current alternative. */
public void refTreeSpecifier(Token treeSpec) {
    context().currentAlt().treeSpecifier = treeSpec;
}
785:
786: public void refWildcard(Token t, Token label, int autoGenType) {
787: super .refWildcard(t, label, autoGenType);
788: WildcardElement wc = new WildcardElement(grammar, t,
789: autoGenType);
790: addElementToCurrentAlt(wc);
791: labelElement(wc, label);
792: }
793:
794: /** Get ready to process a new grammar */
795: public void reset() {
796: super .reset();
797: blocks = new LList();
798: lastRuleRef = null;
799: ruleEnd = null;
800: ruleBlock = null;
801: nested = 0;
802: currentExceptionSpec = null;
803: grammarError = false;
804: }
805:
/** Attach an argument action to the most recently referenced rule. */
public void setArgOfRuleRef(Token argAction) {
    super.setArgOfRuleRef(argAction);
    // lastRuleRef was set by the preceding refRule()/refToken() call
    lastRuleRef.setArgs(argAction.getText());
}
810:
/**
 * Copy the alternatives and flags of src into b.  Used when the
 * placeholder AlternativeBlock is replaced by the real subrule type
 * ((...)+, (...)*, syn pred) once the suffix is known.
 */
public static void setBlock(AlternativeBlock b, AlternativeBlock src) {
    b.setAlternatives(src.getAlternatives());
    b.initAction = src.initAction;
    //b.lookaheadDepth = src.lookaheadDepth;
    b.label = src.label;
    b.hasASynPred = src.hasASynPred;
    b.hasAnAction = src.hasAnAction;
    b.warnWhenFollowAmbig = src.warnWhenFollowAmbig;
    b.generateAmbigWarnings = src.generateAmbigWarnings;
    b.line = src.line;
    b.greedy = src.greedy;
    b.greedySet = src.greedySet;
    b.combineChars = src.combineChars;
}
825:
/** Set an options{...} entry on the current rule block. */
public void setRuleOption(Token key, Token value) {
    //((RuleBlock)context().block).setOption(key, value);
    ruleBlock.setOption(key, value);
}
830:
/** Set an options{...} entry on the current subrule block. */
public void setSubruleOption(Token key, Token value) {
    ((AlternativeBlock) context().block).setOption(key, value);
}
834:
/**
 * Convert the placeholder subrule on top of the block stack into a
 * syntactic-predicate block, preserving its alternatives and end element.
 */
public void synPred() {
    if (context().block.not) {
        tool.error("'~' cannot be applied to syntactic predicate",
                grammar.getFilename(), context().block.getLine(),
                context().block.getColumn());
    }
    // create the right kind of object now that we know what that is
    // and switch the list of alternatives. Adjust the stack of blocks.
    // copy any init action also.
    SynPredBlock b = new SynPredBlock(grammar);
    setBlock(b, context().block);
    BlockContext old = (BlockContext) blocks.pop(); // remove old scope; we want new type of subrule
    blocks.push(new BlockContext());
    context().block = b;
    context().blockEnd = old.blockEnd;
    context().blockEnd.block = b;
}
852:
853: public void zeroOrMoreSubRule() {
854: if (context().block.not) {
855: tool.error("'~' cannot be applied to (...)+ subrule",
856: grammar.getFilename(), context().block.getLine(),
857: context().block.getColumn());
858: }
859: // create the right kind of object now that we know what that is
860: // and switch the list of alternatives. Adjust the stack of blocks.
861: // copy any init action also.
862: ZeroOrMoreBlock b = new ZeroOrMoreBlock(grammar);
863: setBlock(b, context().block);
864: BlockContext old = (BlockContext) blocks.pop(); // remove old scope; we want new type of subrule
865: blocks.push(new BlockContext());
866: context().block = b;
867: context().blockEnd = old.blockEnd;
868: context().blockEnd.block = b;
869: }
870: }
|