/*
 * Sun Public License Notice
 *
 * The contents of this file are subject to the Sun Public License
 * Version 1.0 (the "License"). You may not use this file except in
 * compliance with the License. A copy of the License is available at
 * http://www.sun.com/
 *
 * The Original Code is NetBeans. The Initial Developer of the Original
 * Code is Sun Microsystems, Inc. Portions Copyright 1997-2000 Sun
 * Microsystems, Inc. All Rights Reserved.
 */

package org.netbeans.editor.ext;

import java.util.HashMap;
import java.util.Map;

import javax.swing.event.DocumentEvent;
import javax.swing.event.DocumentListener;
import javax.swing.text.BadLocationException;
import javax.swing.text.JTextComponent;

import org.netbeans.editor.Analyzer;
import org.netbeans.editor.BaseDocument;
import org.netbeans.editor.FinderFactory;
import org.netbeans.editor.SettingsNames;
import org.netbeans.editor.SyntaxSupport;
import org.netbeans.editor.TextBatchProcessor;
import org.netbeans.editor.TokenContextPath;
import org.netbeans.editor.TokenID;
import org.netbeans.editor.TokenItem;
import org.netbeans.editor.TokenProcessor;
import org.netbeans.editor.Utilities;

/**
 * Support methods for syntax analysis.
 *
 * @author Miloslav Metelka
 * @version 1.00
 */
public class ExtSyntaxSupport extends SyntaxSupport {

    public static final int COMPLETION_POPUP = 0;
    public static final int COMPLETION_CANCEL = 1;
    public static final int COMPLETION_REFRESH = 2;
    public static final int COMPLETION_POST_REFRESH = 3;
    public static final int COMPLETION_HIDE = 4;

    private static final TokenID[] EMPTY_TOKEN_ID_ARRAY = new TokenID[0];

    /**
     * Listens for changes on the document. Subclasses can override the
     * documentModified() method to perform their own processing.
     */
    private DocumentListener docL;

    /** Map holding the [position, local-variable-map] pairs */
    private HashMap localVarMaps = new HashMap();

    /** Map holding the [position, global-variable-map] pairs */
    private HashMap globalVarMaps = new HashMap();

    public ExtSyntaxSupport(BaseDocument doc) {
        super(doc);

        // Create a listener for document changes
        docL = new DocumentListener() {
            public void insertUpdate(DocumentEvent evt) {
                documentModified(evt);
            }

            public void removeUpdate(DocumentEvent evt) {
                documentModified(evt);
            }

            public void changedUpdate(DocumentEvent evt) {
            }
        };
        getDocument().addDocumentListener(docL);
    }

    /**
     * Get the chain of tokens for the given block of text. The returned
     * chain of token-items reflects the tokens as they occur in the text,
     * so the first token can start at a slightly lower position than the
     * requested one. The chain can be extended automatically: when the first
     * chain item is reached and <tt>getPrevious()</tt> is called on it,
     * another chunk of tokens is parsed and the head of the chain is
     * extended. However, this happens only if the document was not modified
     * between the creation of the chain and that moment; otherwise such a
     * call throws <tt>IllegalStateException</tt>.
     *
     * @param startOffset
     *            starting position of the block
     * @param endOffset
     *            ending position of the block
     * @return the first item of the token-item chain or null if there are no
     *         tokens in the given area or the area is so small that it lies
     *         inside one token. To prevent this, provide an area that spans
     *         a new-line.
     */
    public TokenItem getTokenChain(int startOffset, int endOffset)
            throws BadLocationException {

        TokenItem chain = null;
        BaseDocument doc = getDocument();
        doc.readLock();
        try {
            int docLen = doc.getLength();
            if (startOffset < docLen) {
                TokenItemTP tp = new TokenItemTP();
                tp.targetOffset = endOffset;
                tokenizeText(tp, startOffset, endOffset, false);
                chain = tp.getTokenChain();
            }
        } finally {
            doc.readUnlock();
        }

        return chain;
    }
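
    // Usage sketch (illustrative only, not part of the original API): walk the
    // token chain of a document region. Assumes `doc` is a BaseDocument whose
    // editor kit installs an ExtSyntaxSupport, and `start`/`end` are valid
    // offsets.
    //
    //     ExtSyntaxSupport sup = (ExtSyntaxSupport) doc.getSyntaxSupport();
    //     TokenItem item = sup.getTokenChain(start, end);
    //     while (item != null && item.getOffset() < end) {
    //         System.out.println(item.getTokenID().getName()
    //                 + " \"" + item.getImage() + "\" at " + item.getOffset());
    //         item = item.getNext(); // may lazily tokenize another chunk
    //     }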

    /**
     * Called when the document was modified by either an insert or a removal.
     *
     * @param evt
     *            event received with the modification notification;
     *            getType() can be used to obtain the type of the event.
     */
    protected void documentModified(DocumentEvent evt) {
        // Invalidate the cached variable maps
        localVarMaps.clear();
        globalVarMaps.clear();
    }

    /**
     * Get the bracket finder that will search for the matching bracket,
     * or null if the given character is not a bracket character.
     */
    protected BracketFinder getMatchingBracketFinder(char bracketChar) {
        BracketFinder bf = new BracketFinder(bracketChar);
        if (bf.moveCount == 0) { // not a valid bracket char
            bf = null;
        }

        return bf;
    }

    /**
     * Find the matching bracket or, more generally, the block that matches
     * the one at the given position.
     *
     * @param offset
     *            position of the starting bracket
     * @param simpleSearch
     *            whether to use the simple search that does not exclude
     *            comments and similar areas. This can be useful when speed
     *            is critical, because the simple search is faster.
     * @return array of integers containing the starting and ending position
     *         of the matching block in the document, or null if there is no
     *         matching block.
     */
    public int[] findMatchingBlock(int offset, boolean simpleSearch)
            throws BadLocationException {
        char bracketChar = getDocument().getChars(offset, 1)[0];
        int foundPos = -1;

        final BracketFinder bf = getMatchingBracketFinder(bracketChar);

        if (bf != null) { // valid finder
            if (!simpleSearch) {
                TokenID tokenID = getTokenID(offset);
                TokenID[] bst = getBracketSkipTokens();
                for (int i = bst.length - 1; i >= 0; i--) {
                    if (tokenID == bst[i]) {
                        simpleSearch = true; // turn to simple search
                        break;
                    }
                }
            }

            if (simpleSearch) { // don't exclude comments etc.
                if (bf.isForward()) {
                    foundPos = getDocument().find(bf, offset, -1);
                } else {
                    foundPos = getDocument().find(bf, offset + 1, 0);
                }

            } else { // exclude comments etc. from the search
                TextBatchProcessor tbp = new TextBatchProcessor() {
                    public int processTextBatch(BaseDocument doc, int startPos,
                            int endPos, boolean lastBatch) {
                        try {
                            int[] blks = getTokenBlocks(startPos, endPos,
                                    getBracketSkipTokens());
                            return findOutsideBlocks(bf, startPos, endPos, blks);
                        } catch (BadLocationException e) {
                            return -1;
                        }
                    }
                };

                if (bf.isForward()) {
                    foundPos = getDocument().processText(tbp, offset, -1);
                } else {
                    foundPos = getDocument().processText(tbp, offset + 1, 0);
                }
            }
        }

        return (foundPos != -1) ? new int[] { foundPos, foundPos + 1 } : null;
    }
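
    // Usage sketch (illustrative only): find the bracket matching the one
    // under the caret. `sup` and `caretOffset` are assumed to be this syntax
    // support and a valid offset positioned on a bracket character.
    //
    //     int[] match = sup.findMatchingBlock(caretOffset, false);
    //     if (match != null) {
    //         // match[0]..match[1] delimits the matching bracket character
    //     }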

    /**
     * Get the array of token IDs that should be skipped when searching for
     * the matching bracket. It usually includes comments and character and
     * string constants. Returns an empty array by default.
     */
    protected TokenID[] getBracketSkipTokens() {
        return EMPTY_TOKEN_ID_ARRAY;
    }
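
    // Override sketch (hypothetical token constants): a language-specific
    // subclass would typically return its comment and literal token IDs here
    // so that brackets inside them are ignored during matching.
    //
    //     protected TokenID[] getBracketSkipTokens() {
    //         return new TokenID[] {
    //             MyTokenContext.LINE_COMMENT,    // hypothetical IDs
    //             MyTokenContext.BLOCK_COMMENT,
    //             MyTokenContext.STRING_LITERAL,
    //             MyTokenContext.CHAR_LITERAL
    //         };
    //     }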

    /**
     * Gets the token-id of the token at the given position.
     *
     * @param offset
     *            position at which the token should be returned
     * @return token-id of the token at the requested position. If there are
     *         no more tokens in the text, <tt>Syntax.INVALID</tt> is
     *         returned.
     */
    public TokenID getTokenID(int offset) throws BadLocationException {
        FirstTokenTP fttp = new FirstTokenTP();
        tokenizeText(fttp, offset, getDocument().getLength(), true);
        return fttp.getTokenID();
    }
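
    // Usage sketch (illustrative only): check what kind of token the caret
    // sits in, e.g. to suppress completion inside comments. The comparison
    // against getCommentTokens() assumes a subclass overrides that method.
    //
    //     TokenID id = sup.getTokenID(caretOffset);
    //     TokenID[] comments = sup.getCommentTokens();
    //     boolean inComment = false;
    //     for (int i = 0; i < comments.length; i++) {
    //         if (id == comments[i]) {
    //             inComment = true;
    //             break;
    //         }
    //     }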

    /**
     * Is the identifier at the position a function call? It first checks
     * whether there is an identifier under the cursor and then searches for
     * the function call character - usually '('.
     *
     * @param identifierBlock
     *            int[2] block delimiting the identifier
     * @return int[2] block or null if there's no function call
     */
    public int[] getFunctionBlock(int[] identifierBlock)
            throws BadLocationException {
        if (identifierBlock != null) {
            int nwPos = Utilities.getFirstNonWhiteFwd(getDocument(),
                    identifierBlock[1]);
            if ((nwPos >= 0) && (getDocument().getChars(nwPos, 1)[0] == '(')) {
                return new int[] { identifierBlock[0], nwPos + 1 };
            }
        }
        return null;
    }

    public int[] getFunctionBlock(int offset) throws BadLocationException {
        return getFunctionBlock(Utilities.getIdentifierBlock(getDocument(),
                offset));
    }
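
    // Usage sketch (illustrative only): detect a call such as "foo(" around
    // the caret; the returned block spans the identifier and the opening '('.
    //
    //     int[] call = sup.getFunctionBlock(caretOffset);
    //     if (call != null) {
    //         String callHead = doc.getText(call[0], call[1] - call[0]);
    //     }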

    public boolean isWhitespaceToken(TokenID tokenID, char[] buffer,
            int offset, int tokenLength) {
        return Analyzer.isWhitespace(buffer, offset, tokenLength);
    }

    public boolean isCommentOrWhitespace(int startPos, int endPos)
            throws BadLocationException {
        CommentOrWhitespaceTP tp = new CommentOrWhitespaceTP(getCommentTokens());
        tokenizeText(tp, startPos, endPos, true);
        return !tp.nonEmpty;
    }
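
    // Usage sketch (illustrative only): test whether the text between two
    // offsets can be ignored, i.e. consists only of comments and whitespace
    // (comment tokens are taken from getCommentTokens()).
    //
    //     boolean ignorable = sup.isCommentOrWhitespace(prevStmtEnd, caretOffset);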

    /**
     * Get the position of the last non-blank and non-comment character on
     * the given line.
     */
    public int getRowLastValidChar(int offset) throws BadLocationException {
        return Utilities.getRowLastNonWhite(getDocument(), offset);
    }

    /**
     * Does the line contain some valid code besides possible whitespace
     * and comments?
     */
    public boolean isRowValid(int offset) throws BadLocationException {
        // A row consisting only of whitespace contains no valid code
        return !Utilities.isRowWhite(getDocument(), offset);
    }

    /**
     * Get the array of token IDs that denote comments. Returns an empty
     * array by default.
     */
    public TokenID[] getCommentTokens() {
        return EMPTY_TOKEN_ID_ARRAY;
    }

    /**
     * Get the blocks consisting of comments in the specified document area.
     *
     * @param startPos
     *            starting position of the searched document area
     * @param endPos
     *            ending position of the searched document area
     */
    public int[] getCommentBlocks(int startPos, int endPos)
            throws BadLocationException {
        return getTokenBlocks(startPos, endPos, getCommentTokens());
    }
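
    // Usage sketch (illustrative only): the returned array is expected to hold
    // start/end offset pairs, so blocks[2*i] and blocks[2*i + 1] delimit the
    // i-th comment block found in the area.
    //
    //     int[] blocks = sup.getCommentBlocks(0, doc.getLength());
    //     for (int i = 0; i < blocks.length; i += 2) {
    //         System.out.println("comment at " + blocks[i] + "-" + blocks[i + 1]);
    //     }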

    /**
     * Find the type of the given variable. The default behavior is to first
     * search for the local variable declaration and then possibly for the
     * global declaration, and if the declaration position is found, to get
     * the first word at that position.
     *
     * @return an Object, so that custom implementations can return whatever
     *         instances are appropriate.
     */
    public Object findType(String varName, int varPos) {
        Object type = null;
        Map varMap = getLocalVariableMap(varPos); // first try local vars
        if (varMap != null) {
            type = varMap.get(varName);
        }

        if (type == null) {
            varMap = getGlobalVariableMap(varPos); // then try global vars
            if (varMap != null) {
                type = varMap.get(varName);
            }
        }

        return type;
    }
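
    // Usage sketch (illustrative only): resolve the type of a variable name
    // found at `varOffset`. This yields a non-null result only when a subclass
    // provides a VariableMapTokenProcessor; the base class builds no maps.
    //
    //     Object type = sup.findType("counter", varOffset);
    //     if (type != null) {
    //         // typically a language-specific representation of the type
    //     }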

    public Map getLocalVariableMap(int offset) {
        Integer posI = new Integer(offset);
        Map varMap = (Map) localVarMaps.get(posI);
        if (varMap == null) {
            varMap = buildLocalVariableMap(offset);
            localVarMaps.put(posI, varMap);
        }
        return varMap;
    }

    protected Map buildLocalVariableMap(int offset) {
        int methodStartPos = getMethodStartPosition(offset);
        if (methodStartPos >= 0 && methodStartPos < offset) {
            VariableMapTokenProcessor vmtp = createVariableMapTokenProcessor(
                    methodStartPos, offset);
            if (vmtp != null) { // subclasses may provide no processor
                try {
                    tokenizeText(vmtp, methodStartPos, offset, true);
                    return vmtp.getVariableMap();
                } catch (BadLocationException e) {
                    // fall through and return null
                }
            }
        }
        return null;
    }

    public Map getGlobalVariableMap(int offset) {
        Integer posI = new Integer(offset);
        Map varMap = (Map) globalVarMaps.get(posI);
        if (varMap == null) {
            varMap = buildGlobalVariableMap(offset);
            globalVarMaps.put(posI, varMap);
        }
        return varMap;
    }

    protected Map buildGlobalVariableMap(int offset) {
        int docLen = getDocument().getLength();
        VariableMapTokenProcessor vmtp = createVariableMapTokenProcessor(0,
                docLen);
        if (vmtp != null) {
            try {
                tokenizeText(vmtp, 0, docLen, true);
                return vmtp.getVariableMap();
            } catch (BadLocationException e) {
                // fall through and return null
            }
        }
        return null;
    }

    /**
     * Get the start position of the method or of the area where the
     * declaration can start.
     */
    protected int getMethodStartPosition(int offset) {
        return 0; // return the beginning of the document by default
    }

    /**
     * Find either the local or the global declaration position. First try
     * the local declaration and, if it doesn't succeed, then try the global
     * declaration.
     */
    public int findDeclarationPosition(String varName, int varPos) {
        int offset = findLocalDeclarationPosition(varName, varPos);
        if (offset < 0) {
            offset = findGlobalDeclarationPosition(varName, varPos);
        }
        return offset;
    }
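
    // Usage sketch (illustrative only): jump to the declaration of the
    // identifier under the caret. A negative result means no declaration was
    // found, which is always the case unless a subclass provides a
    // DeclarationTokenProcessor.
    //
    //     int declOffset = sup.findDeclarationPosition(varName, caretOffset);
    //     if (declOffset >= 0) {
    //         target.setCaretPosition(declOffset);
    //     }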

    public int findLocalDeclarationPosition(String varName, int varPos) {
        int methodStartPos = getMethodStartPosition(varPos);
        if (methodStartPos >= 0 && methodStartPos < varPos) {
            return findDeclarationPositionImpl(varName, methodStartPos, varPos);
        }
        return -1;
    }

    /**
     * Get the position of the global declaration of a given variable. By
     * default it is implemented to use the same token processor as for the
     * local variables, but the whole file is searched.
     */
    public int findGlobalDeclarationPosition(String varName, int varPos) {
        return findDeclarationPositionImpl(varName, 0,
                getDocument().getLength());
    }

    private int findDeclarationPositionImpl(String varName, int startPos,
            int endPos) {
        DeclarationTokenProcessor dtp = createDeclarationTokenProcessor(
                varName, startPos, endPos);
        if (dtp != null) {
            try {
                tokenizeText(dtp, startPos, endPos, true);
                return dtp.getDeclarationPosition();
            } catch (BadLocationException e) {
                // fall through and return -1
            }
        }
        return -1;
    }

    protected DeclarationTokenProcessor createDeclarationTokenProcessor(
            String varName, int startPos, int endPos) {
        return null;
    }

    protected VariableMapTokenProcessor createVariableMapTokenProcessor(
            int startPos, int endPos) {
        return null;
    }

    /** Check and possibly pop up, hide, or refresh the completion. */
    public int checkCompletion(JTextComponent target, String typedText,
            boolean visible) {
        return visible ? COMPLETION_HIDE : COMPLETION_CANCEL;
    }
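
    // Override sketch (hypothetical policy): a subclass could pop up the
    // completion window when '.' is typed and refresh it while an identifier
    // is being typed, e.g.:
    //
    //     public int checkCompletion(JTextComponent target, String typedText,
    //             boolean visible) {
    //         if (typedText.length() == 0) {
    //             return visible ? COMPLETION_REFRESH : COMPLETION_CANCEL;
    //         }
    //         char ch = typedText.charAt(0);
    //         if (!visible) {
    //             return (ch == '.') ? COMPLETION_POPUP : COMPLETION_CANCEL;
    //         }
    //         return Character.isJavaIdentifierPart(ch)
    //                 ? COMPLETION_POST_REFRESH : COMPLETION_HIDE;
    //     }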

    /** Check whether the sources for code completion are already available. */
    public boolean isPrepared() {
        return true;
    }

    /**
     * Token processor extended to return the declaration position of the
     * given variable.
     */
    public interface DeclarationTokenProcessor extends TokenProcessor {

        /** Get the declaration position. */
        public int getDeclarationPosition();

    }

    public interface VariableMapTokenProcessor extends TokenProcessor {

        /** Get the map that contains the [variable-name, variable-type] pairs. */
        public Map getVariableMap();

    }
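
    // Implementation sketch (hypothetical): a language-specific subclass would
    // typically return an implementation like the following from
    // createVariableMapTokenProcessor(). MyTokenContext and its token IDs are
    // assumptions; the simplistic "type token followed by identifier token"
    // rule merely illustrates how the map of [variable-name, variable-type]
    // pairs gets filled while the text is tokenized.
    //
    //     class SimpleVariableMapTP implements VariableMapTokenProcessor {
    //
    //         private Map varMap = new HashMap();
    //         private String lastTypeImage; // remembered type token image
    //         private char[] buffer;
    //
    //         public Map getVariableMap() {
    //             return varMap;
    //         }
    //
    //         public boolean token(TokenID tokenID, TokenContextPath tcp,
    //                 int offset, int len) {
    //             String image = new String(buffer, offset, len);
    //             if (tokenID == MyTokenContext.TYPE) {
    //                 lastTypeImage = image;
    //             } else if (tokenID == MyTokenContext.IDENTIFIER
    //                     && lastTypeImage != null) {
    //                 varMap.put(image, lastTypeImage); // declaration found
    //                 lastTypeImage = null;
    //             } else if (tokenID != MyTokenContext.WHITESPACE) {
    //                 lastTypeImage = null; // anything else breaks the pattern
    //             }
    //             return true; // scan the whole requested area
    //         }
    //
    //         public int eot(int offset) {
    //             return 0;
    //         }
    //
    //         public void nextBuffer(char[] buffer, int offset, int len,
    //                 int startPos, int preScan, boolean lastBuffer) {
    //             this.buffer = buffer;
    //         }
    //     }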

    /**
     * Finder for the matching bracket. It gets the original bracket char and
     * searches for the corresponding matching bracket character.
     */
    public class BracketFinder extends FinderFactory.GenericFinder {

        /** Original bracket char */
        protected char bracketChar;

        /** Matching bracket char */
        protected char matchChar;

        /** Depth of original brackets */
        private int depth;

        /**
         * +1 for a forward finder, -1 for a backward finder, or 0 when the
         * given character is not a bracket character.
         */
        protected int moveCount;

        /**
         * @param bracketChar
         *            bracket char
         */
        public BracketFinder(char bracketChar) {
            this.bracketChar = bracketChar;

            updateStatus();

            forward = (moveCount > 0);
        }

        /**
         * Check whether bracketChar really holds a bracket character.
         * If so, assign the matchChar and moveCount variables.
         */
        protected boolean updateStatus() {
            boolean valid = true;
            switch (bracketChar) {
                case '(':
                    matchChar = ')';
                    moveCount = +1;
                    break;
                case ')':
                    matchChar = '(';
                    moveCount = -1;
                    break;
                case '{':
                    matchChar = '}';
                    moveCount = +1;
                    break;
                case '}':
                    matchChar = '{';
                    moveCount = -1;
                    break;
                case '[':
                    matchChar = ']';
                    moveCount = +1;
                    break;
                case ']':
                    matchChar = '[';
                    moveCount = -1;
                    break;

                default:
                    valid = false;
            }
            return valid;
        }

        protected int scan(char ch, boolean lastChar) {
            if (ch == bracketChar) {
                depth++;
            } else if (ch == matchChar) {
                if (--depth == 0) {
                    found = true;
                    return 0;
                }
            }
            return moveCount;
        }

    }
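
    // Extension sketch (hypothetical): a subclass of ExtSyntaxSupport could add
    // further bracket pairs by extending updateStatus(); bracketChar is already
    // assigned before the superclass constructor calls updateStatus(), so the
    // override below also works during construction. getMatchingBracketFinder()
    // would be overridden as well to instantiate this finder.
    //
    //     public class AngleBracketFinder extends BracketFinder {
    //
    //         public AngleBracketFinder(char bracketChar) {
    //             super(bracketChar);
    //         }
    //
    //         protected boolean updateStatus() {
    //             if (super.updateStatus()) {
    //                 return true;
    //             }
    //             switch (bracketChar) {
    //                 case '<':
    //                     matchChar = '>';
    //                     moveCount = +1;
    //                     return true;
    //                 case '>':
    //                     matchChar = '<';
    //                     moveCount = -1;
    //                     return true;
    //             }
    //             return false;
    //         }
    //     }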

    /** Create token-items */
    final class TokenItemTP implements TokenProcessor {

        private Item firstItem;

        private Item lastItem;

        private DocumentListener docL;

        private boolean docModified;

        private int fwdBatchLineCnt;
        private int bwdBatchLineCnt;

        private char[] buffer;

        private int bufferStartPos;

        /**
         * Target offset: when searching backward it corresponds to the
         * beginning of the token that is already chained; when searching
         * forward it is the position up to which tokens should be scanned.
         */
        int targetOffset;

        TokenItemTP() {
            fwdBatchLineCnt = bwdBatchLineCnt = ((Integer) getDocument()
                    .getProperty(SettingsNames.LINE_BATCH_SIZE)).intValue();
            // Start listening on document changes
            docL = new DocumentListener() {
                public void insertUpdate(DocumentEvent evt) {
                    if (lastItem != null
                            && evt.getOffset() < lastItem.getOffset()
                                    + lastItem.getImage().length()) {
                        docModified = true;
                        getDocument().removeDocumentListener(this);
                        docL = null;
                    }
                }

                public void removeUpdate(DocumentEvent evt) {
                    if (lastItem != null
                            && evt.getOffset() < lastItem.getOffset()
                                    + lastItem.getImage().length()) {
                        docModified = true;
                        getDocument().removeDocumentListener(this);
                        docL = null;
                    }
                }

                public void changedUpdate(DocumentEvent evt) {
                }
            };
            getDocument().addDocumentListener(docL); // register the listener created above
        }

        protected void finalize() throws Throwable {
            if (docL != null) {
                getDocument().removeDocumentListener(docL);
                docL = null;
            }

            super.finalize();
        }

        public TokenItem getTokenChain() {
            return firstItem;
        }

        public boolean token(TokenID tokenID,
                TokenContextPath tokenContextPath,
                int tokenBufferOffset, int tokenLength) {
            if (bufferStartPos + tokenBufferOffset >= targetOffset) {
                return false; // stop scanning
            }

            lastItem = new Item(tokenID, tokenContextPath,
                    bufferStartPos + tokenBufferOffset,
                    new String(buffer, tokenBufferOffset, tokenLength),
                    lastItem);

            if (firstItem == null) { // not yet assigned
                firstItem = lastItem;
            }

            return true;
        }

        public int eot(int offset) {
            return ((Integer) getDocument().getProperty(
                    SettingsNames.MARK_DISTANCE)).intValue();
        }

        public void nextBuffer(char[] buffer, int offset, int len,
                int startPos, int preScan, boolean lastBuffer) {
            this.buffer = buffer;
            bufferStartPos = startPos - offset;
        }

        Item getNextChunk(Item i) {
            if (docModified) {
                throw new IllegalStateException();
            }
            BaseDocument doc = getDocument();
            int itemEndPos = i.getOffset() + i.getImage().length();
            int docLen = doc.getLength();
            if (itemEndPos == docLen) {
                return null;
            }

            int endPos;
            try {
                endPos = Utilities.getRowStart(doc, itemEndPos,
                        fwdBatchLineCnt);
            } catch (BadLocationException e) {
                return null;
            }

            if (endPos == -1) { // past end of doc
                endPos = docLen;
            }
            fwdBatchLineCnt *= 2; // larger batch in next call

            Item nextChunkHead = null;
            Item fit = firstItem;
            Item lit = lastItem;
            try {
                // Simulate initial conditions
                firstItem = null;
                lastItem = null;
                targetOffset = endPos;

                tokenizeText(this, itemEndPos, endPos, false);
                nextChunkHead = firstItem;

            } catch (BadLocationException e) {
            } finally {
                // Link previous last with the current first
                if (firstItem != null) {
                    lit.next = firstItem;
                    firstItem.previous = lit;
                }

                firstItem = fit;
                if (lastItem == null) { // restore in case of no token or crash
                    lastItem = lit;
                }
            }

            return nextChunkHead;
        }

        Item getPreviousChunk(Item i) {
            if (docModified) {
                throw new IllegalStateException();
            }
            BaseDocument doc = getDocument();
            int itemStartPos = i.getOffset();
            if (itemStartPos == 0) {
                return null;
            }

            int startPos;
            try {
                startPos = Utilities.getRowStart(doc, itemStartPos,
                        -bwdBatchLineCnt);
            } catch (BadLocationException e) {
                return null;
            }

            if (startPos == -1) { // before beginning of doc
                startPos = 0;
            }
            bwdBatchLineCnt *= 2;

            Item previousChunkLast = null;
            Item fit = firstItem;
            Item lit = lastItem;
            try {
                // Simulate initial conditions
                firstItem = null;
                lastItem = null;
                targetOffset = itemStartPos;

                tokenizeText(this, startPos, itemStartPos, false);
                previousChunkLast = lastItem;

            } catch (BadLocationException e) {
            } finally {
                // Link previous last
                if (lastItem != null) {
                    fit.previous = lastItem;
                    lastItem.next = fit;
                }

                lastItem = lit;
                if (firstItem == null) { // restore in case of no token or crash
                    firstItem = fit;
                }
            }

            return previousChunkLast;
        }

        final class Item extends TokenItem.AbstractItem {

            Item previous;

            TokenItem next;

            Item(TokenID tokenID, TokenContextPath tokenContextPath,
                    int offset, String image, Item previous) {
                super(tokenID, tokenContextPath, offset, image);
                if (previous != null) {
                    this.previous = previous;
                    previous.next = this;
                }
            }

            public TokenItem getNext() {
                if (next == null) {
                    next = getNextChunk(this);
                }
                return next;
            }

            public TokenItem getPrevious() {
                if (previous == null) {
                    previous = getPreviousChunk(this);
                }
                return previous;
            }

        }

    }

    /** Token processor that matches either comments or whitespace */
    class CommentOrWhitespaceTP implements TokenProcessor {

        private char[] buffer;

        private TokenID[] commentTokens;

        boolean nonEmpty;

        CommentOrWhitespaceTP(TokenID[] commentTokens) {
            this.commentTokens = commentTokens;
        }

        public boolean token(TokenID tokenID,
                TokenContextPath tokenContextPath, int offset,
                int tokenLength) {
            for (int i = 0; i < commentTokens.length; i++) {
                if (tokenID == commentTokens[i]) {
                    return true; // comment token found
                }
            }
            boolean nonWS = !isWhitespaceToken(tokenID, buffer, offset,
                    tokenLength);
            if (nonWS) {
                nonEmpty = true; // something other than comment or whitespace
            }
            return !nonWS; // continue scanning only over whitespace
        }

        public int eot(int offset) {
            return 0;
        }

        public void nextBuffer(char[] buffer, int offset, int len,
                int startPos, int preScan, boolean lastBuffer) {
            this.buffer = buffer;
        }

    }

    class FirstTokenTP implements TokenProcessor {

        private TokenID tokenID;

        public TokenID getTokenID() {
            return tokenID;
        }

        public boolean token(TokenID tokenID,
                TokenContextPath tokenContextPath, int offset, int tokenLen) {
            this.tokenID = tokenID;
            return false; // no more tokens
        }

        public int eot(int offset) {
            return 0;
        }

        public void nextBuffer(char[] buffer, int offset, int len,
                int startPos, int preScan, boolean lastBuffer) {
        }

    }

}