package com.sun.portal.util;

import java.util.*;

/**
 * A complement to <code>java.util.StringTokenizer</code> that tokenizes by a
 * multi-character substring instead of a set of single-character delimiters,
 * and also provides static utility methods for common string tasks.
 * This class is semantically compatible with <code>StringTokenizer</code>,
 * with the following differences: there is no default delimiter, only a
 * single delimiter (which may be longer than one character) is supported,
 * and delimiter matching may optionally ignore case.
 *
 * @see "java.util.StringTokenizer"
 *
 * @author Todd Fast, todd.fast@sun.com
 * @author Mike Frisino, michael.frisino@sun.com
 * @version JATO/1.2.2 $Id: StringTokenizer2.java,v 1.1 2005/06/15 22:23:16 rt94277 Exp $
 *
 * This class is copied from the JATO 1.2.2 util package.
 */
public class StringTokenizer2 extends Object
        implements Enumeration, Iterator {
    /**
     * Create a tokenizer with property <code>returnTokens</code> set to
     * <code>false</code> and property <code>ignoreCase</code> set to
     * <code>false</code>. A blank delimiter results in the entire text
     * being returned as a single token.
     *
     * @param text string to be parsed (must not be null)
     * @param delimiter the delimiter used to tokenize the text (must not be null)
     */
    public StringTokenizer2(String text, String delimiter) {
        this(text, delimiter, false);
    }
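
    // Usage sketch (illustrative, not part of the original JATO source):
    // tokenizing on a multi-character delimiter, which java.util.StringTokenizer
    // cannot do directly. The input and expected output below are hypothetical.
    //
    //     StringTokenizer2 tok = new StringTokenizer2("a::b::c", "::");
    //     while (tok.hasMoreTokens()) {
    //         System.out.println(tok.nextToken());   // prints "a", then "b", then "c"
    //     }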

    /**
     * Create a tokenizer with the given <code>returnTokens</code> option and
     * property <code>ignoreCase</code> set to <code>false</code>. A blank
     * delimiter results in the entire text being returned as a single token.
     *
     * @param text string to be parsed (must not be null)
     * @param delimiter the delimiter used to tokenize the text (must not be null)
     * @param returnTokens mimics <code>java.util.StringTokenizer</code> in
     *        that when <code>true</code> delimiters are returned as tokens
     */
    public StringTokenizer2(String text, String delimiter,
            boolean returnTokens) {
        this(text, delimiter, returnTokens, false);
    }
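
    // Illustrative sketch (not from the original source): with returnTokens set
    // to true, the delimiter itself is emitted between the surrounding tokens,
    // mirroring java.util.StringTokenizer's return-delimiters behavior.
    //
    //     StringTokenizer2 tok = new StringTokenizer2("a::b", "::", true);
    //     // tok.countTokens() == 3; tokens in order: "a", "::", "b"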

    /**
     * Create a tokenizer with the given <code>returnTokens</code> and
     * <code>ignoreCase</code> options. A blank delimiter results in the
     * entire text being returned as a single token.
     *
     * @param text string to be parsed (must not be null)
     * @param delimiter the delimiter used to tokenize the text (must not be null)
     * @param returnTokens mimics <code>java.util.StringTokenizer</code> in
     *        that when <code>true</code> delimiters are returned as tokens
     * @param ignoreCase delimiter matching is not case sensitive when
     *        <code>true</code>
     */
    public StringTokenizer2(String text, String delimiter,
            boolean returnTokens, boolean ignoreCase) {
        super();
        this.text = text;
        this.delimiter = delimiter;
        this.returnDelimiterTokens = returnTokens;
        parse(ignoreCase);
    }
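
    // Illustrative sketch (not from the original source): with ignoreCase set to
    // true, the delimiter is matched without regard to case, but the returned
    // tokens preserve the original text.
    //
    //     StringTokenizer2 tok =
    //         new StringTokenizer2("oneANDtwoandthree", "and", false, true);
    //     // tokens in order: "one", "two", "three"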

    /**
     * Split the text into tokens on the configured delimiter and store the
     * result in <code>parsedTokens</code>. When <code>ignoreCase</code> is
     * <code>true</code>, delimiter matching is performed on upper-cased
     * copies of the text and delimiter, but tokens are always taken from the
     * original text.
     */
    private void parse(boolean ignoreCase) {
        // A blank delimiter yields the entire text as a single token, as
        // documented in the constructors; guarding here also avoids an
        // endless loop on indexOf("").
        if (delimiter.length() == 0) {
            parsedTokens.add(text);
            return;
        }

        String matchText = null;
        String matchDelim = null;

        if (ignoreCase) {
            matchText = text.toUpperCase();
            matchDelim = delimiter.toUpperCase();
        } else {
            matchText = text;
            matchDelim = delimiter;
        }

        int startIndex = 0;
        int endIndex = matchText.indexOf(matchDelim, startIndex);
        while (endIndex != -1) {
            String token = text.substring(startIndex, endIndex);
            parsedTokens.add(token);
            if (returnDelimiterTokens)
                parsedTokens.add(delimiter);
            startIndex = endIndex + delimiter.length();
            endIndex = matchText.indexOf(matchDelim, startIndex);
        }

        // Remainder of the text after the last delimiter (or the whole text
        // if the delimiter never occurred)
        parsedTokens.add(text.substring(startIndex));
    }

    /**
     *
     * @see java.util.Iterator#hasNext()
     */
    public boolean hasNext() {
        return hasMoreTokens();
    }

    /**
     *
     * @see java.util.Iterator#next()
     */
    public Object next() {
        return nextToken();
    }

    /**
     * Feature not supported; always throws
     * <code>UnsupportedOperationException</code>.
     */
    public void remove() {
        throw new UnsupportedOperationException();
    }

    /**
     *
     * @see "java.util.StringTokenizer.hasMoreTokens()"
     */
    public boolean hasMoreTokens() {
        return tokenIndex < parsedTokens.size();
    }

    /**
     *
     * @see "java.util.StringTokenizer.hasMoreElements()"
     */
    public boolean hasMoreElements() {
        return hasMoreTokens();
    }

    /**
     * Note: unlike <code>java.util.StringTokenizer</code>, the returned count
     * is the total number of tokens and does not decrease as tokens are
     * consumed.
     *
     * @see "java.util.StringTokenizer.countTokens()"
     */
    public int countTokens() {
        return parsedTokens.size();
    }

    /**
     *
     * @see "java.util.StringTokenizer.nextToken()"
     */
    public String nextToken() {
        return (String) parsedTokens.get(tokenIndex++);
    }

    /**
     *
     * @see "java.util.StringTokenizer.nextElement()"
     */
    public Object nextElement() {
        return nextToken();
    }

    ////////////////////////////////////////////////////////////////////////////////
    // Static utility methods
    ////////////////////////////////////////////////////////////////////////////////

    /**
     * Performs a classic string find and replace.
     *
     * @param str original string to be modified
     * @param findValue text to be replaced throughout the string (must not be null)
     * @param replaceValue text to replace found tokens (must not be null)
     * @return modified string
     */
    public static String replace(String str, String findValue,
            String replaceValue) {
        StringTokenizer2 tok = new StringTokenizer2(str, findValue, false);

        String result = "";
        for (int i = 0; i < tok.countTokens() - 1; i++)
            result += tok.nextToken() + replaceValue;
        result += tok.nextToken();

        return result;
    }
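
    // Illustrative sketch (not from the original source): a straightforward
    // find-and-replace using the method above. Input and output values are
    // hypothetical.
    //
    //     String s = StringTokenizer2.replace("one,two,three", ",", "; ");
    //     // s is "one; two; three"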

    /**
     * Performs a classic string find and replace, optionally ignoring the
     * case of the string.
     *
     * @param str original string to be modified
     * @param findValue search text to be replaced throughout the string (must not be null)
     * @param replaceValue text to replace found tokens (must not be null)
     * @param ignoreCase search text is case insensitive when <code>true</code>
     * @return modified string
     */
    public static String replace(String str, String findValue,
            String replaceValue, boolean ignoreCase) {
        StringTokenizer2 tok = new StringTokenizer2(str, findValue,
                false, ignoreCase);

        StringBuffer result = new StringBuffer();
        for (int i = 0; i < tok.countTokens() - 1; i++)
            result.append(tok.nextToken()).append(replaceValue);
        result.append(tok.nextToken());

        return result.toString();
    }
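
    // Illustrative sketch (not from the original source): the case-insensitive
    // variant matches the search text regardless of case, while the replacement
    // text is inserted verbatim.
    //
    //     String s = StringTokenizer2.replace("Foo bar FOO", "foo", "baz", true);
    //     // s is "baz bar baz"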

    /**
     * Shortcut to the {@link #tokenize(String,String,boolean,boolean)
     * generalized search method} with property <code>trim</code> set to
     * <code>false</code> and property <code>ignoreCase</code> set to
     * <code>false</code>.
     */
    public static String[] tokenize(String str, String findValue) {
        return tokenize(str, findValue, false);
    }

    /**
     * Shortcut to the {@link #tokenize(String,String,boolean,boolean)
     * generalized search method} with property <code>ignoreCase</code> set to
     * <code>false</code>.
     */
    public static String[] tokenize(String str, String findValue,
            boolean trim) {
        return tokenize(str, findValue, trim, false);
    }

    /**
     * Utility method to create an array of string tokens, with optional
     * support for trimming results and ignoring case when searching.
     *
     * @param str text to be searched (must not be null)
     * @param findValue search string (must not be null)
     * @param trim flag indicating that resulting tokens should be trimmed
     * @param ignoreCase flag indicating that the search should be case insensitive
     * @return array of string tokens resulting from the search
     */
    public static String[] tokenize(String str, String findValue,
            boolean trim, boolean ignoreCase) {
        StringTokenizer2 tok = new StringTokenizer2(str, findValue,
                false, ignoreCase);

        List result = new LinkedList();
        for (int i = 0; i < tok.countTokens(); i++) {
            if (trim)
                result.add(tok.nextToken().trim());
            else
                result.add(tok.nextToken());
        }

        return (String[]) result.toArray(new String[result.size()]);
    }
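
    // Illustrative sketch (not from the original source): splitting on a
    // delimiter and trimming each token. Values shown are hypothetical.
    //
    //     String[] parts = StringTokenizer2.tokenize(" a , b , c ", ",", true);
    //     // parts is { "a", "b", "c" }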

    /**
     * Utility method to break up a larger string into an array of strings,
     * one string per line. Lines may be terminated by <code>\n</code> or
     * <code>\r\n</code>; the terminators themselves are not included in the
     * result.
     */
    public static String[] tokenizeLines(String string) {
        StringTokenizer tok = new StringTokenizer(string, "\n\r", true);

        List result = new LinkedList();
        String previousToken = null;
        while (tok.hasMoreTokens()) {
            String token = tok.nextToken();
            if (token.equals("\r"))
                ; // Discard
            else if (token.equals("\n")) {
                if (previousToken != null)
                    result.add(previousToken);
                else
                    result.add(""); // Add a blank line

                previousToken = null;
            } else
                previousToken = token;
        }

        // Make sure we get the last line, even if it didn't end
        // with a carriage return
        if (previousToken != null)
            result.add(previousToken);

        return (String[]) result.toArray(new String[result.size()]);
    }
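
    // Illustrative sketch (not from the original source): splitting a block of
    // text into lines. \r characters are dropped, so \r\n terminators behave
    // like \n, and a trailing line without a terminator is still returned.
    //
    //     String[] lines = StringTokenizer2.tokenizeLines("first\r\nsecond\n\nfourth");
    //     // lines is { "first", "second", "", "fourth" }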

    ////////////////////////////////////////////////////////////////////////////////
    // Instance variables
    ////////////////////////////////////////////////////////////////////////////////

    private String text;
    private String delimiter;
    private boolean returnDelimiterTokens = false;
    private List parsedTokens = new ArrayList();
    private int tokenIndex = 0;
}