/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.harmony.luni.tests.java.io;

import java.io.ByteArrayInputStream;
import java.io.CharArrayReader;
import java.io.IOException;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.io.Reader;
import java.io.StreamTokenizer;
import java.io.StringBufferInputStream;

import tests.support.Support_StringReader;

public class StreamTokenizerTest extends junit.framework.TestCase {
    Support_StringReader r;

    StreamTokenizer st;

    String testString;

    /**
     * @tests java.io.StreamTokenizer#StreamTokenizer(java.io.InputStream)
     */
    @SuppressWarnings("deprecation")
    public void test_ConstructorLjava_io_InputStream() throws IOException {
        st = new StreamTokenizer(new StringBufferInputStream(
                "/comments\n d 8 'h'"));

        assertEquals("the next token returned should be the letter d",
                StreamTokenizer.TT_WORD, st.nextToken());
        assertEquals("the next token returned should be the letter d",
                "d", st.sval);

        assertEquals("the next token returned should be the digit 8",
                StreamTokenizer.TT_NUMBER, st.nextToken());
        assertEquals("the next token returned should be the digit 8",
                8.0, st.nval);

        assertEquals("the next token returned should be the quote character",
                '\'', st.nextToken());
        assertEquals("the next token returned should be the quote character",
                "h", st.sval);
    }

    /**
     * @tests java.io.StreamTokenizer#StreamTokenizer(java.io.Reader)
     */
    public void test_ConstructorLjava_io_Reader() throws IOException {
        setTest("/testing\n d 8 'h' ");
        assertEquals(
                "the next token returned should be the letter d skipping the comments",
                StreamTokenizer.TT_WORD, st.nextToken());
        assertEquals("the next token returned should be the letter d",
                "d", st.sval);

        assertEquals("the next token returned should be the digit 8",
                StreamTokenizer.TT_NUMBER, st.nextToken());
        assertEquals("the next token returned should be the digit 8",
                8.0, st.nval);

        assertEquals("the next token returned should be the quote character",
                '\'', st.nextToken());
        assertEquals("the next token returned should be the quote character",
                "h", st.sval);
    }

    /**
     * @tests java.io.StreamTokenizer#commentChar(int)
     */
    public void test_commentCharI() throws IOException {
        setTest("*comment \n / 8 'h' ");
        st.ordinaryChar('/');
        st.commentChar('*');
        assertEquals(
                "nextToken() did not return the character / skipping the comments starting with *",
                '/', st.nextToken());
        assertTrue("the next token returned should be the digit 8",
                st.nextToken() == StreamTokenizer.TT_NUMBER && st.nval == 8.0);
        assertTrue("the next token returned should be the quote character",
                st.nextToken() == '\'' && st.sval.equals("h"));
    }

    /**
     * @tests java.io.StreamTokenizer#eolIsSignificant(boolean)
     */
    public void test_eolIsSignificantZ() throws IOException {
        setTest("d 8\n");
        // by default end of line characters are not significant
        assertTrue("nextToken did not return d",
                st.nextToken() == StreamTokenizer.TT_WORD
                        && st.sval.equals("d"));
        assertTrue("nextToken did not return 8",
                st.nextToken() == StreamTokenizer.TT_NUMBER
                        && st.nval == 8.0);
        assertTrue("nextToken should be the end of file",
                st.nextToken() == StreamTokenizer.TT_EOF);
        setTest("d\n");
        st.eolIsSignificant(true);
        // end of line characters are significant
        assertTrue("nextToken did not return d",
                st.nextToken() == StreamTokenizer.TT_WORD
                        && st.sval.equals("d"));
        assertTrue("nextToken should return the end of line",
                st.nextToken() == StreamTokenizer.TT_EOL);
    }

    /**
     * @tests java.io.StreamTokenizer#lineno()
     */
    public void test_lineno() throws IOException {
        setTest("d\n 8\n");
        assertEquals("the lineno should be 1", 1, st.lineno());
        st.nextToken();
        st.nextToken();
        assertEquals("the lineno should be 2", 2, st.lineno());
        st.nextToken();
        assertEquals("the lineno should be 3", 3, st.lineno());
    }

    /**
     * @tests java.io.StreamTokenizer#lowerCaseMode(boolean)
     */
    public void test_lowerCaseModeZ() throws Exception {
        // SM.
        setTest("HELLOWORLD");
        st.lowerCaseMode(true);
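        // lowerCaseMode(true) only affects TT_WORD tokens: sval is
        // lower-cased, regardless of the case used in the input.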

        st.nextToken();
        assertEquals("sval not converted to lowercase.", "helloworld",
                st.sval);
    }

    /**
     * @tests java.io.StreamTokenizer#nextToken()
     */
    @SuppressWarnings("deprecation")
    public void test_nextToken() throws IOException {
        // SM.
        setTest("\r\n/* fje fje 43.4 f \r\n f g */ 456.459 \r\n"
                + "Hello / \r\n \r\n \n \r \257 Hi \'Hello World\'");
        st.ordinaryChar('/');
        st.slashStarComments(true);
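        // With '/' ordinary and /* */ comments enabled, the expected tokens
        // are: 456.459, "Hello", '/', the word "\257" (0xAF lies in the
        // default word-character range), "Hi", the quoted "Hello World", EOF.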
        st.nextToken();
        assertTrue("Wrong Token type1: " + (char) st.ttype,
                st.ttype == StreamTokenizer.TT_NUMBER);
        st.nextToken();
        assertTrue("Wrong Token type2: " + st.ttype,
                st.ttype == StreamTokenizer.TT_WORD);
        st.nextToken();
        assertTrue("Wrong Token type3: " + st.ttype, st.ttype == '/');
        st.nextToken();
        assertTrue("Wrong Token type4: " + st.ttype,
                st.ttype == StreamTokenizer.TT_WORD);
        st.nextToken();
        assertTrue("Wrong Token type5: " + st.ttype,
                st.ttype == StreamTokenizer.TT_WORD);
        st.nextToken();
        assertTrue("Wrong Token type6: " + st.ttype, st.ttype == '\'');
        assertTrue("Wrong Token type7: " + st.ttype,
                st.sval.equals("Hello World"));
        st.nextToken();
        assertTrue("Wrong Token type8: " + st.ttype,
                st.ttype == StreamTokenizer.TT_EOF);

        final PipedInputStream pin = new PipedInputStream();
        PipedOutputStream pout = new PipedOutputStream(pin);
        pout.write("hello\n\r\r".getBytes());
        StreamTokenizer s = new StreamTokenizer(pin);
        s.eolIsSignificant(true);
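        // With eolIsSignificant(true) each line terminator ("\n", "\r" or
        // "\r\n") is reported as one TT_EOL token, whose value is '\n'.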
        assertTrue("Wrong token 1,1",
                s.nextToken() == StreamTokenizer.TT_WORD
                        && s.sval.equals("hello"));
        assertTrue("Wrong token 1,2", s.nextToken() == '\n');
        assertTrue("Wrong token 1,3", s.nextToken() == '\n');
        assertTrue("Wrong token 1,4", s.nextToken() == '\n');
        pout.close();
        assertTrue("Wrong token 1,5",
                s.nextToken() == StreamTokenizer.TT_EOF);
        StreamTokenizer tokenizer = new StreamTokenizer(
                new Support_StringReader("\n \r\n#"));
        tokenizer.ordinaryChar('\n'); // make \n ordinary
        tokenizer.eolIsSignificant(true);
        assertTrue("Wrong token 2,1", tokenizer.nextToken() == '\n');
        assertTrue("Wrong token 2,2", tokenizer.nextToken() == '\n');
        assertEquals("Wrong token 2,3", '#', tokenizer.nextToken());
    }

    /**
     * @tests java.io.StreamTokenizer#ordinaryChar(int)
     */
    public void test_ordinaryCharI() throws IOException {
        // SM.
        setTest("Ffjein 893");
        st.ordinaryChar('F');
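        // 'F' is no longer a word character, so it is returned as a
        // single-character token instead of starting the word "Ffjein".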
        st.nextToken();
        assertTrue("OrdinaryChar failed." + (char) st.ttype,
                st.ttype == 'F');
    }

    /**
     * @tests java.io.StreamTokenizer#ordinaryChars(int, int)
     */
    public void test_ordinaryCharsII() throws IOException {
        // SM.
        setTest("azbc iof z 893");
        st.ordinaryChars('a', 'z');
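        // Every lowercase letter is now an ordinary character, so "azbc"
        // produces the single-character tokens 'a' and 'z' first.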
        assertEquals("OrdinaryChars failed.", 'a', st.nextToken());
        assertEquals("OrdinaryChars failed.", 'z', st.nextToken());
    }

    /**
     * @tests java.io.StreamTokenizer#parseNumbers()
     */
    public void test_parseNumbers() throws IOException {
        // SM
        setTest("9.9 678");
        assertTrue("Base behavior failed.",
                st.nextToken() == StreamTokenizer.TT_NUMBER);
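        // Making the digits ordinary disables number parsing, so '6' comes
        // back as a single character; parseNumbers() below restores the
        // numeric syntax and the remaining "78" is parsed as a number.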
        st.ordinaryChars('0', '9');
        assertEquals("setOrdinary failed.", '6', st.nextToken());
        st.parseNumbers();
        assertTrue("parseNumbers failed.",
                st.nextToken() == StreamTokenizer.TT_NUMBER);
    }

    /**
     * @tests java.io.StreamTokenizer#pushBack()
     */
    public void test_pushBack() throws IOException {
        // SM.
        setTest("Hello 897");
        st.nextToken();
        st.pushBack();
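        // pushBack() makes the next call to nextToken() return the current
        // token ("Hello") again.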
        assertTrue("PushBack failed.",
                st.nextToken() == StreamTokenizer.TT_WORD);
    }

    /**
     * @tests java.io.StreamTokenizer#quoteChar(int)
     */
    public void test_quoteCharI() throws IOException {
        // SM
        setTest("<Hello World< HelloWorldH");
        st.quoteChar('<');
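        // '<' now delimits string constants: ttype is the quote character
        // itself and sval holds the body without the delimiters.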
        assertEquals("QuoteChar failed.", '<', st.nextToken());
        assertEquals("QuoteChar failed.", "Hello World", st.sval);
        st.quoteChar('H');
        st.nextToken();
        assertEquals("QuoteChar failed for word.", "elloWorld", st.sval);
    }

    /**
     * @tests java.io.StreamTokenizer#resetSyntax()
     */
    public void test_resetSyntax() throws IOException {
        // SM
        setTest("H 9\' ello World");
        st.resetSyntax();
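        // After resetSyntax() every character is ordinary, so whitespace,
        // digits and quote characters all come back one character at a time.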
        int tok = st.nextToken();
        assertTrue("resetSyntax failed1." + (char) tok, tok == 'H');
        tok = st.nextToken();
        assertTrue("resetSyntax failed2." + (char) tok, tok == ' ');
        tok = st.nextToken();
        assertTrue("resetSyntax failed3." + (char) tok, tok == '9');
        tok = st.nextToken();
        assertTrue("resetSyntax failed4." + (char) tok, tok == '\'');
    }

    /**
     * @tests java.io.StreamTokenizer#slashSlashComments(boolean)
     */
    public void test_slashSlashCommentsZ() throws IOException {
        // SM.
        setTest("// foo \r\n /fiji \r\n -456");
        st.ordinaryChar('/');
        st.slashSlashComments(true);
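        // "//" still starts a comment even though '/' is an ordinary
        // character, so the first line is skipped; the single '/' before
        // "fiji" is returned as an ordinary token.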
        assertEquals("the single '/' should be returned as an ordinary token",
                '/', st.nextToken());
        assertTrue("the word following the '/' should be returned",
                st.nextToken() == StreamTokenizer.TT_WORD);
    }

    /**
     * @tests java.io.StreamTokenizer#slashSlashComments(boolean)
     */
    public void test_slashSlashComments_withSSOpen() throws IOException {
        Reader reader = new CharArrayReader("t // t t t".toCharArray());

        StreamTokenizer st = new StreamTokenizer(reader);
        st.slashSlashComments(true);
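        // '/' keeps its default comment-character role here, so everything
        // after the first word on the line is discarded and only "t" and EOF
        // are seen.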

        assertEquals(StreamTokenizer.TT_WORD, st.nextToken());
        assertEquals(StreamTokenizer.TT_EOF, st.nextToken());
    }

    /**
     * @tests java.io.StreamTokenizer#slashSlashComments(boolean)
     */
    public void test_slashSlashComments_withSSOpen_NoComment()
            throws IOException {
        Reader reader = new CharArrayReader("// t".toCharArray());

        StreamTokenizer st = new StreamTokenizer(reader);
        st.slashSlashComments(true);
        st.ordinaryChar('/');
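        // Even though '/' is ordinary, slashSlashComments(true) recognises
        // the leading "//" and skips the whole line, leaving an empty stream.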

        assertEquals(StreamTokenizer.TT_EOF, st.nextToken());
    }

    /**
     * @tests java.io.StreamTokenizer#slashSlashComments(boolean)
     */
    public void test_slashSlashComments_withSSClosed()
            throws IOException {
        Reader reader = new CharArrayReader("// t".toCharArray());

        StreamTokenizer st = new StreamTokenizer(reader);
        st.slashSlashComments(false);
        st.ordinaryChar('/');
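        // With slash-slash comments disabled and '/' ordinary, the two
        // slashes come back as separate tokens followed by the word "t".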

        assertEquals('/', st.nextToken());
        assertEquals('/', st.nextToken());
        assertEquals(StreamTokenizer.TT_WORD, st.nextToken());
    }

    /**
     * @tests java.io.StreamTokenizer#slashStarComments(boolean)
     */
    public void test_slashStarCommentsZ() throws IOException {
        setTest("/* foo \r\n /fiji \r\n*/ -456");
        st.ordinaryChar('/');
        st.slashStarComments(true);
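        // With '/' ordinary and slash-star comments enabled, the multi-line
        // /* ... */ block is skipped entirely and the first token is the
        // number -456.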
        assertTrue("the /* */ comment should be skipped before the number",
                st.nextToken() == StreamTokenizer.TT_NUMBER);
    }

    /**
     * @tests java.io.StreamTokenizer#slashStarComments(boolean)
     */
    public void test_slashStarComments_withSTOpen() throws IOException {
        Reader reader = new CharArrayReader("t /* t */ t".toCharArray());

        StreamTokenizer st = new StreamTokenizer(reader);
        st.slashStarComments(true);
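        // The embedded /* t */ comment is skipped, leaving only the two
        // surrounding words before EOF.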

        assertEquals(StreamTokenizer.TT_WORD, st.nextToken());
        assertEquals(StreamTokenizer.TT_WORD, st.nextToken());
        assertEquals(StreamTokenizer.TT_EOF, st.nextToken());
    }

    /**
     * @tests java.io.StreamTokenizer#slashStarComments(boolean)
     */
    public void test_slashStarComments_withSTClosed()
            throws IOException {
        Reader reader = new CharArrayReader("t /* t */ t".toCharArray());

        StreamTokenizer st = new StreamTokenizer(reader);
        st.slashStarComments(false);
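        // With slash-star comments disabled, '/' falls back to its default
        // comment-character role, so the rest of the line after the first
        // word is discarded.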

        assertEquals(StreamTokenizer.TT_WORD, st.nextToken());
        assertEquals(StreamTokenizer.TT_EOF, st.nextToken());
    }

    /**
     * @tests java.io.StreamTokenizer#toString()
     */
    public void test_toString() throws IOException {
        setTest("ABC Hello World");
        st.nextToken();
        assertTrue("toString failed." + st.toString(),
                st.toString().equals("Token[ABC], line 1"));

        // Regression test for HARMONY-4070
        byte[] data = new byte[] { (byte) '-' };
        StreamTokenizer tokenizer = new StreamTokenizer(
                new ByteArrayInputStream(data));
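        // A lone '-' does not start a number, so nextToken() returns it as
        // the ordinary character '-' and toString() renders it quoted.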
        tokenizer.nextToken();
        String result = tokenizer.toString();
        assertEquals("Token['-'], line 1", result);
    }

    /**
     * @tests java.io.StreamTokenizer#whitespaceChars(int, int)
     */
    public void test_whitespaceCharsII() throws IOException {
        setTest("azbc iof z 893");
        st.whitespaceChars('a', 'z');
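        // All lowercase letters are now treated as whitespace, so the words
        // are skipped and the first token is the number 893.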
        assertTrue("whitespaceChars failed.",
                st.nextToken() == StreamTokenizer.TT_NUMBER);
    }

    /**
     * @tests java.io.StreamTokenizer#wordChars(int, int)
     */
    public void test_wordCharsII() throws IOException {
        setTest("A893 -9B87");
        st.wordChars('0', '9');
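        // Digits are now word characters as well, so they can appear inside
        // a word ("A893"); a token starting with '-' is still parsed as a
        // number, so "-9B87" yields the number -9 and then the word "B87".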
        assertTrue("WordChar failed1.",
                st.nextToken() == StreamTokenizer.TT_WORD);
        assertEquals("WordChar failed2.", "A893", st.sval);
        assertTrue("WordChar failed3.",
                st.nextToken() == StreamTokenizer.TT_NUMBER);
        st.nextToken();
        assertEquals("WordChar failed4.", "B87", st.sval);

        setTest(" Hello World");
        st.wordChars(' ', ' ');
        st.nextToken();
        assertEquals("WordChars failed for whitespace.", "Hello World",
                st.sval);

        setTest(" Hello World\r\n \'Hello World\' Hello\' World");
        st.wordChars(' ', ' ');
        st.wordChars('\'', '\'');
        st.nextToken();
        assertTrue("WordChars failed for whitespace: " + st.sval,
                st.sval.equals("Hello World"));
        st.nextToken();
        assertTrue("WordChars failed for quote1: " + st.sval,
                st.sval.equals("\'Hello World\' Hello\' World"));
    }

    private void setTest(String s) {
        testString = s;
        r = new Support_StringReader(testString);
        st = new StreamTokenizer(r);
    }

    protected void setUp() {
    }

    protected void tearDown() {
    }
}
|