package org.apache.lucene.index;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.util.Arrays;

import org.apache.lucene.util.LuceneTestCase;

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.store.RAMDirectory;

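/**
 * Tests IndexWriter's delete-by-term support: deletes buffered against
 * RAM and on-disk segments, batched deletes, and behavior when disk
 * fills up or a flush hits an exception. Each test runs two passes,
 * once with autoCommit=true and once with autoCommit=false.
 */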
public class TestIndexWriterDelete extends LuceneTestCase {

  // test the simple case
  public void testSimpleCase() throws IOException {
    String[] keywords = { "1", "2" };
    String[] unindexed = { "Netherlands", "Italy" };
    String[] unstored = { "Amsterdam has lots of bridges",
        "Venice has lots of canals" };
    String[] text = { "Amsterdam", "Venice" };

    for (int pass = 0; pass < 2; pass++) {
      boolean autoCommit = (0 == pass);

      Directory dir = new RAMDirectory();
      IndexWriter modifier = new IndexWriter(dir, autoCommit,
          new WhitespaceAnalyzer(), true);
      modifier.setUseCompoundFile(true);
      modifier.setMaxBufferedDeleteTerms(1);

      for (int i = 0; i < keywords.length; i++) {
        Document doc = new Document();
        doc.add(new Field("id", keywords[i], Field.Store.YES,
            Field.Index.UN_TOKENIZED));
        doc.add(new Field("country", unindexed[i], Field.Store.YES,
            Field.Index.NO));
        doc.add(new Field("contents", unstored[i], Field.Store.NO,
            Field.Index.TOKENIZED));
        doc.add(new Field("city", text[i], Field.Store.YES,
            Field.Index.TOKENIZED));
        modifier.addDocument(doc);
      }
      modifier.optimize();

      if (!autoCommit) {
        modifier.close();
      }

      Term term = new Term("city", "Amsterdam");
      int hitCount = getHitCount(dir, term);
      assertEquals(1, hitCount);
      if (!autoCommit) {
        modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer());
        modifier.setUseCompoundFile(true);
      }
      modifier.deleteDocuments(term);
      if (!autoCommit) {
        modifier.close();
      }
      hitCount = getHitCount(dir, term);
      assertEquals(0, hitCount);

      if (autoCommit) {
        modifier.close();
      }
      dir.close();
    }
  }

  // test when delete terms only apply to disk segments
  public void testNonRAMDelete() throws IOException {
    for (int pass = 0; pass < 2; pass++) {
      boolean autoCommit = (0 == pass);

      Directory dir = new RAMDirectory();
      IndexWriter modifier = new IndexWriter(dir, autoCommit,
          new WhitespaceAnalyzer(), true);
      modifier.setMaxBufferedDocs(2);
      modifier.setMaxBufferedDeleteTerms(2);

      int id = 0;
      int value = 100;

      for (int i = 0; i < 7; i++) {
        addDoc(modifier, ++id, value);
      }
      modifier.flush();

      assertEquals(0, modifier.getNumBufferedDocuments());
      assertTrue(0 < modifier.getSegmentCount());

      if (!autoCommit) {
        modifier.close();
      }

      IndexReader reader = IndexReader.open(dir);
      assertEquals(7, reader.numDocs());
      reader.close();

      if (!autoCommit) {
        modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer());
        modifier.setMaxBufferedDocs(2);
        modifier.setMaxBufferedDeleteTerms(2);
      }

      modifier.deleteDocuments(new Term("value", String.valueOf(value)));
      modifier.deleteDocuments(new Term("value", String.valueOf(value)));

      if (!autoCommit) {
        modifier.close();
      }

      reader = IndexReader.open(dir);
      assertEquals(0, reader.numDocs());
      reader.close();
      if (autoCommit) {
        modifier.close();
      }
      dir.close();
    }
  }

  // test when delete terms only apply to ram segments
  public void testRAMDeletes() throws IOException {
    for (int pass = 0; pass < 2; pass++) {
      boolean autoCommit = (0 == pass);
      Directory dir = new RAMDirectory();
      IndexWriter modifier = new IndexWriter(dir, autoCommit,
          new WhitespaceAnalyzer(), true);
      modifier.setMaxBufferedDocs(4);
      modifier.setMaxBufferedDeleteTerms(4);

      int id = 0;
      int value = 100;

      addDoc(modifier, ++id, value);
      modifier.deleteDocuments(new Term("value", String.valueOf(value)));
      addDoc(modifier, ++id, value);
      modifier.deleteDocuments(new Term("value", String.valueOf(value)));

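      // two delete calls were buffered, but they share a single unique term: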
      assertEquals(2, modifier.getNumBufferedDeleteTerms());
      assertEquals(1, modifier.getBufferedDeleteTermsSize());

      addDoc(modifier, ++id, value);
      assertEquals(0, modifier.getSegmentCount());
      modifier.flush();

      if (!autoCommit) {
        modifier.close();
      }

      IndexReader reader = IndexReader.open(dir);
      assertEquals(1, reader.numDocs());

      int hitCount = getHitCount(dir, new Term("id", String.valueOf(id)));
      assertEquals(1, hitCount);
      reader.close();
      if (autoCommit) {
        modifier.close();
      }
      dir.close();
    }
  }

  // test when delete terms apply to both disk and ram segments
  public void testBothDeletes() throws IOException {
    for (int pass = 0; pass < 2; pass++) {
      boolean autoCommit = (0 == pass);

      Directory dir = new RAMDirectory();
      IndexWriter modifier = new IndexWriter(dir, autoCommit,
          new WhitespaceAnalyzer(), true);
      modifier.setMaxBufferedDocs(100);
      modifier.setMaxBufferedDeleteTerms(100);

      int id = 0;
      int value = 100;

      for (int i = 0; i < 5; i++) {
        addDoc(modifier, ++id, value);
      }

      value = 200;
      for (int i = 0; i < 5; i++) {
        addDoc(modifier, ++id, value);
      }
      modifier.flush();

      for (int i = 0; i < 5; i++) {
        addDoc(modifier, ++id, value);
      }
      modifier.deleteDocuments(new Term("value", String.valueOf(value)));

      modifier.flush();
      if (!autoCommit) {
        modifier.close();
      }

      IndexReader reader = IndexReader.open(dir);
      assertEquals(5, reader.numDocs());
      reader.close();
      if (autoCommit) {
        modifier.close();
      }
      dir.close();
    }
  }

  // test that batched delete terms are flushed together
  public void testBatchDeletes() throws IOException {
    for (int pass = 0; pass < 2; pass++) {
      boolean autoCommit = (0 == pass);
      Directory dir = new RAMDirectory();
      IndexWriter modifier = new IndexWriter(dir, autoCommit,
          new WhitespaceAnalyzer(), true);
      modifier.setMaxBufferedDocs(2);
      modifier.setMaxBufferedDeleteTerms(2);

      int id = 0;
      int value = 100;

      for (int i = 0; i < 7; i++) {
        addDoc(modifier, ++id, value);
      }
      modifier.flush();
      if (!autoCommit) {
        modifier.close();
      }

      IndexReader reader = IndexReader.open(dir);
      assertEquals(7, reader.numDocs());
      reader.close();

      if (!autoCommit) {
        modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer());
        modifier.setMaxBufferedDocs(2);
        modifier.setMaxBufferedDeleteTerms(2);
      }

      id = 0;
      modifier.deleteDocuments(new Term("id", String.valueOf(++id)));
      modifier.deleteDocuments(new Term("id", String.valueOf(++id)));

      if (!autoCommit) {
        modifier.close();
      }

      reader = IndexReader.open(dir);
      assertEquals(5, reader.numDocs());
      reader.close();

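      // delete several terms in a single call; they are buffered and
      // flushed together: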
      Term[] terms = new Term[3];
      for (int i = 0; i < terms.length; i++) {
        terms[i] = new Term("id", String.valueOf(++id));
      }
      if (!autoCommit) {
        modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer());
        modifier.setMaxBufferedDocs(2);
        modifier.setMaxBufferedDeleteTerms(2);
      }
      modifier.deleteDocuments(terms);
      if (!autoCommit) {
        modifier.close();
      }
      reader = IndexReader.open(dir);
      assertEquals(2, reader.numDocs());
      reader.close();

      if (autoCommit) {
        modifier.close();
      }
      dir.close();
    }
  }

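  /**
   * Adds a document with an analyzed "content" field plus keyword "id"
   * and "value" fields usable as delete terms.
   */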
  private void addDoc(IndexWriter modifier, int id, int value)
      throws IOException {
    Document doc = new Document();
    doc.add(new Field("content", "aaa", Field.Store.NO,
        Field.Index.TOKENIZED));
    doc.add(new Field("id", String.valueOf(id), Field.Store.YES,
        Field.Index.UN_TOKENIZED));
    doc.add(new Field("value", String.valueOf(value), Field.Store.NO,
        Field.Index.UN_TOKENIZED));
    modifier.addDocument(doc);
  }

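  /** Opens a searcher on dir and returns the number of hits for term. */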
  private int getHitCount(Directory dir, Term term) throws IOException {
    IndexSearcher searcher = new IndexSearcher(dir);
    int hitCount = searcher.search(new TermQuery(term)).length();
    searcher.close();
    return hitCount;
  }

  public void testDeletesOnDiskFull() throws IOException {
    testOperationsOnDiskFull(false);
  }

  public void testUpdatesOnDiskFull() throws IOException {
    testOperationsOnDiskFull(true);
  }

  /**
   * Make sure if modifier tries to commit but hits disk full that modifier
   * remains consistent and usable. Similar to TestIndexReader.testDiskFull().
   */
  private void testOperationsOnDiskFull(boolean updates) throws IOException {

    boolean debug = false;
    Term searchTerm = new Term("content", "aaa");
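    // 157 docs are indexed up front; the inner loop then deletes or
    // updates 13 of them (ids 12, 24, ..., 156), so a search for "aaa"
    // should go from 157 hits down to 144: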
    int START_COUNT = 157;
    int END_COUNT = 144;

    for (int pass = 0; pass < 2; pass++) {
      boolean autoCommit = (0 == pass);

      // First build up a starting index:
      RAMDirectory startDir = new RAMDirectory();
      IndexWriter writer = new IndexWriter(startDir, autoCommit,
          new WhitespaceAnalyzer(), true);
      for (int i = 0; i < 157; i++) {
        Document d = new Document();
        d.add(new Field("id", Integer.toString(i), Field.Store.YES,
            Field.Index.UN_TOKENIZED));
        d.add(new Field("content", "aaa " + i, Field.Store.NO,
            Field.Index.TOKENIZED));
        writer.addDocument(d);
      }
      writer.close();

      long diskUsage = startDir.sizeInBytes();
      long diskFree = diskUsage + 10;

      IOException err = null;

      boolean done = false;

      // Iterate w/ ever increasing free disk space:
      while (!done) {
        MockRAMDirectory dir = new MockRAMDirectory(startDir);
        IndexWriter modifier = new IndexWriter(dir, autoCommit,
            new WhitespaceAnalyzer());

        modifier.setMaxBufferedDocs(1000); // use flush or close
        modifier.setMaxBufferedDeleteTerms(1000); // use flush or close

        // For each disk size, first try to commit against
        // dir that will hit random IOExceptions & disk
        // full; after, give it infinite disk space & turn
        // off random IOExceptions & retry w/ same reader:
        boolean success = false;

        for (int x = 0; x < 2; x++) {

          double rate = 0.1;
          double diskRatio = ((double) diskFree) / diskUsage;
          long thisDiskFree;
          String testName;

          if (0 == x) {
            thisDiskFree = diskFree;
            if (diskRatio >= 2.0) {
              rate /= 2;
            }
            if (diskRatio >= 4.0) {
              rate /= 2;
            }
            if (diskRatio >= 6.0) {
              rate = 0.0;
            }
            if (debug) {
              System.out.println("\ncycle: " + diskFree + " bytes");
            }
            testName = "disk full during modifier.close() @ " + thisDiskFree
                + " bytes";
          } else {
            thisDiskFree = 0;
            rate = 0.0;
            if (debug) {
              System.out.println("\ncycle: same writer: unlimited disk space");
            }
            testName = "writer re-use after disk full";
          }

          dir.setMaxSizeInBytes(thisDiskFree);
          dir.setRandomIOExceptionRate(rate, diskFree);

          try {
            if (0 == x) {
              int docId = 12;
              for (int i = 0; i < 13; i++) {
                if (updates) {
                  Document d = new Document();
                  d.add(new Field("id", Integer.toString(i), Field.Store.YES,
                      Field.Index.UN_TOKENIZED));
                  d.add(new Field("content", "bbb " + i, Field.Store.NO,
                      Field.Index.TOKENIZED));
                  modifier.updateDocument(new Term("id", Integer
                      .toString(docId)), d);
                } else { // deletes
                  modifier.deleteDocuments(new Term("id", Integer
                      .toString(docId)));
                  // modifier.setNorm(docId, "contents", (float)2.0);
                }
                docId += 12;
              }
            }
            modifier.close();
            success = true;
            if (0 == x) {
              done = true;
            }
          } catch (IOException e) {
            if (debug) {
              System.out.println("  hit IOException: " + e);
              e.printStackTrace(System.out);
            }
            err = e;
            if (1 == x) {
              e.printStackTrace();
              fail(testName + " hit IOException after disk space was freed up");
            }
          }

          // Whether we succeeded or failed, check that all
          // un-referenced files were in fact deleted (ie,
          // we did not create garbage). Just create a
          // new IndexFileDeleter, have it delete
          // unreferenced files, then verify that in fact
          // no files were deleted:
          String[] startFiles = dir.list();
          SegmentInfos infos = new SegmentInfos();
          infos.read(dir);
          new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(),
              infos, null, null);
          String[] endFiles = dir.list();

          Arrays.sort(startFiles);
          Arrays.sort(endFiles);

          // for(int i=0;i<startFiles.length;i++) {
          //   System.out.println("  startFiles: " + i + ": " + startFiles[i]);
          // }

          if (!Arrays.equals(startFiles, endFiles)) {
            String successStr;
            if (success) {
              successStr = "success";
            } else {
              successStr = "IOException";
              err.printStackTrace();
            }
            fail("modifier.close() failed to delete unreferenced files after "
                + successStr + " (" + diskFree + " bytes): before delete:\n "
                + arrayToString(startFiles) + "\n after delete:\n "
                + arrayToString(endFiles));
          }

          // Finally, verify index is not corrupt, and, if
          // we succeeded, we see all docs changed, and if
          // we failed, we see either all docs or no docs
          // changed (transactional semantics):
          IndexReader newReader = null;
          try {
            newReader = IndexReader.open(dir);
          } catch (IOException e) {
            e.printStackTrace();
            fail(testName
                + ": exception when creating IndexReader after disk full during close: "
                + e);
          }

          IndexSearcher searcher = new IndexSearcher(newReader);
          Hits hits = null;
          try {
            hits = searcher.search(new TermQuery(searchTerm));
          } catch (IOException e) {
            e.printStackTrace();
            fail(testName + ": exception when searching: " + e);
          }
          int result2 = hits.length();
          if (success) {
            if (x == 0 && result2 != END_COUNT) {
              fail(testName
                  + ": method did not throw exception but hits.length for search on term 'aaa' is "
                  + result2 + " instead of expected " + END_COUNT);
            } else if (x == 1 && result2 != START_COUNT
                && result2 != END_COUNT) {
              // It's possible that the first exception was
              // "recoverable" wrt pending deletes, in which
              // case the pending deletes are retained and
              // then re-flushing (with plenty of disk
              // space) will succeed in flushing the
              // deletes:
              fail(testName
                  + ": method did not throw exception but hits.length for search on term 'aaa' is "
                  + result2 + " instead of expected " + START_COUNT + " or "
                  + END_COUNT);
            }
          } else {
            // On hitting exception we still may have added
            // all docs:
            if (result2 != START_COUNT && result2 != END_COUNT) {
              err.printStackTrace();
              fail(testName
                  + ": method did throw exception but hits.length for search on term 'aaa' is "
                  + result2 + " instead of expected " + START_COUNT + " or "
                  + END_COUNT);
            }
          }

          searcher.close();
          newReader.close();

          if (result2 == END_COUNT) {
            break;
          }
        }

        dir.close();

        // Try again with 10 more bytes of free space:
        diskFree += 10;
      }
    }
  }

  // Verifies that buffered deletes are cleared when an
  // exception is hit during flush.
  public void testErrorAfterApplyDeletes() throws IOException {

    MockRAMDirectory.Failure failure = new MockRAMDirectory.Failure() {
      boolean sawMaybe = false;
      boolean failed = false;

      public MockRAMDirectory.Failure reset() {
        sawMaybe = false;
        failed = false;
        return this;
      }

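      // Arm once a write happens inside applyDeletes, then throw on the
      // first write after applyDeletes has left the stack: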
      public void eval(MockRAMDirectory dir) throws IOException {
        if (sawMaybe && !failed) {
          boolean seen = false;
          StackTraceElement[] trace = new Exception().getStackTrace();
          for (int i = 0; i < trace.length; i++) {
            if ("applyDeletes".equals(trace[i].getMethodName())) {
              seen = true;
              break;
            }
          }
          if (!seen) {
            // Only fail once we are no longer in applyDeletes
            failed = true;
            throw new IOException("fail after applyDeletes");
          }
        }
        if (!failed) {
          StackTraceElement[] trace = new Exception().getStackTrace();
          for (int i = 0; i < trace.length; i++) {
            if ("applyDeletes".equals(trace[i].getMethodName())) {
              sawMaybe = true;
              break;
            }
          }
        }
      }
    };

    // create a couple of files

    String[] keywords = { "1", "2" };
    String[] unindexed = { "Netherlands", "Italy" };
    String[] unstored = { "Amsterdam has lots of bridges",
        "Venice has lots of canals" };
    String[] text = { "Amsterdam", "Venice" };

    for (int pass = 0; pass < 2; pass++) {
      boolean autoCommit = (0 == pass);
      MockRAMDirectory dir = new MockRAMDirectory();
      IndexWriter modifier = new IndexWriter(dir, autoCommit,
          new WhitespaceAnalyzer(), true);
      modifier.setUseCompoundFile(true);
      modifier.setMaxBufferedDeleteTerms(2);

      dir.failOn(failure.reset());

      for (int i = 0; i < keywords.length; i++) {
        Document doc = new Document();
        doc.add(new Field("id", keywords[i], Field.Store.YES,
            Field.Index.UN_TOKENIZED));
        doc.add(new Field("country", unindexed[i], Field.Store.YES,
            Field.Index.NO));
        doc.add(new Field("contents", unstored[i], Field.Store.NO,
            Field.Index.TOKENIZED));
        doc.add(new Field("city", text[i], Field.Store.YES,
            Field.Index.TOKENIZED));
        modifier.addDocument(doc);
      }
      // flush (and commit if ac)

      modifier.optimize();

      // commit if !ac

      if (!autoCommit) {
        modifier.close();
      }
      // one of the two docs matches the term

      Term term = new Term("city", "Amsterdam");
      int hitCount = getHitCount(dir, term);
      assertEquals(1, hitCount);

      // open the writer again (closed above)

      if (!autoCommit) {
        modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer());
        modifier.setUseCompoundFile(true);
      }

      // delete the doc
      // max buf del terms is two, so this is buffered

      modifier.deleteDocuments(term);

      // add a doc (needed for the !ac case; see below)
      // doc remains buffered

      Document doc = new Document();
      modifier.addDocument(doc);

      // flush the changes, the buffered deletes, and the new doc

      // The failure object will fail on the first write after the del
      // file gets created when processing the buffered delete

      // in the ac case, this will be when writing the new segments
      // files so we really don't need the new doc, but it's harmless

      // in the !ac case, a new segments file won't be created but in
      // this case, creation of the cfs file happens next so we need
      // the doc (to test that it's okay that we don't lose deletes if
      // failing while creating the cfs file)

      boolean failed = false;
      try {
        modifier.flush();
      } catch (IOException ioe) {
        failed = true;
      }

      assertTrue(failed);

      // The flush above failed, so we need to retry it (which will
      // succeed, because the failure is a one-shot)

      if (!autoCommit) {
        modifier.close();
      } else {
        modifier.flush();
      }

      hitCount = getHitCount(dir, term);

      // If the delete was not cleared then hit count will
      // be 0. With autoCommit=false, we hit the exception
      // on creating the compound file, so the delete was
      // flushed successfully.
      assertEquals(autoCommit ? 1 : 0, hitCount);

      if (autoCommit) {
        modifier.close();
      }

      dir.close();
    }
  }

  // Verifies that the files the docs writer creates before a segment
  // is written are cleaned up if there's an i/o error

  public void testErrorInDocsWriterAdd() throws IOException {

    MockRAMDirectory.Failure failure = new MockRAMDirectory.Failure() {
      boolean failed = false;

      public MockRAMDirectory.Failure reset() {
        failed = false;
        return this;
      }

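      // Throw on the very first write, then stay silent for the rest
      // of the test: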
      public void eval(MockRAMDirectory dir) throws IOException {
        if (!failed) {
          failed = true;
          throw new IOException("fail in add doc");
        }
      }
    };

    // create a couple of files

    String[] keywords = { "1", "2" };
    String[] unindexed = { "Netherlands", "Italy" };
    String[] unstored = { "Amsterdam has lots of bridges",
        "Venice has lots of canals" };
    String[] text = { "Amsterdam", "Venice" };

    for (int pass = 0; pass < 2; pass++) {
      boolean autoCommit = (0 == pass);
      MockRAMDirectory dir = new MockRAMDirectory();
      IndexWriter modifier = new IndexWriter(dir, autoCommit,
          new WhitespaceAnalyzer(), true);

      dir.failOn(failure.reset());

      for (int i = 0; i < keywords.length; i++) {
        Document doc = new Document();
        doc.add(new Field("id", keywords[i], Field.Store.YES,
            Field.Index.UN_TOKENIZED));
        doc.add(new Field("country", unindexed[i], Field.Store.YES,
            Field.Index.NO));
        doc.add(new Field("contents", unstored[i], Field.Store.NO,
            Field.Index.TOKENIZED));
        doc.add(new Field("city", text[i], Field.Store.YES,
            Field.Index.TOKENIZED));
        try {
          modifier.addDocument(doc);
        } catch (IOException io) {
          break;
        }
      }

      String[] startFiles = dir.list();
      SegmentInfos infos = new SegmentInfos();
      infos.read(dir);
      new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(),
          infos, null, null);
      String[] endFiles = dir.list();

      if (!Arrays.equals(startFiles, endFiles)) {
        fail("docswriter abort() failed to delete unreferenced files:\n before delete:\n "
            + arrayToString(startFiles) + "\n after delete:\n "
            + arrayToString(endFiles));
      }

      modifier.close();
    }
  }

  private String arrayToString(String[] l) {
    String s = "";
    for (int i = 0; i < l.length; i++) {
      if (i > 0) {
        s += "\n ";
      }
      s += l[i];
    }
    return s;
  }
}