package org.apache.lucene.index;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;

import org.apache.lucene.util.LuceneTestCase;

import java.io.IOException;
public class TestConcurrentMergeScheduler extends LuceneTestCase {

  private static final Analyzer ANALYZER = new SimpleAnalyzer();

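  // Failure hook for MockRAMDirectory: once doFail is set, any directory
  // operation whose stack includes a doFlush frame throws an IOException,
  // simulating a failure during flush.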
  private static class FailOnlyOnFlush extends MockRAMDirectory.Failure {
    boolean doFail = false;

    public void setDoFail() {
      this.doFail = true;
    }

    public void clearDoFail() {
      this.doFail = false;
    }

    public void eval(MockRAMDirectory dir) throws IOException {
      if (doFail) {
        StackTraceElement[] trace = new Exception().getStackTrace();
        for (int i = 0; i < trace.length; i++) {
          if ("doFlush".equals(trace[i].getMethodName())) {
            //new RuntimeException().printStackTrace(System.out);
            throw new IOException("now failing during flush");
          }
        }
      }
    }
  }

  // Make sure running BG merges still work fine even when
  // we are hitting exceptions during flushing.
  public void testFlushExceptions() throws IOException {

    MockRAMDirectory directory = new MockRAMDirectory();
    FailOnlyOnFlush failure = new FailOnlyOnFlush();
    directory.failOn(failure);

    IndexWriter writer = new IndexWriter(directory, ANALYZER, true);
    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
    writer.setMergeScheduler(cms);
    writer.setMaxBufferedDocs(2);
    Document doc = new Document();
    Field idField = new Field("id", "", Field.Store.YES, Field.Index.UN_TOKENIZED);
    doc.add(idField);
    for (int i = 0; i < 10; i++) {
      for (int j = 0; j < 20; j++) {
        idField.setValue(Integer.toString(i * 20 + j));
        writer.addDocument(doc);
      }

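      // Buffer one more doc, then make the next flush fail; that buffered
      // doc does not survive, which the final 200-doc assert relies on.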
      writer.addDocument(doc);

      failure.setDoFail();
      try {
        writer.flush();
        fail("failed to hit IOException");
      } catch (IOException ioe) {
        failure.clearDoFail();
      }
    }

    writer.close();
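    // 10 outer iterations x 20 docs each made it into the index: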
    IndexReader reader = IndexReader.open(directory);
    assertEquals(200, reader.numDocs());
    reader.close();
    directory.close();
  }

  // Test that deletes committed after a merge has started, but before it
  // finishes, are correctly merged back:
  public void testDeleteMerging() throws IOException {

    RAMDirectory directory = new MockRAMDirectory();

    IndexWriter writer = new IndexWriter(directory, ANALYZER, true);
    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
    writer.setMergeScheduler(cms);

    LogDocMergePolicy mp = new LogDocMergePolicy();
    writer.setMergePolicy(mp);

    // Force degenerate merging so we can get a mix of
    // merging of segments with and without deletes at the
    // start:
    mp.setMinMergeDocs(1000);

    Document doc = new Document();
    Field idField = new Field("id", "", Field.Store.YES, Field.Index.UN_TOKENIZED);
    doc.add(idField);
    for (int i = 0; i < 10; i++) {
      for (int j = 0; j < 100; j++) {
        idField.setValue(Integer.toString(i * 100 + j));
        writer.addDocument(doc);
      }

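      // Delete every doc whose id is congruent to i (mod 10), across
      // everything added so far: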
      int delID = i;
      while (delID < 100 * (1 + i)) {
        writer.deleteDocuments(new Term("id", "" + delID));
        delID += 10;
      }

      writer.flush();
    }

    writer.close();
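    // 1000 docs were added and 550 deleted (10 + 20 + ... + 100 over the
    // ten iterations), leaving 450.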
    IndexReader reader = IndexReader.open(directory);
    // Verify that we did not lose any deletes...
    assertEquals(450, reader.numDocs());
    reader.close();
    directory.close();
  }

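  // Repeatedly open, fill, and close a writer with background merges
  // running; the index must never be left with unreferenced files.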
  public void testNoExtraFiles() throws IOException {

    RAMDirectory directory = new MockRAMDirectory();

    for (int pass = 0; pass < 2; pass++) {

      boolean autoCommit = pass == 0;
      IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true);

      for (int iter = 0; iter < 7; iter++) {
        ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
        writer.setMergeScheduler(cms);
        writer.setMaxBufferedDocs(2);

        for (int j = 0; j < 21; j++) {
          Document doc = new Document();
          doc.add(new Field("content", "a b c", Field.Store.NO, Field.Index.TOKENIZED));
          writer.addDocument(doc);
        }

        writer.close();
        TestIndexWriter.assertNoUnreferencedFiles(directory,
            "testNoExtraFiles autoCommit=" + autoCommit);

        // Reopen
        writer = new IndexWriter(directory, autoCommit, ANALYZER, false);
      }

      writer.close();
    }

    directory.close();
  }

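  // Verify that close(false), which aborts any running merges instead of
  // waiting for them, still leaves a consistent index behind.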
  public void testNoWaitClose() throws IOException {
    RAMDirectory directory = new MockRAMDirectory();

    Document doc = new Document();
    Field idField = new Field("id", "", Field.Store.YES, Field.Index.UN_TOKENIZED);
    doc.add(idField);

    for (int pass = 0; pass < 2; pass++) {
      boolean autoCommit = pass == 0;
      IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true);

      for (int iter = 0; iter < 10; iter++) {
        ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
        writer.setMergeScheduler(cms);
        writer.setMaxBufferedDocs(2);
        writer.setMergeFactor(100);

        for (int j = 0; j < 201; j++) {
          idField.setValue(Integer.toString(iter * 201 + j));
          writer.addDocument(doc);
        }

        int delID = iter * 201;
        for (int j = 0; j < 20; j++) {
          writer.deleteDocuments(new Term("id", Integer.toString(delID)));
          delID += 5;
        }

        // Force a bunch of merge threads to kick off so we
        // stress out aborting them on close:
        writer.setMergeFactor(3);
        writer.addDocument(doc);
        writer.flush();

        writer.close(false);

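        // Each iteration adds 202 docs (201 + 1) and deletes 20, so the
        // cumulative count is (1 + iter) * 182: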
        IndexReader reader = IndexReader.open(directory);
        assertEquals((1 + iter) * 182, reader.numDocs());
        reader.close();

        // Reopen
        writer = new IndexWriter(directory, autoCommit, ANALYZER, false);
      }
      writer.close();
    }

    directory.close();
  }
}