Source Code Cross Referenced for TestConcurrentMergeScheduler.java in » Search Engine » lucene » org.apache.lucene.index



package org.apache.lucene.index;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;

import org.apache.lucene.util.LuceneTestCase;

import java.io.IOException;

public class TestConcurrentMergeScheduler extends LuceneTestCase {

    private static final Analyzer ANALYZER = new SimpleAnalyzer();

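    // Failure hook registered with the directory via failOn():
    // MockRAMDirectory invokes eval() on registered Failure objects as
    // it services directory operations; this one walks the current
    // stack trace and throws an IOException whenever a flush
    // ("doFlush") is underway and doFail has been set.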
    private static class FailOnlyOnFlush extends MockRAMDirectory.Failure {
        boolean doFail = false;

        public void setDoFail() {
            this.doFail = true;
        }

        public void clearDoFail() {
            this.doFail = false;
        }

        public void eval(MockRAMDirectory dir) throws IOException {
            if (doFail) {
                StackTraceElement[] trace = new Exception().getStackTrace();
                for (int i = 0; i < trace.length; i++) {
                    if ("doFlush".equals(trace[i].getMethodName())) {
                        //new RuntimeException().printStackTrace(System.out);
                        throw new IOException("now failing during flush");
                    }
                }
            }
        }
    }

    // Make sure running BG merges still work fine even when
    // we are hitting exceptions during flushing.
    public void testFlushExceptions() throws IOException {

        MockRAMDirectory directory = new MockRAMDirectory();
        FailOnlyOnFlush failure = new FailOnlyOnFlush();
        directory.failOn(failure);

        IndexWriter writer = new IndexWriter(directory, ANALYZER, true);
        ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
        writer.setMergeScheduler(cms);
        writer.setMaxBufferedDocs(2);
        Document doc = new Document();
        Field idField = new Field("id", "", Field.Store.YES, Field.Index.UN_TOKENIZED);
        doc.add(idField);
        for (int i = 0; i < 10; i++) {
            for (int j = 0; j < 20; j++) {
                idField.setValue(Integer.toString(i * 20 + j));
                writer.addDocument(doc);
            }

            writer.addDocument(doc);

            failure.setDoFail();
            try {
                writer.flush();
                fail("failed to hit IOException");
            } catch (IOException ioe) {
                failure.clearDoFail();
            }
        }

        writer.close();
        IndexReader reader = IndexReader.open(directory);
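        // Each iteration adds 21 docs, but the 21st is still buffered
        // when the injected flush failure hits, so it is lost with the
        // failed flush: 10 * 20 = 200 docs survive.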
        assertEquals(200, reader.numDocs());
        reader.close();
        directory.close();
    }

    // Test that deletes committed after a merge started and
    // before it finishes, are correctly merged back:
    public void testDeleteMerging() throws IOException {

        RAMDirectory directory = new MockRAMDirectory();

        IndexWriter writer = new IndexWriter(directory, ANALYZER, true);
        ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
        writer.setMergeScheduler(cms);

        LogDocMergePolicy mp = new LogDocMergePolicy();
        writer.setMergePolicy(mp);

        // Force degenerate merging so we can get a mix of
        // merging of segments with and without deletes at the
        // start:
        mp.setMinMergeDocs(1000);

        Document doc = new Document();
        Field idField = new Field("id", "", Field.Store.YES, Field.Index.UN_TOKENIZED);
        doc.add(idField);
        for (int i = 0; i < 10; i++) {
            for (int j = 0; j < 100; j++) {
                idField.setValue(Integer.toString(i * 100 + j));
                writer.addDocument(doc);
            }

            int delID = i;
            while (delID < 100 * (1 + i)) {
                writer.deleteDocuments(new Term("id", "" + delID));
                delID += 10;
            }

            writer.flush();
        }

        writer.close();
        IndexReader reader = IndexReader.open(directory);
        // Verify that we did not lose any deletes...
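        // Pass i deletes ids i, i+10, i+20, ... below 100*(i+1), so
        // block b (ids b*100 .. b*100+99) ends up keeping 10*b of its
        // 100 docs; summing over the 10 blocks gives
        // 10 * (0+1+...+9) = 450 surviving docs.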
        assertEquals(450, reader.numDocs());
        reader.close();
        directory.close();
    }

    public void testNoExtraFiles() throws IOException {

        RAMDirectory directory = new MockRAMDirectory();

        for (int pass = 0; pass < 2; pass++) {

            boolean autoCommit = pass == 0;
            IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true);

            for (int iter = 0; iter < 7; iter++) {
                ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
                writer.setMergeScheduler(cms);
                writer.setMaxBufferedDocs(2);

                for (int j = 0; j < 21; j++) {
                    Document doc = new Document();
                    doc.add(new Field("content", "a b c", Field.Store.NO, Field.Index.TOKENIZED));
                    writer.addDocument(doc);
                }

                writer.close();
                TestIndexWriter.assertNoUnreferencedFiles(directory,
                        "testNoExtraFiles autoCommit=" + autoCommit);

                // Reopen
                writer = new IndexWriter(directory, autoCommit, ANALYZER, false);
            }

            writer.close();
        }

        directory.close();
    }

    public void testNoWaitClose() throws IOException {
        RAMDirectory directory = new MockRAMDirectory();

        Document doc = new Document();
        Field idField = new Field("id", "", Field.Store.YES, Field.Index.UN_TOKENIZED);
        doc.add(idField);

        for (int pass = 0; pass < 2; pass++) {
            boolean autoCommit = pass == 0;
            IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true);

            for (int iter = 0; iter < 10; iter++) {
                ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
                writer.setMergeScheduler(cms);
                writer.setMaxBufferedDocs(2);
                writer.setMergeFactor(100);

                for (int j = 0; j < 201; j++) {
                    idField.setValue(Integer.toString(iter * 201 + j));
                    writer.addDocument(doc);
                }

                int delID = iter * 201;
                for (int j = 0; j < 20; j++) {
                    writer.deleteDocuments(new Term("id", Integer.toString(delID)));
                    delID += 5;
                }

                // Force a bunch of merge threads to kick off so we
                // stress out aborting them on close:
                writer.setMergeFactor(3);
                writer.addDocument(doc);
                writer.flush();

                writer.close(false);

                IndexReader reader = IndexReader.open(directory);
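                // 201 docs from the loop plus one extra add, minus
                // the 20 deleted ids, leaves a net 182 docs per
                // iteration: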
                assertEquals((1 + iter) * 182, reader.numDocs());
                reader.close();

                // Reopen
                writer = new IndexWriter(directory, autoCommit, ANALYZER, false);
            }
            writer.close();
        }

        directory.close();
    }
}