001: package org.apache.lucene.search;
002:
003: /**
004: * Licensed to the Apache Software Foundation (ASF) under one or more
005: * contributor license agreements. See the NOTICE file distributed with
006: * this work for additional information regarding copyright ownership.
007: * The ASF licenses this file to You under the Apache License, Version 2.0
008: * (the "License"); you may not use this file except in compliance with
009: * the License. You may obtain a copy of the License at
010: *
011: * http://www.apache.org/licenses/LICENSE-2.0
012: *
013: * Unless required by applicable law or agreed to in writing, software
014: * distributed under the License is distributed on an "AS IS" BASIS,
015: * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
016: * See the License for the specific language governing permissions and
017: * limitations under the License.
018: */
019:
020: import org.apache.lucene.document.Document;
021: import org.apache.lucene.document.FieldSelector;
022: import org.apache.lucene.index.CorruptIndexException;
023: import org.apache.lucene.index.Term;
024:
025: import java.io.IOException;
026: import java.util.HashMap;
027: import java.util.HashSet;
028: import java.util.Map;
029: import java.util.Set;
030:
031: /** Implements search over a set of <code>Searchables</code>.
032: *
033: * <p>Applications usually need only call the inherited {@link #search(Query)}
034: * or {@link #search(Query,Filter)} methods.
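 *
 * <p>A minimal usage sketch (the index paths and <code>query</code> below are
 * placeholders, not part of this class):
 * <pre>
 *   Searchable[] searchables = new Searchable[] {
 *       new IndexSearcher("/path/to/index1"),
 *       new IndexSearcher("/path/to/index2")
 *   };
 *   Searcher searcher = new MultiSearcher(searchables);
 *   Hits hits = searcher.search(query);
 *   searcher.close();
 * </pre>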
035: */
036: public class MultiSearcher extends Searcher {
037: /**
   * Document frequency cache acting as a dummy searcher.
   * This class is not a full-fledged Searcher; it only supports
   * the methods necessary to initialize Weights.
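   * Instances are created by {@link MultiSearcher#createWeight(Query)}, holding
   * document frequencies aggregated across all sub-searchers.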
041: */
042: private static class CachedDfSource extends Searcher {
043: private Map dfMap; // Map from Terms to corresponding doc freqs
044: private int maxDoc; // document count
045:
046: public CachedDfSource(Map dfMap, int maxDoc,
047: Similarity similarity) {
      this.dfMap = dfMap;
      this.maxDoc = maxDoc;
050: setSimilarity(similarity);
051: }
052:
    public int docFreq(Term term) {
      Integer df = (Integer) dfMap.get(term);
      if (df == null) {
        throw new IllegalArgumentException("df for term "
            + term.text() + " not available");
      }
      return df.intValue();
    }
063:
064: public int[] docFreqs(Term[] terms) {
065: int[] result = new int[terms.length];
066: for (int i = 0; i < terms.length; i++) {
067: result[i] = docFreq(terms[i]);
068: }
069: return result;
070: }
071:
072: public int maxDoc() {
073: return maxDoc;
074: }
075:
076: public Query rewrite(Query query) {
      // This is a bit of a hack. We know that a query which
      // creates a Weight based on this dummy searcher is
      // always already rewritten (see createWeight()).
      // Therefore we just return the unmodified query here.
081: return query;
082: }
083:
084: public void close() {
085: throw new UnsupportedOperationException();
086: }
087:
088: public Document doc(int i) {
089: throw new UnsupportedOperationException();
090: }
091:
092: public Document doc(int i, FieldSelector fieldSelector) {
093: throw new UnsupportedOperationException();
094: }
095:
096: public Explanation explain(Weight weight, int doc) {
097: throw new UnsupportedOperationException();
098: }
099:
100: public void search(Weight weight, Filter filter,
101: HitCollector results) {
102: throw new UnsupportedOperationException();
103: }
104:
105: public TopDocs search(Weight weight, Filter filter, int n) {
106: throw new UnsupportedOperationException();
107: }
108:
109: public TopFieldDocs search(Weight weight, Filter filter, int n,
110: Sort sort) {
111: throw new UnsupportedOperationException();
112: }
113: }
114:
115: private Searchable[] searchables;
116: private int[] starts;
117: private int maxDoc = 0;
118:
119: /** Creates a searcher which searches <i>searchables</i>. */
120: public MultiSearcher(Searchable[] searchables) throws IOException {
    this.searchables = searchables;
122:
123: starts = new int[searchables.length + 1]; // build starts array
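    // e.g. sub-searchers with maxDoc() of 10, 15 and 5 yield starts = {0, 10, 25, 30}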
124: for (int i = 0; i < searchables.length; i++) {
125: starts[i] = maxDoc;
126: maxDoc += searchables[i].maxDoc(); // compute maxDocs
127: }
128: starts[searchables.length] = maxDoc;
129: }
130:
131: /** Return the array of {@link Searchable}s this searches. */
132: public Searchable[] getSearchables() {
133: return searchables;
134: }
135:
136: protected int[] getStarts() {
137: return starts;
138: }
139:
140: // inherit javadoc
141: public void close() throws IOException {
142: for (int i = 0; i < searchables.length; i++)
143: searchables[i].close();
144: }
145:
146: public int docFreq(Term term) throws IOException {
147: int docFreq = 0;
148: for (int i = 0; i < searchables.length; i++)
149: docFreq += searchables[i].docFreq(term);
150: return docFreq;
151: }
152:
153: // inherit javadoc
154: public Document doc(int n) throws CorruptIndexException,
155: IOException {
156: int i = subSearcher(n); // find searcher index
157: return searchables[i].doc(n - starts[i]); // dispatch to searcher
158: }
159:
160: // inherit javadoc
161: public Document doc(int n, FieldSelector fieldSelector)
162: throws CorruptIndexException, IOException {
163: int i = subSearcher(n); // find searcher index
164: return searchables[i].doc(n - starts[i], fieldSelector); // dispatch to searcher
165: }
166:
167: /** Returns index of the searcher for document <code>n</code> in the array
168: * used to construct this searcher. */
169: public int subSearcher(int n) { // find searcher for doc n:
170: // replace w/ call to Arrays.binarySearch in Java 1.2
171: int lo = 0; // search starts array
172: int hi = searchables.length - 1; // for first element less
173: // than n, return its index
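    // e.g. with starts = {0, 10, 25, 30}, subSearcher(12) returns 1:
    // doc 12 lives in the second sub-searcher as its local doc 2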
174: while (hi >= lo) {
175: int mid = (lo + hi) >> 1;
176: int midValue = starts[mid];
177: if (n < midValue)
178: hi = mid - 1;
179: else if (n > midValue)
180: lo = mid + 1;
181: else { // found a match
182: while (mid + 1 < searchables.length
183: && starts[mid + 1] == midValue) {
184: mid++; // scan to last match
185: }
186: return mid;
187: }
188: }
189: return hi;
190: }
191:
192: /** Returns the document number of document <code>n</code> within its
193: * sub-index. */
194: public int subDoc(int n) {
195: return n - starts[subSearcher(n)];
196: }
197:
198: public int maxDoc() throws IOException {
199: return maxDoc;
200: }
201:
202: public TopDocs search(Weight weight, Filter filter, int nDocs)
203: throws IOException {
204:
205: HitQueue hq = new HitQueue(nDocs);
206: int totalHits = 0;
207:
208: for (int i = 0; i < searchables.length; i++) { // search each searcher
209: TopDocs docs = searchables[i].search(weight, filter, nDocs);
210: totalHits += docs.totalHits; // update totalHits
211: ScoreDoc[] scoreDocs = docs.scoreDocs;
212: for (int j = 0; j < scoreDocs.length; j++) { // merge scoreDocs into hq
213: ScoreDoc scoreDoc = scoreDocs[j];
214: scoreDoc.doc += starts[i]; // convert doc
215: if (!hq.insert(scoreDoc))
216: break; // no more scores > minScore
217: }
218: }
219:
220: ScoreDoc[] scoreDocs = new ScoreDoc[hq.size()];
221: for (int i = hq.size() - 1; i >= 0; i--)
222: // put docs in array
223: scoreDocs[i] = (ScoreDoc) hq.pop();
224:
225: float maxScore = (totalHits == 0) ? Float.NEGATIVE_INFINITY
226: : scoreDocs[0].score;
227:
228: return new TopDocs(totalHits, scoreDocs, maxScore);
229: }
230:
231: public TopFieldDocs search(Weight weight, Filter filter, int n,
232: Sort sort) throws IOException {
233: FieldDocSortedHitQueue hq = null;
234: int totalHits = 0;
235:
236: float maxScore = Float.NEGATIVE_INFINITY;
237:
238: for (int i = 0; i < searchables.length; i++) { // search each searcher
239: TopFieldDocs docs = searchables[i].search(weight, filter,
240: n, sort);
241:
242: if (hq == null)
243: hq = new FieldDocSortedHitQueue(docs.fields, n);
244: totalHits += docs.totalHits; // update totalHits
245: maxScore = Math.max(maxScore, docs.getMaxScore());
246: ScoreDoc[] scoreDocs = docs.scoreDocs;
247: for (int j = 0; j < scoreDocs.length; j++) { // merge scoreDocs into hq
248: ScoreDoc scoreDoc = scoreDocs[j];
249: scoreDoc.doc += starts[i]; // convert doc
250: if (!hq.insert(scoreDoc))
251: break; // no more scores > minScore
252: }
253: }
254:
255: ScoreDoc[] scoreDocs = new ScoreDoc[hq.size()];
256: for (int i = hq.size() - 1; i >= 0; i--)
257: // put docs in array
258: scoreDocs[i] = (ScoreDoc) hq.pop();
259:
260: return new TopFieldDocs(totalHits, scoreDocs, hq.getFields(),
261: maxScore);
262: }
263:
264: // inherit javadoc
265: public void search(Weight weight, Filter filter,
266: final HitCollector results) throws IOException {
267: for (int i = 0; i < searchables.length; i++) {
268:
269: final int start = starts[i];
270:
271: searchables[i].search(weight, filter, new HitCollector() {
272: public void collect(int doc, float score) {
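          // shift the sub-searcher's local doc id into this MultiSearcher's global space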
273: results.collect(doc + start, score);
274: }
275: });
276:
277: }
278: }
279:
280: public Query rewrite(Query original) throws IOException {
281: Query[] queries = new Query[searchables.length];
282: for (int i = 0; i < searchables.length; i++) {
283: queries[i] = searchables[i].rewrite(original);
284: }
285: return queries[0].combine(queries);
286: }
287:
288: public Explanation explain(Weight weight, int doc)
289: throws IOException {
290: int i = subSearcher(doc); // find searcher index
291: return searchables[i].explain(weight, doc - starts[i]); // dispatch to searcher
292: }
293:
294: /**
295: * Create weight in multiple index scenario.
296: *
297: * Distributed query processing is done in the following steps:
298: * 1. rewrite query
299: * 2. extract necessary terms
300: * 3. collect dfs for these terms from the Searchables
301: * 4. create query weight using aggregate dfs.
302: * 5. distribute that weight to Searchables
303: * 6. merge results
304: *
   * Steps 1-4 are done here, steps 5 and 6 in the search() methods.
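   *
   * For example, if one Searchable reports a docFreq of 3 for a term and
   * another reports 5, the aggregate df used in step 4 is 8.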
306: *
   * @return the Weight for the rewritten query, built from document
   *         frequencies aggregated across all Searchables
308: */
309: protected Weight createWeight(Query original) throws IOException {
310: // step 1
311: Query rewrittenQuery = rewrite(original);
312:
313: // step 2
314: Set terms = new HashSet();
315: rewrittenQuery.extractTerms(terms);
316:
    // step 3
318: Term[] allTermsArray = new Term[terms.size()];
319: terms.toArray(allTermsArray);
320: int[] aggregatedDfs = new int[terms.size()];
321: for (int i = 0; i < searchables.length; i++) {
322: int[] dfs = searchables[i].docFreqs(allTermsArray);
323: for (int j = 0; j < aggregatedDfs.length; j++) {
324: aggregatedDfs[j] += dfs[j];
325: }
326: }
327:
328: HashMap dfMap = new HashMap();
329: for (int i = 0; i < allTermsArray.length; i++) {
330: dfMap.put(allTermsArray[i], new Integer(aggregatedDfs[i]));
331: }
332:
    // step 4
334: int numDocs = maxDoc();
335: CachedDfSource cacheSim = new CachedDfSource(dfMap, numDocs,
336: getSimilarity());
337:
338: return rewrittenQuery.weight(cacheSim);
339: }
340:
341: }
|