/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @author yonik
 */
package org.apache.solr.update;

import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.search.Query;

import java.util.TreeMap;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.atomic.AtomicLong;
import java.util.logging.Level;
import java.io.IOException;
import java.net.URL;

import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.search.QueryParsing;
import org.apache.solr.util.NamedList;
import org.apache.solr.util.SimpleOrderedMap;
import org.apache.solr.core.SolrCore;
import org.apache.solr.core.SolrConfig;
import org.apache.solr.core.SolrException;

/**
 * <code>DirectUpdateHandler2</code> implements an UpdateHandler where documents are added
 * directly to the main Lucene index instead of to a separate, smaller index.
 * For this reason, not all add/delete combinations against the pending and committed
 * sets are supported.
 * This version supports efficient removal of duplicates on commit: it maintains a count
 * for every document id being added or deleted, and at commit time, for every id with a
 * count, deletes all but the last "count" docs with that id.
 * <p>
 *
 * Supported add command parameters (a usage sketch follows the table):
 <TABLE BORDER>
   <TR>
     <TH>allowDups</TH>
     <TH>overwritePending</TH>
     <TH>overwriteCommitted</TH>
     <TH>efficiency</TH>
   </TR>
   <TR>
     <TD>false</TD>
     <TD>false</TD>
     <TD>true</TD>
     <TD>fast</TD>
   </TR>
   <TR>
     <TD>true or false</TD>
     <TD>true</TD>
     <TD>true</TD>
     <TD>fast</TD>
   </TR>
   <TR>
     <TD>true</TD>
     <TD>false</TD>
     <TD>false</TD>
     <TD>fastest</TD>
   </TR>
 </TABLE>

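 <p>A hypothetical usage sketch for the supported combinations above (the command
 wiring, the <code>updateHandler</code> variable, and the field values are illustrative
 only, not taken from any particular caller):
 <PRE>
   AddUpdateCommand cmd = new AddUpdateCommand();
   cmd.doc = doc;                   // the Lucene Document to index
   cmd.allowDups = false;           // keep a single doc per id
   cmd.overwritePending = true;     // together with overwriteCommitted=true,
   cmd.overwriteCommitted = true;   //   this is the "overwrite both" fast path
   int rc = updateHandler.addDoc(cmd);  // returns 1 if the doc will be added
 </PRE>
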
 <p>Supported delete commands:
 <TABLE BORDER>
   <TR>
     <TH>command</TH>
     <TH>fromPending</TH>
     <TH>fromCommitted</TH>
     <TH>efficiency</TH>
   </TR>
   <TR>
     <TD>delete</TD>
     <TD>true</TD>
     <TD>true</TD>
     <TD>fast</TD>
   </TR>
   <TR>
     <TD>deleteByQuery</TD>
     <TD>true</TD>
     <TD>true</TD>
     <TD>very slow*</TD>
   </TR>
 </TABLE>

 <p>* deleteByQuery causes a commit to happen (the current index writer is closed and a
 new index reader is opened) before it can be processed. If deleteByQuery functionality
 is needed, it is best to batch such deletes and execute them together so they can share
 the same index reader, as in the sketch below.
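 <p>A hypothetical sketch of one such batched delete-by-query (again, the command
 wiring and values are illustrative only):
 <PRE>
   DeleteUpdateCommand del = new DeleteUpdateCommand();
   del.query = "category:obsolete";  // parsed against the schema via QueryParsing
   del.fromPending = true;           // both flags must be true (see the table above)
   del.fromCommitted = true;
   updateHandler.deleteByQuery(del);
 </PRE>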

 *
 * @author yonik
 * @version $Id: DirectUpdateHandler2.java 542679 2007-05-29 22:28:21Z ryan $
 * @since solr 0.9
 */

public class DirectUpdateHandler2 extends UpdateHandler {

  // stats
  AtomicLong addCommands = new AtomicLong();
  AtomicLong addCommandsCumulative = new AtomicLong();
  AtomicLong deleteByIdCommands = new AtomicLong();
  AtomicLong deleteByIdCommandsCumulative = new AtomicLong();
  AtomicLong deleteByQueryCommands = new AtomicLong();
  AtomicLong deleteByQueryCommandsCumulative = new AtomicLong();
  AtomicLong commitCommands = new AtomicLong();
  AtomicLong optimizeCommands = new AtomicLong();
  AtomicLong numDocsDeleted = new AtomicLong();
  AtomicLong numDocsPending = new AtomicLong();
  AtomicLong numErrors = new AtomicLong();
  AtomicLong numErrorsCumulative = new AtomicLong();

  // tracks when auto-commit should occur
  protected final CommitTracker tracker;

  // The key is the id, the value (Integer) is the number
  // of docs to save (delete all except the last "n" added)
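  //
  // For example (illustrative values, derived from the delete/allowDups logic
  // below): a delete of id "X" stores ("X", 0), so every doc with that id is
  // removed at commit time; if two allowDups adds of "X" then follow, the
  // entry becomes ("X", 2) and only the last two docs with that id survive.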
  protected final Map<String, Integer> pset;

  // commonly used constants for the count in the pset
  protected final static Integer ZERO = 0;
  protected final static Integer ONE = 1;

  // iwCommit is a mutex that protects internal data and the open/close of the
  // IndexWriter. Any use of the index writer itself should instead be protected
  // by iwAccess, which admits multiple simultaneous acquisitions. iwAccess is
  // mutually exclusive with the iwCommit lock: they are the read and write
  // halves of a single ReentrantReadWriteLock.
  protected final Lock iwAccess, iwCommit;
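  //
  // Typical pattern (a sketch of what the methods below already do, not new
  // behavior): adders acquire the shared iwAccess lock,
  //
  //   iwAccess.lock();
  //   try { ... writer.addDocument(doc); ... } finally { iwAccess.unlock(); }
  //
  // while the commit/delete/close paths acquire iwCommit, which excludes all
  // concurrent holders of iwAccess.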

  protected IndexWriter writer;
  protected SolrIndexSearcher searcher;

  public DirectUpdateHandler2(SolrCore core) throws IOException {
    super(core);
    /* A TreeMap is used to maintain the natural ordering of the document ids,
       which makes commits more efficient */
    pset = new TreeMap<String, Integer>();

    ReadWriteLock rwl = new ReentrantReadWriteLock();
    iwAccess = rwl.readLock();
    iwCommit = rwl.writeLock();

    tracker = new CommitTracker();
  }

  // must only be called when iwCommit lock held
  protected void openWriter() throws IOException {
    if (writer == null) {
      writer = createMainIndexWriter("DirectUpdateHandler2");
    }
  }

  // must only be called when iwCommit lock held
  protected void closeWriter() throws IOException {
    try {
      numDocsPending.set(0);
      if (writer != null)
        writer.close();
    } finally {
      // if an exception causes the write lock to not be
      // released, we could try to delete it here
      writer = null;
    }
  }

  protected void openSearcher() throws IOException {
    if (searcher == null) {
      searcher = core.newSearcher("DirectUpdateHandler2");
    }
  }

  protected void closeSearcher() throws IOException {
    try {
      if (searcher != null)
        searcher.close();
    } finally {
      // if an exception causes a lock to not be
      // released, we could try to delete it.
      searcher = null;
    }
  }

  public int addDoc(AddUpdateCommand cmd) throws IOException {
    addCommands.incrementAndGet();
    addCommandsCumulative.incrementAndGet();
    int rc = -1;

    // if there is no ID field, use allowDups
    if (idField == null) {
      cmd.allowDups = true;
      cmd.overwriteCommitted = false;
      cmd.overwritePending = false;
    }

    iwAccess.lock();
    try {

      // We can't use iwCommit to protect internal data here, since it would
      // block other addDoc calls. Hence, we synchronize to protect internal
      // state. This is safe as all other state-changing operations are
      // protected with iwCommit (which iwAccess excludes from this block).
      synchronized (this) {
        if (!cmd.allowDups && !cmd.overwritePending && !cmd.overwriteCommitted) {
          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
              "unsupported param combo: " + cmd);
          // this would need a reader to implement (to be able to check committed
          // before adding.)
          // return addNoOverwriteNoDups(cmd);
        } else if (!cmd.allowDups && !cmd.overwritePending && cmd.overwriteCommitted) {
          rc = addConditionally(cmd);
        } else if (!cmd.allowDups && cmd.overwritePending && !cmd.overwriteCommitted) {
          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
              "unsupported param combo: " + cmd);
        } else if (!cmd.allowDups && cmd.overwritePending && cmd.overwriteCommitted) {
          rc = overwriteBoth(cmd);
        } else if (cmd.allowDups && !cmd.overwritePending && !cmd.overwriteCommitted) {
          rc = allowDups(cmd);
        } else if (cmd.allowDups && !cmd.overwritePending && cmd.overwriteCommitted) {
          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
              "unsupported param combo: " + cmd);
        } else if (cmd.allowDups && cmd.overwritePending && !cmd.overwriteCommitted) {
          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
              "unsupported param combo: " + cmd);
        } else if (cmd.allowDups && cmd.overwritePending && cmd.overwriteCommitted) {
          rc = overwriteBoth(cmd);
        }
        if (rc == -1)
          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
              "unsupported param combo: " + cmd);

        if (rc == 1) {
          // adding the document -- prep the writer
          closeSearcher();
          openWriter();
          tracker.addedDocument();
        } else {
          // exit prematurely
          return rc;
        }
      } // end synchronized block

      // this is the only unsynchronized code in the iwAccess block, which
      // should account for most of the time
      assert (rc == 1);
      writer.addDocument(cmd.doc);

    } finally {
      iwAccess.unlock();
      if (rc != 1) {
        numErrors.incrementAndGet();
        numErrorsCumulative.incrementAndGet();
      } else {
        numDocsPending.incrementAndGet();
      }
    }
    return rc;
  }

  // could return the number of docs deleted, but is that always possible to know?
  public void delete(DeleteUpdateCommand cmd) throws IOException {
    deleteByIdCommands.incrementAndGet();
    deleteByIdCommandsCumulative.incrementAndGet();

    if (!cmd.fromPending && !cmd.fromCommitted) {
      numErrors.incrementAndGet();
      numErrorsCumulative.incrementAndGet();
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "meaningless command: " + cmd);
    }
    if (!cmd.fromPending || !cmd.fromCommitted) {
      numErrors.incrementAndGet();
      numErrorsCumulative.incrementAndGet();
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "operation not supported: " + cmd);
    }

    iwCommit.lock();
    try {
      pset.put(idFieldType.toInternal(cmd.id), ZERO);
    } finally {
      iwCommit.unlock();
    }
  }

  // why not return the number of docs deleted?
  // Depending on the implementation, we may not be able to immediately
  // determine that number.
  public void deleteByQuery(DeleteUpdateCommand cmd) throws IOException {
    deleteByQueryCommands.incrementAndGet();
    deleteByQueryCommandsCumulative.incrementAndGet();

    if (!cmd.fromPending && !cmd.fromCommitted) {
      numErrors.incrementAndGet();
      numErrorsCumulative.incrementAndGet();
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "meaningless command: " + cmd);
    }
    if (!cmd.fromPending || !cmd.fromCommitted) {
      numErrors.incrementAndGet();
      numErrorsCumulative.incrementAndGet();
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "operation not supported: " + cmd);
    }

    boolean madeIt = false;
    try {
      Query q = QueryParsing.parseQuery(cmd.query, schema);

      int totDeleted = 0;
      iwCommit.lock();
      try {
        // we need to do much of the commit logic (mainly applying the queued
        // deletes) since deleteByQuery can throw off our counts.
        doDeletions();

        closeWriter();
        openSearcher();

        // if we want to count the number of docs that were deleted, then
        // we need a new instance of the DeleteHitCollector
        final DeleteHitCollector deleter = new DeleteHitCollector(searcher);
        searcher.search(q, null, deleter);
        totDeleted = deleter.deleted;
      } finally {
        iwCommit.unlock();
      }

      if (SolrCore.log.isLoggable(Level.FINE)) {
        SolrCore.log.fine("docs deleted by query:" + totDeleted);
      }
      numDocsDeleted.getAndAdd(totDeleted);
      madeIt = true;
    } finally {
      if (!madeIt) {
        numErrors.incrementAndGet();
        numErrorsCumulative.incrementAndGet();
      }
    }
  }

  ///////////////////////////////////////////////////////////////////
  /////////////////// helper methods for each add type //////////////
  ///////////////////////////////////////////////////////////////////

  // methods return 1 if the document is to be added; 0 otherwise.
  // methods must be called in a synchronized context.

  protected int addConditionally(AddUpdateCommand cmd) throws IOException {
    if (cmd.indexedId == null) {
      cmd.indexedId = getIndexedId(cmd.doc);
    }
    Integer saveCount = pset.get(cmd.indexedId);
    if (saveCount != null && saveCount != 0) {
      // a doc with this id already exists in the pending set
      return 0;
    }
    pset.put(cmd.indexedId, ONE);
    return 1;
  }

  // overwrite both pending and committed
  protected int overwriteBoth(AddUpdateCommand cmd) throws IOException {
    if (cmd.indexedId == null) {
      cmd.indexedId = getIndexedId(cmd.doc);
    }
    pset.put(cmd.indexedId, ONE);
    return 1;
  }

  // add without checking
  protected int allowDups(AddUpdateCommand cmd) throws IOException {
    if (cmd.indexedId == null) {
      cmd.indexedId = getIndexedIdOptional(cmd.doc);
    }
    if (cmd.indexedId != null) {
      Integer saveCount = pset.get(cmd.indexedId);

      // if there weren't any docs marked for deletion before, then don't mark
      // any for deletion now.
      if (saveCount == null)
        return 1;

      // If there were docs marked for deletion, then increment the number of
      // docs to save at the end.

      // the following special case is optional, but it saves an allocation in
      // the common case.
      if (saveCount == ZERO)
        saveCount = ONE;
      else
        saveCount++;

      pset.put(cmd.indexedId, saveCount);
    }
    return 1;
  }

  //
  // do all needed deletions.
  // call with iwCommit lock held
  //
  protected void doDeletions() throws IOException {
    int[] docnums = new int[0];

    if (pset.size() > 0) { // optimization: only open a searcher if there is something to delete...
      log.info("DirectUpdateHandler2 deleting and removing dups for " + pset.size() + " ids");
      int numDeletes = 0;

      closeWriter();
      openSearcher();
      IndexReader reader = searcher.getReader();
      TermDocs tdocs = reader.termDocs();
      String fieldname = idField.getName();

      for (Map.Entry<String, Integer> entry : pset.entrySet()) {
        String id = entry.getKey();
        int saveLast = entry.getValue(); // save the last "saveLast" documents

        // expand our array that keeps track of docs if needed.
        if (docnums == null || saveLast > docnums.length) {
          docnums = new int[saveLast];
        }

        // initialize all docnums in the list to -1 (unused)
        for (int i = 0; i < saveLast; i++) {
          docnums[i] = -1;
        }

        tdocs.seek(new Term(fieldname, id));

        //
        // record the docs for this term in the "docnums" array and wrap around
        // at size "saveLast". If we reuse a slot in the array, then we delete
        // the doc that was there from the index.
        //
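        // Illustrative trace of that circular buffer with saveLast=2 and
        // matching docs d1,d2,d3,d4: d1 and d2 fill slots 0 and 1; d3 reuses
        // slot 0 and deletes d1; d4 reuses slot 1 and deletes d2. Only d3 and
        // d4 survive, i.e. the last "saveLast" docs added for this id.
        //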
        int pos = 0;
        while (tdocs.next()) {
          if (saveLast == 0) {
            // special case - delete all the docs as we see them.
            reader.deleteDocument(tdocs.doc());
            numDeletes++;
            continue;
          }

          int prev = docnums[pos];
          docnums[pos] = tdocs.doc();
          if (prev != -1) {
            reader.deleteDocument(prev);
            numDeletes++;
          }

          if (++pos >= saveLast)
            pos = 0;
        }
      }

      // should we ever shrink it again, or just clear it?
      pset.clear();
      log.info("DirectUpdateHandler2 docs deleted=" + numDeletes);
      numDocsDeleted.addAndGet(numDeletes);
    }

  }

  public void commit(CommitUpdateCommand cmd) throws IOException {

    if (cmd.optimize) {
      optimizeCommands.incrementAndGet();
    } else {
      commitCommands.incrementAndGet();
    }

    Future[] waitSearcher = null;
    if (cmd.waitSearcher) {
      waitSearcher = new Future[1];
    }

    boolean error = true;
    iwCommit.lock();
    try {
      log.info("start " + cmd);
      doDeletions();

      if (cmd.optimize) {
        closeSearcher();
        openWriter();
        writer.optimize();
      }

      closeSearcher();
      closeWriter();

      callPostCommitCallbacks();
      if (cmd.optimize) {
        callPostOptimizeCallbacks();
      }
      // open a new searcher while holding the lock to avoid opening it
      // after a deleteByQuery changed the index, or in between the deletes
      // and adds of another commit being done.
      core.getSearcher(true, false, waitSearcher);

      // reset commit tracking
      tracker.didCommit();

      log.info("end_commit_flush");

      error = false;
    } finally {
      iwCommit.unlock();
      addCommands.set(0);
      deleteByIdCommands.set(0);
      deleteByQueryCommands.set(0);
      numErrors.set(error ? 1 : 0);
    }

    // if we are supposed to wait for the searcher to be registered, then we
    // should do it outside of the commit lock so that other update operations
    // can proceed.
    if (waitSearcher != null && waitSearcher[0] != null) {
      try {
        waitSearcher[0].get();
      } catch (InterruptedException e) {
        SolrException.log(log, e);
      } catch (ExecutionException e) {
        SolrException.log(log, e);
      }
    }
  }

  public void close() throws IOException {
    log.info("closing " + this);
    iwCommit.lock();
    try {
      // cancel any pending operations
      if (tracker.pending != null) {
        tracker.pending.cancel(true);
        tracker.pending = null;
      }
      doDeletions();
      closeSearcher();
      closeWriter();
    } finally {
      iwCommit.unlock();
    }
    log.info("closed " + this);
  }

  /** Helper class for tracking autoCommit state.
   *
   * Note: This is purely an implementation detail of autoCommit and will
   * definitely change in the future, so the interface should not be
   * relied upon.
   *
   * Note: all access must be synchronized.
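   *
   * An illustrative solrconfig.xml snippet (the element paths match the
   * SolrConfig lookups in the CommitTracker constructor; the values and the
   * class attribute are examples only, not a verified configuration):
   * <PRE>
   *   &lt;updateHandler class="solr.DirectUpdateHandler2"&gt;
   *     &lt;autoCommit&gt;
   *       &lt;maxDocs&gt;10000&lt;/maxDocs&gt;
   *       &lt;maxTime&gt;60000&lt;/maxTime&gt;  &lt;!-- milliseconds --&gt;
   *     &lt;/autoCommit&gt;
   *   &lt;/updateHandler&gt;
   * </PRE>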
   */
  class CommitTracker implements Runnable {
    // settings, not final so we can change them in testing
    int docsUpperBound;
    long timeUpperBound;

    private final ScheduledExecutorService scheduler =
        Executors.newScheduledThreadPool(1);
    private ScheduledFuture pending;

    // state
    long docsSinceCommit;
    int autoCommitCount = 0;
    long lastAddedTime = -1;

    public CommitTracker() {
      docsSinceCommit = 0;
      pending = null;

      docsUpperBound = SolrConfig.config.getInt("updateHandler/autoCommit/maxDocs", -1);
      timeUpperBound = SolrConfig.config.getInt("updateHandler/autoCommit/maxTime", -1);

      SolrCore.log.info("AutoCommit: " + this);
    }

    /** Indicate that documents have been added. */
    public void addedDocument() {
      docsSinceCommit++;
      lastAddedTime = System.currentTimeMillis();
      if (pending == null) { // Don't start a new event if one is already waiting
        if (timeUpperBound > 0) {
          pending = scheduler.schedule(this, timeUpperBound, TimeUnit.MILLISECONDS);
        } else if (docsUpperBound > 0 && (docsSinceCommit > docsUpperBound)) {
          // 1/4 second seems fast enough for anyone using maxDocs
          pending = scheduler.schedule(this, 250, TimeUnit.MILLISECONDS);
        }
      }
    }

    /** Inform the tracker that a commit has occurred; cancel any pending commits. */
    public void didCommit() {
      if (pending != null) {
        pending.cancel(false);
        pending = null; // let it start another one
      }
      docsSinceCommit = 0;
    }

    /** This is the worker part for the ScheduledFuture. */
    public synchronized void run() {
      long started = System.currentTimeMillis();
      try {
        CommitUpdateCommand command = new CommitUpdateCommand(false);
        command.waitFlush = true;
        command.waitSearcher = true;
        commit(command);
        autoCommitCount++;
      } catch (Exception e) {
        log.severe("auto commit error...");
        SolrException.log(log, e);
      } finally {
        pending = null;
      }

      // check if docs have been submitted since the commit started
      if (lastAddedTime > started) {
        if (docsUpperBound > 0 && docsSinceCommit > docsUpperBound) {
          pending = scheduler.schedule(this, 100, TimeUnit.MILLISECONDS);
        } else if (timeUpperBound > 0) {
          pending = scheduler.schedule(this, timeUpperBound, TimeUnit.MILLISECONDS);
        }
      }
    }

    public String toString() {
      if (timeUpperBound > 0 || docsUpperBound > 0) {
        return (timeUpperBound > 0 ? ("if uncommitted for " + timeUpperBound + "ms; ") : "")
            + (docsUpperBound > 0 ? ("if " + docsUpperBound + " uncommitted docs ") : "");
      } else {
        return "disabled";
      }
    }
  }

  /////////////////////////////////////////////////////////////////////
  // SolrInfoMBean stuff: Statistics and Module Info
  /////////////////////////////////////////////////////////////////////

  public String getName() {
    return DirectUpdateHandler2.class.getName();
  }

  public String getVersion() {
    return SolrCore.version;
  }

  public String getDescription() {
    return "Update handler that directly and efficiently updates the on-disk main Lucene index";
  }

  public Category getCategory() {
    return Category.UPDATEHANDLER;
  }

  public String getSourceId() {
    return "$Id: DirectUpdateHandler2.java 542679 2007-05-29 22:28:21Z ryan $";
  }

  public String getSource() {
    return "$URL: https://svn.apache.org/repos/asf/lucene/solr/branches/branch-1.2/src/java/org/apache/solr/update/DirectUpdateHandler2.java $";
  }

  public URL[] getDocs() {
    return null;
  }

  public NamedList getStatistics() {
    NamedList lst = new SimpleOrderedMap();
    lst.add("commits", commitCommands.get());
    if (tracker.docsUpperBound > 0) {
      lst.add("autocommit maxDocs", tracker.docsUpperBound);
    }
    if (tracker.timeUpperBound > 0) {
      lst.add("autocommit maxTime", "" + tracker.timeUpperBound + "ms");
    }
    lst.add("autocommits", tracker.autoCommitCount);
    lst.add("optimizes", optimizeCommands.get());
    lst.add("docsPending", numDocsPending.get());
    // pset.size() is not synchronized, but it should be fine to access.
    lst.add("deletesPending", pset.size());
    lst.add("adds", addCommands.get());
    lst.add("deletesById", deleteByIdCommands.get());
    lst.add("deletesByQuery", deleteByQueryCommands.get());
    lst.add("errors", numErrors.get());
    lst.add("cumulative_adds", addCommandsCumulative.get());
    lst.add("cumulative_deletesById", deleteByIdCommandsCumulative.get());
    lst.add("cumulative_deletesByQuery", deleteByQueryCommandsCumulative.get());
    lst.add("cumulative_errors", numErrorsCumulative.get());
    lst.add("docsDeleted", numDocsDeleted.get());
    return lst;
  }

  public String toString() {
    return "DirectUpdateHandler2" + getStatistics();
  }
}
|