001: // kelondroEcoIndex.java
002: // (C) 2008 by Michael Peter Christen; mc@yacy.net, Frankfurt a. M., Germany
003: // first published 14.01.2008 on http://yacy.net
004: //
005: // $LastChangedDate: 2006-04-02 22:40:07 +0200 (So, 02 Apr 2006) $
006: // $LastChangedRevision: 1986 $
007: // $LastChangedBy: orbiter $
008: //
009: // LICENSE
010: //
011: // This program is free software; you can redistribute it and/or modify
012: // it under the terms of the GNU General Public License as published by
013: // the Free Software Foundation; either version 2 of the License, or
014: // (at your option) any later version.
015: //
016: // This program is distributed in the hope that it will be useful,
017: // but WITHOUT ANY WARRANTY; without even the implied warranty of
018: // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
019: // GNU General Public License for more details.
020: //
021: // You should have received a copy of the GNU General Public License
022: // along with this program; if not, write to the Free Software
023: // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
024:
025: package de.anomic.kelondro;
026:
027: import java.io.File;
028: import java.io.FileNotFoundException;
029: import java.io.FileOutputStream;
030: import java.io.IOException;
031: import java.util.ArrayList;
032: import java.util.Date;
033: import java.util.HashMap;
034: import java.util.Iterator;
035: import java.util.List;
036: import java.util.Map;
037: import java.util.TreeMap;
038: import java.util.TreeSet;
039:
040: import de.anomic.kelondro.kelondroRow.Entry;
041: import de.anomic.server.serverMemory;
042:
/*
 * The EcoIndex builds upon the EcoFS and tries to reduce the number of IO requests that the
 * EcoFS must perform to a minimum. In the best case, no IO has to be done for read operations
 * (complete database shadow in RAM), and only a small number of write IO operations is needed
 * for a large number of table writes (using the write buffer of EcoFS).
 * To make the EcoIndex scalable with respect to available RAM, there are two elements that must be scalable:
 * - the access index can be either completely in RAM (kelondroRAMIndex) or file-based (kelondroTree)
 * - the content cache can be either a complete RAM-based shadow of the file, or empty.
 * The content cache can also be deleted during run-time, if the available RAM gets too low.
 */
052:
public class kelondroEcoTable implements kelondroIndex {

    // static tracker objects
    // maps each table file path to its open kelondroEcoTable instance (used for monitoring/statistics)
    private static TreeMap<String, kelondroEcoTable> tableTracker = new TreeMap<String, kelondroEcoTable>();

    // policy values for the constructor's useTailCache parameter:
    public static final int tailCacheDenyUsage = 0;  // never keep a RAM copy of the row tails
    public static final int tailCacheForceUsage = 1; // always keep a RAM copy of the row tails
    public static final int tailCacheUsageAuto = 2;  // keep a RAM copy only if enough memory is available

    public static final long maxarraylength = 134217727; // that may be the maximum size of array length in some JVMs

    private kelondroRowSet table;        // optional RAM shadow of the row tails (null if tail cache disabled)
    private kelondroBytesIntMap index;   // maps primary key -> record position in the file
    private kelondroBufferedEcoFS file;  // the backing fixed-record file with write buffer
    private kelondroRow rowdef, taildef; // full row layout and the layout without the primary key column
    private int buffersize;              // buffer size handed to the kelondroBufferedEcoFS
069:
070: public kelondroEcoTable(File tablefile, kelondroRow rowdef,
071: int useTailCache, int buffersize, int initialSpace) {
072: this .rowdef = rowdef;
073: this .buffersize = buffersize;
074: assert rowdef.primaryKeyIndex == 0;
075: // define the taildef, a row like the rowdef but without the first column
076: kelondroColumn[] cols = new kelondroColumn[rowdef.columns() - 1];
077: for (int i = 0; i < cols.length; i++) {
078: cols[i] = rowdef.column(i + 1);
079: }
080: this .taildef = new kelondroRow(cols,
081: kelondroNaturalOrder.naturalOrder, -1);
082:
083: // initialize table file
084: if (!tablefile.exists()) {
085: // make new file
086: FileOutputStream fos = null;
087: try {
088: fos = new FileOutputStream(tablefile);
089: } catch (FileNotFoundException e) {
090: // should not happen
091: e.printStackTrace();
092: }
093: try {
094: fos.close();
095: } catch (IOException e) {
096: }
097: }
098:
099: try {
100: // open an existing table file
101: this .file = new kelondroBufferedEcoFS(new kelondroEcoFS(
102: tablefile, rowdef.objectsize), this .buffersize);
103:
104: // initialize index and copy table
105: int records = (int) Math.max(file.size(), initialSpace);
106: long neededRAM4table = records * (rowdef.objectsize + 4)
107: * 3 / 2;
108: table = ((neededRAM4table < maxarraylength) && ((useTailCache == tailCacheForceUsage) || ((useTailCache == tailCacheUsageAuto) && (serverMemory
109: .request(neededRAM4table + 200 * 1024 * 1024, false))))) ? new kelondroRowSet(
110: taildef, records)
111: : null;
112: System.out.println("*** DEBUG " + tablefile
113: + ": available RAM: "
114: + (serverMemory.available() / 1024 / 1024)
115: + "MB, allocating space for " + records
116: + " entries");
117: long neededRAM4index = 2 * 1024 * 1024 + records
118: * (rowdef.primaryKeyLength + 4) * 3 / 2;
119: if (!serverMemory.request(neededRAM4index, false)) {
120: // despite calculations seemed to show that there is enough memory for the table AND the index
121: // there is now not enough memory left for the index. So delete the table again to free the memory
122: // for the index
123: System.out
124: .println("*** DEBUG "
125: + tablefile
126: + ": not enough RAM ("
127: + (serverMemory.available() / 1024 / 1024)
128: + "MB) left for index, deleting allocated table space to enable index space allocation (needed: "
129: + (neededRAM4index / 1024 / 1024)
130: + "MB)");
131: table = null;
132: System.gc();
133: System.out.println("*** DEBUG " + tablefile
134: + ": RAM after releasing the table: "
135: + (serverMemory.available() / 1024 / 1024)
136: + "MB");
137: }
138: index = new kelondroBytesIntMap(rowdef.primaryKeyLength,
139: rowdef.objectOrder, records);
140: System.out.println("*** DEBUG " + tablefile + ": EcoTable "
141: + tablefile.toString() + " has table copy "
142: + ((table == null) ? "DISABLED" : "ENABLED"));
143:
144: // read all elements from the file into the copy table
145: byte[] record = new byte[rowdef.objectsize];
146: byte[] key = new byte[rowdef.primaryKeyLength];
147: int fs = (int) file.size();
148: System.out.print("*** initializing RAM index for EcoTable "
149: + tablefile.getName() + ":");
150: for (int i = 0; i < fs; i++) {
151: // read entry
152: file.get(i, record, 0);
153:
154: // write the key into the index table
155: System.arraycopy(record, 0, key, 0,
156: rowdef.primaryKeyLength);
157: index.addi(key, i);
158:
159: // write the tail into the table
160: if (table != null)
161: table.addUnique(taildef.newEntry(record,
162: rowdef.primaryKeyLength, true));
163:
164: if ((i % 10000) == 0) {
165: System.out.print('.');
166: System.out.flush();
167: }
168: }
169: System.out.print(" -ordering- ..");
170: System.out.flush();
171: // check consistency
172: ArrayList<Integer[]> doubles = index.removeDoubles();
173: System.out.println(" -removed " + doubles.size()
174: + " doubles- done.");
175: if (doubles.size() > 0) {
176: System.out.println("DEBUG " + tablefile
177: + ": WARNING - EcoTable " + tablefile + " has "
178: + doubles.size() + " doubles");
179: // from all the doubles take one, put it back to the index and remove the others from the file
180: Iterator<Integer[]> i = doubles.iterator();
181: Integer[] ds;
182: // first put back one element each
183: while (i.hasNext()) {
184: ds = i.next();
185: file.get(ds[0].longValue(), record, 0);
186: System.arraycopy(record, 0, key, 0,
187: rowdef.primaryKeyLength);
188: index.addi(key, ds[0].intValue());
189: }
190: // then remove the other doubles by removing them from the table, but do a re-indexing while doing that
191: // first aggregate all the delete positions because the elements from the top positions must be removed first
192: i = doubles.iterator();
193: TreeSet<Integer> delpos = new TreeSet<Integer>();
194: while (i.hasNext()) {
195: ds = i.next();
196: for (int j = 1; j < ds.length; j++) {
197: delpos.add(ds[j]);
198: }
199: }
200: // now remove the entries in a sorted way (top-down)
201: Integer top;
202: while (delpos.size() > 0) {
203: top = delpos.last();
204: delpos.remove(top);
205: removeInFile(top.intValue());
206: }
207: }
208: } catch (FileNotFoundException e) {
209: // should never happen
210: e.printStackTrace();
211: throw new kelondroException(e.getMessage());
212: } catch (IOException e) {
213: e.printStackTrace();
214: throw new kelondroException(e.getMessage());
215: }
216: try {
217: assert file.size() == index.size() : "file.size() = "
218: + file.size() + ", index.size() = " + index.size();
219: } catch (IOException e) {
220: e.printStackTrace();
221: }
222:
223: // track this table
224: tableTracker.put(tablefile.toString(), this );
225: }
226:
    /**
     * Computes the number of records stored in the given table file without
     * opening it (pure file-size arithmetic delegated to kelondroEcoFS).
     */
    public static long tableSize(File tablefile, int recordsize) {
        // returns number of records in table
        return kelondroEcoFS.tableSize(tablefile, recordsize);
    }
231:
    /**
     * Returns an iterator over the file names of all currently tracked tables.
     */
    public static final Iterator<String> filenames() {
        // iterates string objects; all file names from record tracker
        return tableTracker.keySet().iterator();
    }
236:
237: public static final Map<String, String> memoryStats(String filename) {
238: // returns a map for each file in the tracker;
239: // the map represents properties for each record objects,
240: // i.e. for cache memory allocation
241: kelondroEcoTable theEcoTable = tableTracker.get(filename);
242: return theEcoTable.memoryStats();
243: }
244:
245: private final Map<String, String> memoryStats() {
246: // returns statistical data about this object
247: assert ((table == null) || (table.size() == index.size()));
248: HashMap<String, String> map = new HashMap<String, String>();
249: map.put("tableSize", Integer.toString(index.size()));
250: map.put("tableKeyChunkSize", Integer
251: .toString(index.row().objectsize));
252: map
253: .put(
254: "tableKeyMem",
255: Integer
256: .toString((int) (index.row().objectsize
257: * index.size() * kelondroRowCollection.growfactor)));
258: map.put("tableValueChunkSize", (table == null) ? "0" : Integer
259: .toString(table.row().objectsize));
260: map
261: .put(
262: "tableValueMem",
263: (table == null) ? "0"
264: : Integer
265: .toString((int) (table.row().objectsize
266: * table.size() * kelondroRowCollection.growfactor)));
267: return map;
268: }
269:
    /**
     * Estimates the RAM (in bytes) that the key index over file f would need,
     * based on the record count and primary key length.
     * NOTE(review): the result is cast to int and may overflow for very large
     * tables — confirm callers only use this for moderate table sizes.
     */
    public static int staticRAMIndexNeed(File f, kelondroRow rowdef) {
        return (int) ((rowdef.primaryKeyLength + 4)
                * tableSize(f, rowdef.objectsize) * kelondroRowSet.growfactor);
    }
274:
    /**
     * Appends a row that is known not to exist yet: the row is written to the
     * end of the file, the key/position is registered in the RAM index, and the
     * tail is appended to the RAM copy (if enabled).
     */
    public synchronized void addUnique(Entry row) throws IOException {
        assert file.size() == index.size() : "file.size() = "
                + file.size() + ", index.size() = " + index.size();
        assert ((table == null) || (table.size() == index.size()));
        // new records are always appended; the next free record number is the file size
        int i = (int) file.size();
        index.addi(row.getPrimaryKeyBytes(), i);
        if (table != null) {
            assert table.size() == i;
            // store only the tail (everything behind the primary key) in the RAM copy
            table.addUnique(taildef.newEntry(row.bytes(),
                    rowdef.primaryKeyLength, true));
        }
        file.put(i, row.bytes(), 0);
        assert file.size() == index.size() : "file.size() = "
                + file.size() + ", index.size() = " + index.size();
    }
290:
291: public synchronized void addUniqueMultiple(List<Entry> rows)
292: throws IOException {
293: assert file.size() == index.size() : "file.size() = "
294: + file.size() + ", index.size() = " + index.size();
295: Iterator<Entry> i = rows.iterator();
296: while (i.hasNext()) {
297: addUnique(i.next());
298: }
299: assert file.size() == index.size() : "file.size() = "
300: + file.size() + ", index.size() = " + index.size();
301: }
302:
    /**
     * Finds all keys that occur more than once in the index, collects the
     * affected rows (one kelondroRowSet per double key) and removes the
     * affected records from the file.
     */
    public synchronized ArrayList<kelondroRowSet> removeDoubles()
            throws IOException {
        ArrayList<Integer[]> indexreport = index.removeDoubles();
        ArrayList<kelondroRowSet> report = new ArrayList<kelondroRowSet>();
        Iterator<Integer[]> i = indexreport.iterator();
        Integer[] is;
        kelondroRowSet rows;
        TreeSet<Integer> d = new TreeSet<Integer>();
        byte[] b = new byte[rowdef.objectsize];
        while (i.hasNext()) {
            is = i.next();
            // collect the full row content for every position of this double key
            rows = new kelondroRowSet(this.rowdef, is.length);
            for (int j = 0; j < is.length; j++) {
                d.add(is[j]);
                file.get(is[j].intValue(), b, 0);
                rows.addUnique(rowdef.newEntry(b));
            }
            report.add(rows);
        }
        // finally delete the affected rows, but start with largest id first, othervise we overwrite wrong entries
        Integer s;
        while (d.size() > 0) {
            s = d.last();
            d.remove(s);
            this.removeInFile(s.intValue());
        }
        return report;
    }
331:
332: public void close() {
333: file.close();
334: file = null;
335: }
336:
337: public void finalize() {
338: if (this .file != null)
339: this .close();
340: }
341:
    /**
     * Returns the path of the backing table file as a string.
     */
    public String filename() {
        return this.file.filename().toString();
    }
345:
    /**
     * Looks up the row for the given primary key. Returns null if the key is
     * not in the index. When the tail cache is enabled, the row is composed
     * from the key and the cached tail without touching the file.
     */
    public synchronized Entry get(byte[] key) throws IOException {
        assert file.size() == index.size() : "file.size() = "
                + file.size() + ", index.size() = " + index.size();
        assert ((table == null) || (table.size() == index.size()));
        int i = index.geti(key);
        if (i == -1)
            return null;
        byte[] b = new byte[rowdef.objectsize];
        if (table == null) {
            // read row from the file
            file.get(i, b, 0);
        } else {
            // construct the row using the copy in RAM
            kelondroRow.Entry v = table.get(i);
            assert v != null;
            assert key.length == rowdef.primaryKeyLength;
            // the row is key + tail: copy the key first, then the cached tail behind it
            System.arraycopy(key, 0, b, 0, key.length);
            System.arraycopy(v.bytes(), 0, b, rowdef.primaryKeyLength,
                    rowdef.objectsize - rowdef.primaryKeyLength);
        }
        assert file.size() == index.size() : "file.size() = "
                + file.size() + ", index.size() = " + index.size();
        assert ((table == null) || (table.size() == index.size()));
        return rowdef.newEntry(b);
    }
371:
372: public synchronized boolean has(byte[] key) throws IOException {
373: assert file.size() == index.size() : "file.size() = "
374: + file.size() + ", index.size() = " + index.size();
375: assert ((table == null) || (table.size() == index.size()));
376: return index.geti(key) >= 0;
377: }
378:
    /**
     * Iterates all primary keys in index order, starting at firstKey.
     */
    public synchronized kelondroCloneableIterator<byte[]> keys(
            boolean up, byte[] firstKey) throws IOException {
        return index.keys(up, firstKey);
    }
383:
    /**
     * Profiling is not implemented for this index type; always returns null.
     */
    public kelondroProfile profile() {
        return null;
    }
387:
    /**
     * Inserts or replaces a row. If the key is new, the row is appended
     * (via addUnique) and null is returned; otherwise the old row is
     * overwritten in place and the previous row content is returned.
     */
    public synchronized Entry put(Entry row) throws IOException {
        assert file.size() == index.size() : "file.size() = "
                + file.size() + ", index.size() = " + index.size();
        assert ((table == null) || (table.size() == index.size()));
        assert row != null;
        assert row.bytes() != null;
        if ((row == null) || (row.bytes() == null))
            return null;
        int i = index.geti(row.getPrimaryKeyBytes());
        if (i == -1) {
            // key does not exist yet: plain append
            addUnique(row);
            return null;
        }

        byte[] b = new byte[rowdef.objectsize];
        if (table == null) {
            // read old value
            file.get(i, b, 0);
            // write new value
            file.put(i, row.bytes(), 0);
        } else {
            // read old value: compose it from the key and the cached tail
            kelondroRow.Entry v = table.get(i);
            assert v != null;
            System.arraycopy(row.getPrimaryKeyBytes(), 0, b, 0,
                    rowdef.primaryKeyLength);
            System.arraycopy(v.bytes(), 0, b, rowdef.primaryKeyLength,
                    rowdef.objectsize - rowdef.primaryKeyLength);
            // write new value to both the RAM copy and the file
            table.set(i, taildef.newEntry(row.bytes(),
                    rowdef.primaryKeyLength, true));
            file.put(i, row.bytes(), 0);
        }
        assert file.size() == index.size() : "file.size() = "
                + file.size() + ", index.size() = " + index.size();
        assert ((table == null) || (table.size() == index.size()));
        // return old value
        return rowdef.newEntry(b);
    }
427:
    /**
     * Interface variant with an entry date; the date is ignored by this
     * implementation and the call delegates to put(row).
     */
    public synchronized Entry put(Entry row, Date entryDate)
            throws IOException {
        return put(row);
    }
432:
433: public synchronized void putMultiple(List<Entry> rows)
434: throws IOException {
435: assert file.size() == index.size() : "file.size() = "
436: + file.size() + ", index.size() = " + index.size();
437: Iterator<Entry> i = rows.iterator();
438: while (i.hasNext()) {
439: put(i.next());
440: }
441: assert file.size() == index.size() : "file.size() = "
442: + file.size() + ", index.size() = " + index.size();
443: }
444:
    /**
     * Removes the record at file position i without leaving a hole: the last
     * record of the file is moved into the freed slot and the index (and the
     * tail cache, if enabled) is updated accordingly. The statement order is
     * critical because the moved record changes position.
     */
    private void removeInFile(int i) throws IOException {
        assert i >= 0;

        byte[] p = new byte[rowdef.objectsize];
        if (table == null) {
            if (i == index.size() - 1) {
                // the record is the last one: simply truncate
                file.cleanLast();
            } else {
                // move the last record into slot i and re-index its key
                file.cleanLast(p, 0);
                file.put(i, p, 0);
                byte[] k = new byte[rowdef.primaryKeyLength];
                System.arraycopy(p, 0, k, 0, rowdef.primaryKeyLength);
                index.puti(k, i);
            }
        } else {
            if (i == index.size() - 1) {
                // special handling if the entry is the last entry in the file
                table.removeRow(i, false);
                file.cleanLast();
            } else {
                // switch values: move the last tail into slot i of the RAM copy
                kelondroRow.Entry te = table.removeOne();
                table.set(i, te);

                // mirror the move in the file and re-index the moved record's key
                file.cleanLast(p, 0);
                file.put(i, p, 0);
                kelondroRow.Entry lr = rowdef.newEntry(p);
                index.puti(lr.getPrimaryKeyBytes(), i);
            }
        }
    }
476:
    /**
     * Removes the row with the given key and returns its previous content, or
     * null if the key does not exist. The freed file slot is filled by moving
     * the last record into it, so record order is NOT preserved (keepOrder
     * must be false).
     */
    public synchronized Entry remove(byte[] key, boolean keepOrder)
            throws IOException {
        assert file.size() == index.size() : "file.size() = "
                + file.size() + ", index.size() = " + index.size();
        assert ((table == null) || (table.size() == index.size()));
        assert keepOrder == false; // this class cannot keep the order during a remove
        assert key.length == rowdef.primaryKeyLength;
        int i = index.geti(key);
        if (i == -1)
            return null; // nothing to do

        // prepare result
        byte[] b = new byte[rowdef.objectsize]; // holds the removed row (returned)
        byte[] p = new byte[rowdef.objectsize]; // holds the moved last record
        int sb = index.size();                  // size before removal, for the final assert
        int ix;
        assert i < index.size();
        if (table == null) {
            if (i == index.size() - 1) {
                // removed record is the last one: truncate and capture its content
                ix = index.removei(key);
                assert ix == i;
                file.cleanLast(b, 0);
            } else {
                assert i < index.size() - 1;
                ix = index.removei(key);
                assert ix == i;
                // capture the removed row, then fill the hole with the last record
                file.get(i, b, 0);
                file.cleanLast(p, 0);
                file.put(i, p, 0);
                byte[] k = new byte[rowdef.primaryKeyLength];
                System.arraycopy(p, 0, k, 0, rowdef.primaryKeyLength);
                index.puti(k, i);
            }
            assert (file.size() == index.size());
        } else {
            // get result value from the table copy, so we don't need to read it from the file
            kelondroRow.Entry v = table.get(i);
            System.arraycopy(key, 0, b, 0, key.length);
            System.arraycopy(v.bytes(), 0, b, rowdef.primaryKeyLength,
                    taildef.objectsize);

            if (i == index.size() - 1) {
                // special handling if the entry is the last entry in the file
                ix = index.removei(key);
                assert ix == i;
                table.removeRow(i, false);
                file.cleanLast();
            } else {
                // switch values: fill the hole with the last record/tail and re-index it
                ix = index.removei(key);
                assert ix == i;

                kelondroRow.Entry te = table.removeOne();
                table.set(i, te);

                file.cleanLast(p, 0);
                file.put(i, p, 0);
                kelondroRow.Entry lr = rowdef.newEntry(p);
                index.puti(lr.getPrimaryKeyBytes(), i);
            }
            assert (file.size() == index.size());
            assert (table.size() == index.size()) : "table.size() = "
                    + table.size() + ", index.size() = " + index.size();
        }
        assert file.size() == index.size() : "file.size() = "
                + file.size() + ", index.size() = " + index.size();
        assert ((table == null) || (table.size() == index.size()));
        assert index.size() + 1 == sb : "index.size() = "
                + index.size() + ", sb = " + sb;
        return rowdef.newEntry(b);
    }
548:
    /**
     * Removes and returns the last record of the file, keeping the index and
     * the tail cache (which is position-parallel to the file) in sync.
     */
    public synchronized Entry removeOne() throws IOException {
        assert file.size() == index.size() : "file.size() = "
                + file.size() + ", index.size() = " + index.size();
        assert ((table == null) || (table.size() == index.size()));
        byte[] le = new byte[rowdef.objectsize];
        // truncate the file, capturing the removed record
        file.cleanLast(le, 0);
        kelondroRow.Entry lr = rowdef.newEntry(le);
        int i = index.removei(lr.getPrimaryKeyBytes());
        assert i >= 0;
        if (table != null)
            table.removeOne();
        assert file.size() == index.size() : "file.size() = "
                + file.size() + ", index.size() = " + index.size();
        return lr;
    }
564:
565: public void reset() throws IOException {
566: File f = file.filename();
567: file.close();
568: f.delete();
569:
570: // make new file
571: FileOutputStream fos = null;
572: try {
573: fos = new FileOutputStream(f);
574: } catch (FileNotFoundException e) {
575: // should not happen
576: e.printStackTrace();
577: }
578: try {
579: fos.close();
580: } catch (IOException e) {
581: }
582:
583: // open an existing table file
584: try {
585: this .file = new kelondroBufferedEcoFS(new kelondroEcoFS(f,
586: rowdef.objectsize), this .buffersize);
587: } catch (FileNotFoundException e) {
588: // should never happen
589: e.printStackTrace();
590: }
591:
592: // initialize index and copy table
593: table = new kelondroRowSet(taildef, 1);
594: index = new kelondroBytesIntMap(rowdef.primaryKeyLength,
595: rowdef.objectOrder, 1);
596: }
597:
    /**
     * Returns the row definition (full record layout) of this table.
     */
    public kelondroRow row() {
        return this.rowdef;
    }
601:
    /**
     * Returns the number of records currently in the table (index size).
     */
    public synchronized int size() {
        return index.size();
    }
605:
    /**
     * Iterates full rows in key order, starting at firstKey.
     */
    public synchronized kelondroCloneableIterator<Entry> rows(
            boolean up, byte[] firstKey) throws IOException {
        return new rowIterator(up, firstKey);
    }
610:
    /**
     * Iterator over full rows, driven by the key iterator of the index.
     * Each key is resolved to its file position and the row is either read
     * from the file or composed from the tail cache.
     */
    public class rowIterator implements
            kelondroCloneableIterator<Entry> {
        Iterator<byte[]> i; // underlying key iterator
        boolean up;         // iteration direction (kept for clone())
        byte[] fk;          // first key (kept for clone())
        int c;              // file position of the most recently resolved key

        public rowIterator(boolean up, byte[] firstKey)
                throws IOException {
            this.up = up;
            this.fk = firstKey;
            this.i = index.keys(up, firstKey);
            this.c = -1;
        }

        public kelondroCloneableIterator<Entry> clone(Object modifier) {
            // restart a fresh iteration with the same direction and start key
            try {
                return new rowIterator(up, fk);
            } catch (IOException e) {
                e.printStackTrace();
                return null;
            }
        }

        public boolean hasNext() {
            return i.hasNext();
        }

        public Entry next() {
            byte[] k = i.next();
            try {
                // resolve the key to its current file position
                this.c = index.geti(k);
            } catch (IOException e) {
                e.printStackTrace();
                return null;
            }
            // NOTE(review): if the key was removed since the key iterator was
            // created, c may be -1 here and the accesses below would fail — confirm
            byte[] b = new byte[rowdef.objectsize];
            if (table == null) {
                // read from file
                try {
                    file.get(this.c, b, 0);
                } catch (IOException e) {
                    e.printStackTrace();
                    return null;
                }
            } else {
                // compose from table and key
                kelondroRow.Entry v = table.get(this.c);
                System.arraycopy(k, 0, b, 0, rowdef.primaryKeyLength);
                System.arraycopy(v.bytes(), 0, b,
                        rowdef.primaryKeyLength, taildef.objectsize);
            }
            return rowdef.newEntry(b);
        }

        public void remove() {
            throw new UnsupportedOperationException(
                    "no remove in EcoTable");
        }

    }
672:
    /**
     * Test helper: creates a fresh two-column table at f and fills it with one
     * row per character of testentities (key and value are the same test word).
     */
    public static kelondroIndex testTable(File f, String testentities,
            int testcase) throws IOException {
        if (f.exists())
            f.delete();
        kelondroRow rowdef = new kelondroRow("byte[] a-4, byte[] b-4",
                kelondroNaturalOrder.naturalOrder, 0);
        kelondroIndex tt = new kelondroEcoTable(f, rowdef, testcase,
                100, 0);
        byte[] b;
        kelondroRow.Entry row = rowdef.newEntry();
        for (int i = 0; i < testentities.length(); i++) {
            b = kelondroTree.testWord(testentities.charAt(i));
            row.setCol(0, b);
            row.setCol(1, b);
            tt.put(row);
        }
        return tt;
    }
691:
    /**
     * Exhaustive test: for every permutation of test elements, builds a table,
     * verifies its size, then deletes elements in every permutation order and
     * re-checks the element count after each removal.
     */
    public static void bigtest(int elements, File testFile, int testcase) {
        System.out.println("starting big test with " + elements
                + " elements:");
        long start = System.currentTimeMillis();
        String[] s = kelondroTree.permutations(elements);
        kelondroIndex tt;
        try {
            for (int i = 0; i < s.length; i++) {
                System.out.println("*** probing tree " + i
                        + " for permutation " + s[i]);
                // generate tree and delete elements
                tt = testTable(testFile, s[i], testcase);
                if (kelondroTree.countElements(tt) != tt.size()) {
                    System.out.println("wrong size for " + s[i]);
                }
                tt.close();
                for (int j = 0; j < s.length; j++) {
                    // rebuild the table from permutation i, then delete in order of permutation j
                    tt = testTable(testFile, s[i], testcase);
                    // delete by permutation j
                    for (int elt = 0; elt < s[j].length(); elt++) {
                        tt.remove(kelondroTree.testWord(s[j]
                                .charAt(elt)), false);
                        if (kelondroTree.countElements(tt) != tt.size()) {
                            System.out
                                    .println("ERROR! wrong size for probe tree "
                                            + s[i]
                                            + "; probe delete "
                                            + s[j]
                                            + "; position "
                                            + elt);
                        }
                    }
                    tt.close();
                }
            }
            System.out.println("FINISHED test after "
                    + ((System.currentTimeMillis() - start) / 1000)
                    + " seconds.");
        } catch (Exception e) {
            e.printStackTrace();
            System.out.println("TERMINATED");
        }
    }
735:
    /**
     * Entry point for a manual test run: args[0] is the test file path.
     * Runs the big permutation test once without and once with the tail cache.
     */
    public static void main(String[] args) {
        // open a file, add one entry and exit
        File f = new File(args[0]);
        System.out.println("========= Testcase: no tail cache:");
        bigtest(5, f, tailCacheDenyUsage);
        System.out.println("========= Testcase: with tail cache:");
        bigtest(5, f, tailCacheForceUsage);
        /*
        kelondroRow row = new kelondroRow("byte[] key-4, byte[] x-5", kelondroNaturalOrder.naturalOrder, 0);
        try {
            kelondroEcoTable t = new kelondroEcoTable(f, row);
            kelondroRow.Entry entry = row.newEntry();
            entry.setCol(0, "abcd".getBytes());
            entry.setCol(1, "dummy".getBytes());
            t.put(entry);
            t.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
        */
    }
757:
758: }
|