// kelondroCachedRecords.java
// (C) 2003 - 2007 by Michael Peter Christen; mc@yacy.net, Frankfurt a. M., Germany
// first published 2003 on http://yacy.net
//
// This is a part of YaCy, a peer-to-peer based web search engine
//
// $LastChangedDate: 2007-12-27 17:56:59 +0000 (Do, 27 Dez 2007) $
// $LastChangedRevision: 4292 $
// $LastChangedBy: orbiter $
//
// LICENSE
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

package de.anomic.kelondro;

import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.TreeMap;

import de.anomic.server.serverMemory;

public class kelondroCachedRecords extends kelondroAbstractRecords implements kelondroRecords {

    // memory calculation
    private static final int element_in_cache = 4; // for kelondroCollectionObjectMap: 4; for HashMap: 52
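    // a worked sizing example (illustrative numbers, not taken from a concrete
    // table definition): with headchunksize = 40 bytes, a cached node head
    // accounts for 40 + element_in_cache = 44 bytes, so 100000 cached head
    // chunks occupy roughly 4.4 MB; memoryStats() below reports exactly this
    // arithmetic as "nodeCacheMem"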

    // static supervision objects: recognize and coordinate all activities
    private static TreeMap<String, kelondroCachedRecords> recordTracker = new TreeMap<String, kelondroCachedRecords>();
    private static long memStopGrow = 10000000; // the node cache stops growing if less than this amount of memory (in bytes) is available
    private static long memStartShrink = 6000000; // the node cache starts shrinking if less than this amount of memory (in bytes) is available

    // caching buffer
    private kelondroIntBytesMap cacheHeaders; // the cache; holds overhead values and the key element
    private int readHit, readMiss, writeUnique, writeDouble, cacheDelete, cacheFlush;

    public kelondroCachedRecords(File file, boolean useNodeCache,
            long preloadTime, short ohbytec, short ohhandlec,
            kelondroRow rowdef, int FHandles, int txtProps,
            int txtPropWidth) throws IOException {
        super(file, useNodeCache, ohbytec, ohhandlec, rowdef, FHandles, txtProps, txtPropWidth);
        initCache(useNodeCache, preloadTime);
        if (useNodeCache) recordTracker.put(this.filename, this);
    }

    public kelondroCachedRecords(kelondroRA ra, String filename,
            boolean useNodeCache, long preloadTime, short ohbytec,
            short ohhandlec, kelondroRow rowdef, int FHandles,
            int txtProps, int txtPropWidth, boolean exitOnFail) {
        super(ra, filename, useNodeCache, ohbytec, ohhandlec, rowdef, FHandles, txtProps, txtPropWidth, exitOnFail);
        initCache(useNodeCache, preloadTime);
        if (useNodeCache) recordTracker.put(this.filename, this);
    }

    public kelondroCachedRecords(kelondroRA ra, String filename,
            boolean useNodeCache, long preloadTime) throws IOException {
        super(ra, filename, useNodeCache);
        initCache(useNodeCache, preloadTime);
        if (useNodeCache) recordTracker.put(this.filename, this);
    }
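
    // a minimal construction sketch (the row definition, file name and parameter
    // values are hypothetical; overhead counts, handle and text-property
    // parameters depend on the concrete table layout and the kelondroRow API):
    //
    //   kelondroRow rowdef = new kelondroRow("byte[] key-12, byte[] value-64", kelondroNaturalOrder.naturalOrder, 0);
    //   kelondroCachedRecords db = new kelondroCachedRecords(
    //           new File("DATA/INDEX/test.db"), // backing file
    //           true,                           // use the node cache
    //           1000,                           // preload head chunks for up to 1 second
    //           (short) 0, (short) 0,           // no overhead bytes/handles
    //           rowdef, 5, 0, 0);               // 5 file handles, no text properties
    //
    // with useNodeCache == true the instance registers itself in recordTracker
    // and must be close()d to deregister again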

    private void initCache(boolean useNodeCache, long preloadTime) {
        if (useNodeCache) {
            this.cacheHeaders = new kelondroIntBytesMap(this.headchunksize, 0);
        } else {
            this.cacheHeaders = null;
        }
        this.readHit = 0;
        this.readMiss = 0;
        this.writeUnique = 0;
        this.writeDouble = 0;
        this.cacheDelete = 0;
        this.cacheFlush = 0;
        // pre-load node cache
        if ((preloadTime > 0) && (useNodeCache)) {
            long stop = System.currentTimeMillis() + preloadTime;
            int count = 0;
            try {
                Iterator<kelondroNode> i = contentNodes(preloadTime);
                CacheNode n;
                while ((System.currentTimeMillis() < stop)
                        && (cacheGrowStatus() == 2) && (i.hasNext())) {
                    n = (CacheNode) i.next();
                    cacheHeaders.addb(n.handle().index, n.headChunk);
                    count++;
                }
                cacheHeaders.flush();
                logFine("preloaded " + count + " records into cache");
            } catch (kelondroException e) {
                // the contentNodes iterator had a time-out; we don't do a preload
                logFine("could not preload records: " + e.getMessage());
            }
        }
    }

    private int cacheGrowStatus() {
        long available = serverMemory.available();
        if ((cacheHeaders != null) && (available < cacheHeaders.memoryNeededForGrow())) return 0;
        return cacheGrowStatus(available, memStopGrow, memStartShrink);
    }

    public static final int cacheGrowStatus(long available, long stopGrow, long startShrink) {
        // returns either 0, 1 or 2:
        // 0: the cache must not grow and shall shrink
        // 1: the cache may grow but need not shrink
        // 2: the cache may grow and must not shrink
        if (available > stopGrow) return 2;
        if (available > startShrink) {
            serverMemory.gc(30000, "kelondroCachedRecords.cacheGrowStatus(...) 1"); // thq
            return 1;
        }
        serverMemory.gc(3000, "kelondroCachedRecords.cacheGrowStatus(...) 0"); // thq
        return 0;
    }
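
    // a worked example of the thresholds (values in bytes, using the default
    // memStopGrow = 10000000 and memStartShrink = 6000000):
    //
    //   cacheGrowStatus(20000000L, 10000000L, 6000000L) == 2  // plenty of memory: grow freely
    //   cacheGrowStatus( 8000000L, 10000000L, 6000000L) == 1  // getting tight: hold size (gc hinted)
    //   cacheGrowStatus( 5000000L, 10000000L, 6000000L) == 0  // low memory: shrink (gc hinted)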

    public static void setCacheGrowStati(long memStopGrowNew, long memStartShrinkNew) {
        memStopGrow = memStopGrowNew;
        memStartShrink = memStartShrinkNew;
    }

    public static long getMemStopGrow() {
        return memStopGrow;
    }

    public static long getMemStartShrink() {
        return memStartShrink;
    }

    public static final Iterator<String> filenames() {
        // iterates String objects: all file names from the record tracker
        return recordTracker.keySet().iterator();
    }
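
    // a monitoring sketch that combines filenames() and memoryStats(); the key
    // names come from memoryStats() below:
    //
    //   Iterator<String> it = kelondroCachedRecords.filenames();
    //   while (it.hasNext()) {
    //       String name = it.next();
    //       Map<String, String> stats = kelondroCachedRecords.memoryStats(name);
    //       if (stats != null) System.out.println(name + ": " + stats.get("nodeCacheMem") + " bytes in node cache");
    //   }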

    public static final Map<String, String> memoryStats(String filename) {
        // returns a map of properties for the tracked file given;
        // the properties describe the cache memory allocation of that record object
        kelondroCachedRecords theRecord = recordTracker.get(filename);
        if (theRecord == null) return null; // file is not tracked
        return theRecord.memoryStats();
    }

    private final Map<String, String> memoryStats() {
        // returns statistical data about this object
        if (cacheHeaders == null) return null;
        HashMap<String, String> map = new HashMap<String, String>();
        map.put("nodeChunkSize", Integer.toString(this.headchunksize + element_in_cache));
        map.put("nodeCacheCount", Integer.toString(cacheHeaders.size()));
        map.put("nodeCacheMem", Integer.toString(cacheHeaders.size() * (this.headchunksize + element_in_cache)));
        map.put("nodeCacheReadHit", Integer.toString(readHit));
        map.put("nodeCacheReadMiss", Integer.toString(readMiss));
        map.put("nodeCacheWriteUnique", Integer.toString(writeUnique));
        map.put("nodeCacheWriteDouble", Integer.toString(writeDouble));
        map.put("nodeCacheDeletes", Integer.toString(cacheDelete));
        map.put("nodeCacheFlushes", Integer.toString(cacheFlush));
        return map;
    }

    protected synchronized void deleteNode(kelondroHandle handle) throws IOException {
        if (cacheHeaders == null) {
            super.deleteNode(handle);
        } else synchronized (cacheHeaders) {
            if (cacheHeaders.size() == 0) {
                super.deleteNode(handle);
            } else {
                cacheHeaders.removeb(handle.index);
                cacheDelete++;
                super.deleteNode(handle);
            }
        }
    }

    protected void printCache() {
        if (cacheHeaders == null) {
            System.out.println("### file report: " + size() + " entries");
            for (int i = 0; i < USAGE.allCount(); i++) {
                // print from file to compare
                System.out.print("#F " + i + ": ");
                try {
                    for (int j = 0; j < headchunksize; j++)
                        System.out.print(Integer.toHexString(0xff & entryFile.readByte(j + seekpos(new kelondroHandle(i)))) + " ");
                } catch (IOException e) {
                }
                System.out.println();
            }
        } else {
            System.out.println("### cache report: " + cacheHeaders.size() + " entries");
            Iterator<kelondroRow.Entry> i = cacheHeaders.rows();
            kelondroRow.Entry entry;
            while (i.hasNext()) {
                entry = i.next();

                // print from cache
                System.out.print("#C ");
                printChunk(entry);
                System.out.println();

                // print from file to compare
                /*
                System.out.print("#F " + cp + " " + ((Handle) entry.getKey()).index + ": ");
                try {
                    for (int j = 0; j < headchunksize; j++)
                        System.out.print(entryFile.readByte(j + seekpos((Handle) entry.getKey())) + ",");
                } catch (IOException e) {}
                */
                System.out.println();
            }
        }
        System.out.println("### end report");
    }

    public synchronized void close() {
        if (cacheHeaders == null) {
            if (recordTracker.get(this.filename) != null) {
                theLogger.severe("close(): file '" + this.filename
                        + "' was tracked with the record tracker, but it should not be.");
            }
        } else {
            if (recordTracker.remove(this.filename) == null) {
                theLogger.severe("close(): file '" + this.filename
                        + "' was not tracked with the record tracker.");
            }
        }
        super.close();
        this.cacheHeaders = null;
    }

    public kelondroProfile[] profiles() {
        return new kelondroProfile[] {
                (cacheHeaders == null) ? new kelondroProfile() : cacheHeaders.profile(),
                entryFile.profile() };
    }

    public kelondroProfile profile() {
        return kelondroProfile.consolidate(profiles());
    }

    public void print() throws IOException {
        super.print();

        // print also all records
        System.out.println("CACHE");
        printCache();
        System.out.println("--");
        System.out.println("NODES");
        Iterator<kelondroNode> i = new contentNodeIterator(-1);
        kelondroNode n;
        while (i.hasNext()) {
            n = i.next();
            System.out.println("NODE: " + n.toString());
        }
    }

    public kelondroNode newNode(kelondroHandle handle, byte[] bulk, int offset) throws IOException {
        return new CacheNode(handle, bulk, offset);
    }

    public final class CacheNode implements kelondroNode {
        // a Node holds all information of one row of data. This includes the key of the entry,
        // which is stored as entry element at position 0.
        // a Node object can be created in two ways:
        // 1. instantiation with an index number. After creation the object does not hold any
        //    value information until such is retrieved using the getValue() method.
        // 2. instantiation with a value array. The values are not directly written into the
        //    file. Expanding the tree structure is then done using the save() method. At any
        //    time it is possible to verify the save state using the saved() predicate.
        // Therefore an entry object has three modes:
        // a: holding an index information only (saved() = true)
        // b: holding value information only (saved() = false)
        // c: holding index and value information at the same time (saved() = true),
        //    which can be the result of one of the two processes as follows:
        //    (i) created with index and after using the getValue() method, or
        //    (ii) created with values and after calling the save() method
        // The save() method will therefore throw an IllegalStateException when the following
        // process step is performed:
        //    - create the Node with index and then call the save() method.
        // This case can be decided with ((index != NUL) && (values == null)).
        // The save() method represents the insert function for the tree. Balancing functions
        // are applied automatically. While balancing, the Node never changes its index key,
        // only its parent/child keys.
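        //
        // a sketch of the three modes in terms of this class (handle and row
        // values are hypothetical; getValueRow() loads values, commit() saves):
        //
        //   CacheNode a = new CacheNode(new kelondroHandle(17), false); // mode a: index only
        //   byte[] row = a.getValueRow();                               // now mode c: index + values
        //   CacheNode b = new CacheNode(rowBytes);                      // created from a value array
        //   b.commit();                                                 // values are persisted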
        //private byte[] ohBytes = null;  // the overhead bytes, OHBYTEC values
        //private Handle[] ohHandle = null; // the overhead handles, OHHANDLEC values
        //private byte[][] values = null; // an array of byte[] nodes is the value vector
        private kelondroHandle handle = null; // index of the entry; by default NUL means undefined
        private byte[] headChunk = null; // contains ohBytes, ohHandles and the key value
        private byte[] tailChunk = null; // contains all values except the key value
        private boolean headChanged = false;
        private boolean tailChanged = false;

        public CacheNode(byte[] rowinstance) throws IOException {
            // this initializer is used to create nodes from bulk-read byte arrays
            assert ((rowinstance == null) || (rowinstance.length == ROW.objectsize)) : "rowinstance.length = "
                    + rowinstance.length + ", ROW.objectsize = " + ROW.objectsize;
            this.handle = new kelondroHandle(USAGE.allocatePayload(rowinstance));

            // create empty chunks
            this.headChunk = new byte[headchunksize];
            this.tailChunk = new byte[tailchunksize];

            // write content to chunks
            if (rowinstance == null) {
                for (int i = headchunksize - 1; i >= 0; i--) this.headChunk[i] = (byte) 0xff;
                for (int i = tailchunksize - 1; i >= 0; i--) this.tailChunk[i] = (byte) 0xff;
            } else {
                for (int i = overhead - 1; i >= 0; i--) this.headChunk[i] = (byte) 0xff;
                System.arraycopy(rowinstance, 0, this.headChunk, overhead, ROW.width(0));
                System.arraycopy(rowinstance, ROW.width(0), this.tailChunk, 0, tailchunksize);
            }

            if (cacheHeaders != null) synchronized (cacheHeaders) {
                updateNodeCache();
            }

            // mark chunks as unchanged:
            // if the head/tail chunks come from a file-system read, the changed flags should be false;
            // if the chunks come from an overwrite attempt, they should be true
            this.headChanged = false; // we wrote the head already during allocate
            this.tailChanged = false; // we wrote the tail already during allocate
        }

        public CacheNode(kelondroHandle handle, byte[] bulkchunk, int offset) throws IOException {
            // this initializer is used to create nodes from bulk-read byte arrays.
            // if write is true, then the chunk in bulkchunk is written to the file;
            // otherwise it is considered equal to what is stored in the file
            // (that is ensured during pre-loaded enumeration)
            this.handle = handle;
            boolean changed;
            if (handle.index >= USAGE.allCount()) {
                // this causes only a write action if we create a node beyond the end of the file
                USAGE.allocateRecord(handle.index, bulkchunk, offset);
                changed = false; // we have already written the record, so it is considered unchanged
            } else {
                changed = true;
            }
            assert ((bulkchunk == null) || (bulkchunk.length - offset >= recordsize)) : "bulkchunk.length = "
                    + bulkchunk.length + ", offset = " + offset + ", recordsize = " + recordsize;

            // create empty chunks
            this.headChunk = new byte[headchunksize];
            this.tailChunk = new byte[tailchunksize];

            // write content to chunks
            if (bulkchunk != null) {
                System.arraycopy(bulkchunk, offset, this.headChunk, 0, headchunksize);
                System.arraycopy(bulkchunk, offset + headchunksize, this.tailChunk, 0, tailchunksize);
            }

            // mark chunks as changed
            this.headChanged = changed;
            this.tailChanged = changed;
        }

        public CacheNode(kelondroHandle handle, boolean fillTail) throws IOException {
            this(handle, null, 0, fillTail);
        }

        public CacheNode(kelondroHandle handle, CacheNode parentNode,
                int referenceInParent, boolean fillTail) throws IOException {
            // this creates an entry with a pre-reserved entry position.
            // values can be written using the setValueRow() method,
            // but we expect that values are already there in the file.
            assert (handle != null) : "node handle is null";
            assert (handle.index >= 0) : "node handle too low: " + handle.index;
            //assert (handle.index < USAGE.allCount()) : "node handle too high: " + handle.index + ", USEDC=" + USAGE.USEDC + ", FREEC=" + USAGE.FREEC;

            // the parentNode can be given if an auto-fix in the following case is wanted
            if (handle == null) throw new kelondroException(filename,
                    "INTERNAL ERROR: node handle is null.");
            if (handle.index >= USAGE.allCount()) {
                if (parentNode == null) throw new kelondroException(filename,
                        "INTERNAL ERROR, Node/init: node handle index " + handle.index
                        + " exceeds size. No auto-fix node was submitted. This is a serious failure.");
                try {
                    parentNode.setOHHandle(referenceInParent, null);
                    parentNode.commit();
                    logWarning("INTERNAL ERROR, Node/init in " + filename
                            + ": node handle index " + handle.index
                            + " exceeds size. The bad node has been auto-fixed");
                } catch (IOException ee) {
                    throw new kelondroException(filename,
                            "INTERNAL ERROR, Node/init: node handle index " + handle.index
                            + " exceeds size. An attempt was made to fix the bad node, but it failed with an IOException: "
                            + ee.getMessage());
                }
            }

            // use given handle
            this.handle = new kelondroHandle(handle.index);

            // check for memory availability when fillTail is requested
            if ((fillTail) && (tailchunksize > 10000))
                fillTail = false; // this is a fail-safe 'short version' of a memory check

            // init the content:
            // create chunks; read them from the file or from the cache
            this.tailChunk = null;
            if (cacheHeaders == null) {
                if (fillTail) {
                    // read the complete record
                    byte[] chunkbuffer = new byte[recordsize];
                    entryFile.readFully(seekpos(this.handle), chunkbuffer, 0, recordsize);
                    this.headChunk = new byte[headchunksize];
                    this.tailChunk = new byte[tailchunksize];
                    System.arraycopy(chunkbuffer, 0, this.headChunk, 0, headchunksize);
                    System.arraycopy(chunkbuffer, headchunksize, this.tailChunk, 0, tailchunksize);
                    chunkbuffer = null;
                } else {
                    // read overhead and key only
                    this.headChunk = new byte[headchunksize];
                    this.tailChunk = null;
                    entryFile.readFully(seekpos(this.handle), this.headChunk, 0, headchunksize);
                }
            } else synchronized (cacheHeaders) {
                byte[] cacheEntry = cacheHeaders.getb(this.handle.index);
                if (cacheEntry == null) {
                    // cache miss; we read overhead and key from the file
                    readMiss++;
                    if (fillTail) {
                        // read the complete record
                        byte[] chunkbuffer = new byte[recordsize];
                        entryFile.readFully(seekpos(this.handle), chunkbuffer, 0, recordsize);
                        this.headChunk = new byte[headchunksize];
                        this.tailChunk = new byte[tailchunksize];
                        System.arraycopy(chunkbuffer, 0, this.headChunk, 0, headchunksize);
                        System.arraycopy(chunkbuffer, headchunksize, this.tailChunk, 0, tailchunksize);
                        chunkbuffer = null;
                    } else {
                        // read overhead and key only
                        this.headChunk = new byte[headchunksize];
                        this.tailChunk = null;
                        entryFile.readFully(seekpos(this.handle), this.headChunk, 0, headchunksize);
                    }

                    // if space is left in the cache, copy these values into the cache
                    updateNodeCache();
                } else {
                    readHit++;
                    this.headChunk = cacheEntry;
                }
            }
        }

        private void setValue(byte[] value, int valueoffset, int valuewidth,
                byte[] targetarray, int targetoffset) {
            if (value == null) {
                while (valuewidth-- > 0) targetarray[targetoffset++] = 0;
            } else {
                assert ((valueoffset >= 0) && (valueoffset < value.length)) : "valueoffset = " + valueoffset;
                assert ((valueoffset + valuewidth <= value.length)) : "valueoffset = " + valueoffset
                        + ", valuewidth = " + valuewidth + ", value.length = " + value.length;
                assert ((targetoffset >= 0) && (targetoffset < targetarray.length)) : "targetoffset = " + targetoffset;
                assert ((targetoffset + valuewidth <= targetarray.length)) : "targetoffset = " + targetoffset
                        + ", valuewidth = " + valuewidth + ", targetarray.length = " + targetarray.length;
                // copy the available bytes and zero-pad the remainder of the target field
                System.arraycopy(value, valueoffset, targetarray, targetoffset,
                        Math.min(value.length - valueoffset, valuewidth));
                while (valuewidth-- > value.length - valueoffset)
                    targetarray[targetoffset + valuewidth] = 0;
            }
        }
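
        // padding sketch (hypothetical numbers): with value.length = 6, valueoffset = 2
        // and valuewidth = 8, the 4 available bytes are copied to targetoffset..targetoffset+3
        // and the remaining 4 target bytes are set to 0, so the written field is always
        // exactly valuewidth bytes long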

        public kelondroHandle handle() {
            // if this entry has an index, return it
            if (this.handle.index == kelondroHandle.NUL)
                throw new kelondroException(filename, "the entry has no index assigned");
            return this.handle;
        }

        public void setOHByte(int i, byte b) {
            if (i >= OHBYTEC)
                throw new IllegalArgumentException("setOHByte: wrong index " + i);
            if (this.handle.index == kelondroHandle.NUL)
                throw new kelondroException(filename, "setOHByte: no handle assigned");
            this.headChunk[i] = b;
            this.headChanged = true;
        }

        public void setOHHandle(int i, kelondroHandle otherhandle) {
            assert (i < OHHANDLEC) : "setOHHandle: wrong array index " + i;
            assert (this.handle.index != kelondroHandle.NUL) : "setOHHandle: no handle assigned in file " + filename;
            if (otherhandle == null) {
                NUL2bytes(this.headChunk, OHBYTEC + 4 * i);
            } else {
                if (otherhandle.index >= USAGE.allCount())
                    throw new kelondroException(filename,
                            "INTERNAL ERROR, setOHHandles: handle " + i
                            + " exceeds file size (" + otherhandle.index + " >= " + USAGE.allCount() + ")");
                int2bytes(otherhandle.index, this.headChunk, OHBYTEC + 4 * i);
            }
            this.headChanged = true;
        }

        public byte getOHByte(int i) {
            if (i >= OHBYTEC)
                throw new IllegalArgumentException("getOHByte: wrong index " + i);
            if (this.handle.index == kelondroHandle.NUL)
                throw new kelondroException(filename, "Cannot load OH values");
            return this.headChunk[i];
        }

        public kelondroHandle getOHHandle(int i) {
            if (this.handle.index == kelondroHandle.NUL)
                throw new kelondroException(filename, "Cannot load OH values");
            assert (i < OHHANDLEC) : "handle index out of bounds: " + i + " in file " + filename;
            int h = bytes2int(this.headChunk, OHBYTEC + 4 * i);
            return (h == kelondroHandle.NUL) ? null : new kelondroHandle(h);
        }

        public synchronized void setValueRow(byte[] row) throws IOException {
            // if the index is defined, then write values directly to the file, else only to the object
            if ((row != null) && (row.length != ROW.objectsize))
                throw new IOException("setValueRow with wrong row length " + row.length
                        + "; expected: " + ROW.objectsize);

            // set values
            if (this.handle.index != kelondroHandle.NUL) {
                setValue(row, 0, ROW.width(0), headChunk, overhead);
                if (ROW.columns() > 1) setValue(row, ROW.width(0), tailchunksize, tailChunk, 0);
            }
            this.headChanged = true;
            this.tailChanged = true;
        }

        public synchronized boolean valid() {
            // returns true if the key starts with a non-zero byte;
            // this may help to detect deleted entries
            return (headChunk[overhead] != 0)
                    && ((headChunk[overhead] != -128) || (headChunk[overhead + 1] != 0));
        }

        public synchronized byte[] getKey() {
            // read key
            return trimCopy(headChunk, overhead, ROW.width(0));
        }

        public synchronized byte[] getValueRow() throws IOException {
            if (this.tailChunk == null) {
                // load all values from the database file
                this.tailChunk = new byte[tailchunksize];
                // read values
                entryFile.readFully(seekpos(this.handle) + (long) headchunksize,
                        this.tailChunk, 0, this.tailChunk.length);
            }

            // create return value
            byte[] row = new byte[ROW.objectsize];

            // read key
            System.arraycopy(headChunk, overhead, row, 0, ROW.width(0));

            // read remaining values
            System.arraycopy(tailChunk, 0, row, ROW.width(0), tailchunksize);

            return row;
        }

        public synchronized void commit() throws IOException {
            // this must be called after all write operations to the node are finished;
            // it writes the data to the file

            if (this.headChunk == null) {
                // there is nothing to save
                throw new kelondroException(filename, "no values to save (header missing)");
            }

            boolean doCommit = this.headChanged || this.tailChanged;

            // save head
            synchronized (entryFile) {
                if (this.headChanged) {
                    //System.out.println("WRITEH(" + filename + ", " + seekpos(this.handle) + ", " + this.headChunk.length + ")");
                    assert (headChunk == null) || (headChunk.length == headchunksize);
                    entryFile.write(seekpos(this.handle),
                            (this.headChunk == null) ? new byte[headchunksize] : this.headChunk);
                    updateNodeCache();
                    this.headChanged = false;
                }

                // save tail
                if ((this.tailChunk != null) && (this.tailChanged)) {
                    //System.out.println("WRITET(" + filename + ", " + (seekpos(this.handle) + headchunksize) + ", " + this.tailChunk.length + ")");
                    assert (tailChunk == null) || (tailChunk.length == tailchunksize);
                    entryFile.write(seekpos(this.handle) + headchunksize,
                            (this.tailChunk == null) ? new byte[tailchunksize] : this.tailChunk);
                    this.tailChanged = false;
                }

                if (doCommit) entryFile.commit();
            }
        }

        public String toString() {
            if (this.handle.index == kelondroHandle.NUL) return "NULL";
            String s = Integer.toHexString(this.handle.index);
            kelondroHandle h;
            while (s.length() < 4) s = "0" + s;
            try {
                for (int i = 0; i < OHBYTEC; i++) s = s + ":b" + getOHByte(i);
                for (int i = 0; i < OHHANDLEC; i++) {
                    h = getOHHandle(i);
                    if (h == null) s = s + ":hNULL"; else s = s + ":h" + h.toString();
                }
                kelondroRow.Entry content = row().newEntry(getValueRow());
                for (int i = 0; i < row().columns(); i++)
                    s = s + ":" + ((content.empty(i)) ? "NULL" : content.getColString(i, "UTF-8").trim());
            } catch (IOException e) {
                s = s + ":***LOAD ERROR***:" + e.getMessage();
            }
            return s;
        }

        private boolean cacheSpace() {
            // checks for space in the cache;
            // should only be called within a synchronized(cacheHeaders) environment.
            // returns true if it is allowed to add another entry to the cache,
            // false if the cache is considered to be full
            if (cacheHeaders == null) return false; // no caching
            if (cacheHeaders.size() == 0) return true; // nothing there to flush
            if (cacheGrowStatus() == 2) return true; // no need to flush cache space

            // flush one of the entries
            if (cacheGrowStatus() <= 1) synchronized (cacheHeaders) {
                cacheHeaders.removeoneb();
                cacheFlush++;
            }
            return cacheGrowStatus() > 0;
        }
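
        // the resulting policy per cacheGrowStatus() value (a summary of the code above;
        // the status may change between the two calls inside the method):
        //   2: nothing is flushed, the caller may add another entry (returns true)
        //   1: one entry is flushed, the caller may still add an entry (returns true)
        //   0: one entry is flushed and no new entry may be added (returns false)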

        private void updateNodeCache() {
            if (this.handle == null) return; // wrong access
            if (this.headChunk == null) return; // nothing there to cache
            if (cacheHeaders == null) return; // we do not use the cache
            if (cacheSpace()) synchronized (cacheHeaders) {
                // generate cache entry
                //byte[] cacheEntry = new byte[headchunksize];
                //System.arraycopy(headChunk, 0, cacheEntry, 0, headchunksize);

                // store the cache entry
                boolean upd = (cacheHeaders.putb(this.handle.index, headChunk) != null);
                if (upd) writeDouble++; else writeUnique++;

                //System.out.println("kelondroRecords cache4" + filename + ": cache record size = " + (memBefore - Runtime.getRuntime().freeMemory()) + " bytes" + ((newentry) ? " new" : ""));
                //printCache();
            } else {
                // there shall be no entry in the cache; if one exists, we remove it
                boolean rem = (cacheHeaders.removeb(this.handle.index) != null);
                if (rem) cacheDelete++;
            }
        }
    }

}