/**
 * com.mckoi.database.V1MasterTableDataSource 01 Sep 2002
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000, 2001, 2002 Diehl and Associates, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * Version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License Version 2 for more details.
 *
 * You should have received a copy of the GNU General Public License
 * Version 2 along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Change Log:
 *
 *
 */

package com.mckoi.database;

import java.util.ArrayList;
import java.io.*;
import com.mckoi.util.IntegerListInterface;
import com.mckoi.util.ByteArrayUtil;
import com.mckoi.util.UserTerminal;
import com.mckoi.debug.*;

/**
 * A MasterTableDataSource that uses IndexStore and VariableSizeDataStore as
 * its backing mechanism for representing the table structure in a file on
 * disk.
 * <p>
 * The MasterTableDataSource is basically backed by a VariableSizeDataStore
 * for data and an IndexStore for storing indexing information.
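 * <p>
 * In outline, a caller (hypothetically, the conglomerate that owns this
 * table) drives this class roughly as follows:
 * <pre>
 *   V1MasterTableDataSource t =
 *       new V1MasterTableDataSource(system, store_system, open_transactions);
 *   if (t.exists(file_name)) {
 *     t.open(file_name);    // reads back the stored DataTableDef and indexes
 *   }
 *   else {
 *     t.create(table_id, table_def, data_sector_size, index_sector_size);
 *   }
 *   // ... rows are read/added via the MasterTableDataSource contract ...
 *   t.close();
 * </pre>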
 *
 * @author Tobias Downer
 */
public final class V1MasterTableDataSource extends MasterTableDataSource {

  // ---------- State information ----------

  /**
   * The file name of this store in the conglomerate path.
   */
  private String file_name;

  /**
   * A VariableSizeDataStore object that physically contains the information
   * stored in the file system in the contents of the data source.
   */
  private VariableSizeDataStore data_store;

  /**
   * An IndexStore object that manages the indexes for this table.
   */
  private IndexStore index_store;

  /**
   * The object we use to serialize TObject objects.
   */
  private final DataCellSerialization data_cell_serializer =
                                              new DataCellSerialization();

  /**
   * The persistent object we use to read information from a row stream.
   */
  private CellInputStream cell_in;

  /**
   * The Constructor.
   */
  public V1MasterTableDataSource(TransactionSystem system,
                                 StoreSystem store_system,
                                 OpenTransactionList open_transactions) {
    super(system, store_system, open_transactions, null);
    cell_in = new CellInputStream(null);
  }

  /**
   * Returns the name of the file in the conglomerate that represents this
   * store in the file system.
   */
  String getFileName() {
    return file_name;
  }

  /**
   * Returns the path of where this table is located.
   */
  File getPath() {
    return getSystem().getDatabasePath();
  }

  /**
   * Physically create this master table in the file system at the given
   * path. This will initialise the various file objects and result in a
   * new empty master table to store data in.
   * <p>
   * The 'data_sector_size' and 'index_sector_size' allow fine-grained
   * performance and size optimization of the data files. The default
   * 'index_sector_size' is 1024.
   *
   * @param data_sector_size used to configure the size of the sectors in the
   *   data files. For tables with small records this number should be low.
   * @param index_sector_size used to configure the size of the sectors in the
   *   index file. For small tables it is best to keep the index sector size
   *   low. 1024 is recommended for normal use, 128 for minimalist use.
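   * <p>
   * For example, a hypothetical caller might create a small table with
   * (illustrative sector sizes only):
   * <pre>
   *   source.create(table_id, table_def, 128, 128);
   * </pre>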
   */
  synchronized void create(int table_id, DataTableDef table_def,
                           int data_sector_size, int index_sector_size)
                                                         throws IOException {

    // Setup the internal methods
    setupDataTableDef(table_def);

    // Generate the name of the store file name.
    this.file_name = makeTableFileName(getSystem(), table_id, getTableName());

    // Create the store.
    data_store = new VariableSizeDataStore(new File(getPath(), file_name),
                                           data_sector_size, Debug());
    // Open the store in read/write mode
    data_store.open(false);

    // Open table indices
    index_store = new IndexStore(new File(getPath(), file_name + ".iid"),
                                 Debug());
    // Create the table index file.
    index_store.create(index_sector_size);
    index_store.init();
    // Make room for columns+1 indices in the index store file
    index_store.addIndexLists(table_def.columnCount() + 1, (byte) 1);
    index_store.flush();

    // Save the table definition to the new store.
    saveDataTableDef(table_def);

    // Write the 'table_id' of this table to the reserved area of the data
    // store.
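    // (Reserved area layout, as read back in 'open': bytes 0-3 hold the
    // 'table_id' int via ByteArrayUtil; the remaining 60 bytes are unused.)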
    byte[] reserved_buffer = new byte[64];
    ByteArrayUtil.setInt(table_id, reserved_buffer, 0);
    data_store.writeReservedBuffer(reserved_buffer, 0, 64);

    // Set up internal state of this object
    this.table_id = table_id;

    // Load internal state
    loadInternal();

  }

  /**
   * Returns true if the master table data source with the given file name
   * exists.
   */
  synchronized boolean exists(String file_name) throws IOException {
    VariableSizeDataStore data_store = new VariableSizeDataStore(
                                 new File(getPath(), file_name), Debug());
    return data_store.exists();
  }

  /**
   * Opens an existing master table from the file system at the path of the
   * conglomerate this belongs to. This will set up the internal state of
   * this object with the data read in.
   */
  synchronized void open(String file_name) throws IOException {

    // Open the store.
    data_store = new VariableSizeDataStore(new File(getPath(), file_name),
                                           Debug());
    boolean need_check = data_store.open(isReadOnly());

    // Set up the internal state of this object
    // Get the 'table_id' of this table from the reserved area of the data
    // store.
    byte[] reserved_buffer = new byte[64];
    data_store.readReservedBuffer(reserved_buffer, 0, 64);
    table_id = ByteArrayUtil.getInt(reserved_buffer, 0);

    // Set the file name.
    this.file_name = file_name;

    // Load the table definition from the store.
    table_def = loadDataTableDef();

    // Set the column count
    column_count = table_def.columnCount();

    // Open table indices
    table_indices = new MultiVersionTableIndices(getSystem(),
                          table_def.getTableName(), table_def.columnCount());
    // The column rid list cache
    column_rid_list = new RIDList[table_def.columnCount()];

    // Open table indices
    index_store = new IndexStore(new File(getPath(), file_name + ".iid"),
                                 Debug());
    // If the index store doesn't exist then create it.
    if (!index_store.exists()) {
      if (!isReadOnly()) {
        // Does the original .ijf file exist?
        File original_ijf = new File(getPath(), file_name + ".ijf");
        if (original_ijf.exists()) {
          // Message
          String str = "Converting index file for: " + file_name;
          System.out.println(str);
          Debug().write(Lvl.INFORMATION, this, str);
          // NOTE: The following method leaves the index store open.
          ArrayList transaction_journals = ConvertUtils.convertIndexFiles1(
                             original_ijf, index_store, table_def, Debug());
          if (transaction_journals.size() > 0) {
            // Notify the user that this may be a problem
            Debug().write(Lvl.ERROR, this,
                "There are uncommitted changes that were not " +
                "converted because the pre 0.92 database was not closed " +
                "cleanly.");
          }
          // Force a full table scan
          need_check = true;
        }
        else {
          throw new IOException("The index file for '" + file_name +
                                "' does not exist.");
        }
      }
      else {
        throw new IOException(
                        "Cannot create .iid index file in read-only mode.");
      }
    }
    else {
      // Open the table index file.
      index_store.open(isReadOnly());
      index_store.init();
    }

    // Load internal state
    loadInternal();

    // Setup a DataIndexSetDef from the information here
    setupDataIndexSetDef();

    if (need_check) {
      // Do an opening scan of the table. Any records that are uncommitted
      // must be marked as deleted.
      doOpeningScan();
    }

  }

  /**
   * Opens this source in the most minimal way. This should only be used
   * for diagnostics of the data. This will not load the index.
   */
  synchronized void dirtyOpen(String file_name) throws IOException {

    // We have to open this...
    // Open the store.
    data_store = new VariableSizeDataStore(new File(getPath(), file_name),
                                           Debug());
    data_store.open(false);

    // Set up the internal state of this object
    // Get the 'table_id' of this table from the reserved area of the data
    // store.
    byte[] reserved_buffer = new byte[64];
    data_store.readReservedBuffer(reserved_buffer, 0, 64);
    table_id = ByteArrayUtil.getInt(reserved_buffer, 0);

    // Set the file name.
    this.file_name = file_name;

    // Load the table definition from the store.
    table_def = loadDataTableDef();

  }

  /**
   * Closes this master table in the file system. This frees up all the
   * resources associated with this master table.
   * <p>
   * This method is typically called when the database is shut down.
   */
  synchronized void close() throws IOException {
    if (table_indices != null) {
      // Merge all journal changes when we close
      mergeJournalChanges(Integer.MAX_VALUE);

      if (!isReadOnly()) {
        // Synchronize the current state with the file system.
        index_store.flush();
        //table_indices.synchronizeIndexFile();
      }
    }

    // Close the index store.
    index_store.close();
    data_store.close();

    table_id = -1;
    // file_name = null;
    table_def = null;
    table_indices = null;
    column_rid_list = null;
    is_closed = true;
  }

  /**
   * Returns the number of bytes the row takes up in the data file. This is
   * the actual space used. If a cell is compressed then it includes the
   * compressed size, not the uncompressed.
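   * <p>
   * The value returned is 2 bytes for the reserved row header plus, for
   * each column, 4 bytes of cell header plus the stored (possibly
   * compressed) cell length.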
   */
  synchronized int rawRecordSize(int row_number) throws IOException {

    int size = 2;

    ++row_number;

    // Open a stream for this row.
    InputStream in = data_store.getRecordInputStream(row_number);
    cell_in.setParentStream(in);

    cell_in.skip(2);

    for (int i = 0; i < column_count; ++i) {
      int len = data_cell_serializer.skipSerialization(cell_in);
      if (len <= 0) {
        throw new Error("Corrupt data - cell size is <= 0");
      }
      cell_in.skip(len);
      size += 4 + len;
    }

    cell_in.close();

    return size;

  }

  /**
   * Returns the current sector size for this data source.
   */
  synchronized int rawDataSectorSize() throws IOException {
    return data_store.sectorSize();
  }

  /**
   * This may only be called from the 'fix' method. It performs a full scan
   * of the records and rebuilds all the index information from the record
   * data.
   * <p>
   * This should only be used as a recovery mechanism and may not accurately
   * rebuild in some cases (but should nonetheless rebuild as well as it
   * can).
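   * <p>
   * In outline the method: (1) creates a blank temporary index store
   * (file_name + ".id2") with the same block size, (2) scans every raw
   * record and inserts each non-deleted COMMITTED_ADDED row into the
   * master index and into a freshly built SelectableScheme per column,
   * (3) commits and flushes the temporary store, and (4) deletes the old
   * ".iid" file and renames the temporary file in its place.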
   */
  private synchronized void rebuildAllIndices(File path, String file_name)
                                                         throws IOException {

    // Temporary name of the index store
    File temporary_name = new File(path, file_name + ".id2");
    // Actual name of the index store
    File actual_name = new File(path, file_name + ".iid");

    // Make a new blank index store
    IndexStore temp_store = new IndexStore(temporary_name, Debug());
    // Copy the same block size as the original
    temp_store.create(index_store.getBlockSize());
    temp_store.init();
    temp_store.addIndexLists(column_count + 1, (byte) 1);

    // Get the index of rows in this table
    IndexSet index_set = temp_store.getSnapshotIndexSet();

    // The master index,
    IntegerListInterface master_index = index_set.getIndex(0);

    // The selectable schemes for the table.
    TableDataSource table = minimalTableDataSource(master_index);

    // Create a set of index for this table.
    SelectableScheme[] cols = new SelectableScheme[column_count];
    for (int i = 0; i < column_count; ++i) {
      cols[i] = createSelectableSchemeForColumn(index_set, table, i);
    }

    // For each row
    int row_count = rawRowCount();
    for (int i = 0; i < row_count; ++i) {
      // Is this record marked as deleted?
      if (!recordDeleted(i)) {
        // Get the type flags for this record.
        int type = recordTypeInfo(i);
        // Only committed added records are indexed; committed removed and
        // uncommitted records are skipped.
        if (type == RawDiagnosticTable.COMMITTED_ADDED) {
          // Insert into the master index
          master_index.uniqueInsertSort(i);
          // Insert into schemes
          for (int n = 0; n < column_count; ++n) {
            cols[n].insert(i);
          }
        }
      }  // if not deleted
    }  // for each row

    // Commit the index store

    // Write the modified index set to the index store
    // (Updates the index file)
    temp_store.commitIndexSet(index_set);
    index_set.dispose();
    temp_store.flush();

    // Close and delete the original index_store
    index_store.close();
    index_store.delete();
    // Close the temporary store
    temp_store.close();
    // Rename temp file to the actual file
    boolean b = temporary_name.renameTo(actual_name);
    if (!b) {
      throw new IOException("Unable to rename " + temporary_name +
                            " to " + actual_name);
    }
    temp_store = null;

    // Copy and open the new reference
    index_store = new IndexStore(actual_name, Debug());
    index_store.open(false);
    index_store.init();

  }

  /**
   * Copies the persistent information in this table data source to the
   * given directory in the file system. This makes an exact copy of the
   * table as it currently is. It is recommended that when this is used, a
   * lock is in place to prevent changes being committed to the database.
   */
  synchronized void copyTo(File path) throws IOException {
    data_store.copyTo(path);
    index_store.copyTo(path);
  }

  // ---------- Diagnostic and repair ----------

  /**
   * Performs a complete check and repair of the table. The table must not
   * have been opened before this method is called. The given UserTerminal
   * parameter is an implementation of a user interface that is used to ask
   * any questions and output the results of the check.
   */
  public synchronized void checkAndRepair(String file_name,
                                          UserTerminal terminal)
                                                         throws IOException {

    // Open the store.
    data_store = new VariableSizeDataStore(new File(getPath(), file_name),
                                           Debug());
    boolean need_check = data_store.open(isReadOnly());
//    if (need_check) {
    data_store.fix(terminal);
//    }

    // Set up the internal state of this object
    // Get the 'table_id' of this table from the reserved area of the data
    // store.
    byte[] reserved_buffer = new byte[64];
    data_store.readReservedBuffer(reserved_buffer, 0, 64);
    table_id = ByteArrayUtil.getInt(reserved_buffer, 0);

    // Set the file name.
    this.file_name = file_name;

    // Load the table definition from the store.
    table_def = loadDataTableDef();

    // Table journal information
    table_indices = new MultiVersionTableIndices(getSystem(),
                          table_def.getTableName(), table_def.columnCount());
    // The column rid list cache
    column_rid_list = new RIDList[table_def.columnCount()];

    // Open table indices
    index_store = new IndexStore(new File(getPath(), file_name + ".iid"),
                                 Debug());
    // Open the table index file.
    need_check = index_store.open(isReadOnly());
    // Attempt to fix the table index file.
    boolean index_store_stable = index_store.fix(terminal);

    // Load internal state
    loadInternal();

    // Merge all journal changes when we open
    mergeJournalChanges(Integer.MAX_VALUE);

    // If the index store is not stable then clear it and rebuild the
    // indices.
//    if (!index_store_stable) {
    terminal.println("+ Rebuilding all index information for table!");
    rebuildAllIndices(getPath(), file_name);
//    }

    // Do an opening scan of the table. Any records that are uncommitted
    // must be marked as deleted.
    doOpeningScan();

  }

  public synchronized void checkForCleanup() {
    // No-op
  }

  // ---------- Implemented from AbstractMasterTableDataSource ----------

  String getSourceIdent() {
    return getFileName();
  }

  synchronized void synchAll() throws IOException {

    // Flush the indices.
    index_store.flush();

    // Synchronize the data store.
    if (!getSystem().dontSynchFileSystem()) {
      data_store.hardSynch();
    }

    // Synchronize the file handle. When this returns, we are guaranteed
    // that the index store and the data store are now persistently stored
    // in the file system.
    if (!getSystem().dontSynchFileSystem()) {
      index_store.hardSynch();
    }

  }
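
  // NOTE: In the underlying data store, record 0 holds the serialized
  //   DataTableDef, so the row at index 'n' of this table is stored in
  //   record 'n + 1'. The methods below translate between the two
  //   numberings.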

  synchronized int writeRecordType(int row_index, int row_state)
                                                         throws IOException {
    return data_store.writeRecordType(row_index + 1, row_state);
  }

  synchronized int readRecordType(int row_index) throws IOException {
    return data_store.readRecordType(row_index + 1);
  }

  synchronized boolean recordDeleted(int row_index) throws IOException {
    return data_store.recordDeleted(row_index + 1);
  }

  synchronized int rawRowCount() throws IOException {
    return data_store.rawRecordCount() - 1;
  }

  synchronized void internalDeleteRow(int row_index) throws IOException {
    // Delete the row permanently from the data store.
    data_store.delete(row_index + 1);
  }

  IndexSet createIndexSet() {
    return index_store.getSnapshotIndexSet();
  }

  synchronized void commitIndexSet(IndexSet index_set) {
    index_store.commitIndexSet(index_set);
    index_set.dispose();
  }
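
  // Format of record 0 in the data store (written by 'saveDataTableDef' and
  // read back by 'loadDataTableDef'):
  //   int   magic number (0x0bebb)
  //   ....  the DataTableDef serialization (as written by DataTableDef.write)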

  synchronized DataTableDef loadDataTableDef() throws IOException {

    // Read record 0 which contains all this info.
    byte[] d = new byte[65536];
    int read = data_store.read(0, d, 0, 65536);
    if (read == 65536) {
      throw new IOException(
                     "Buffer overflow when reading table definition, > 64k");
    }
    ByteArrayInputStream bin = new ByteArrayInputStream(d, 0, read);

    DataTableDef def;

    DataInputStream din = new DataInputStream(bin);
    int mn = din.readInt();
    // This is the latest format...
    if (mn == 0x0bebb) {
      // Read the DataTableDef object from the input stream,
      def = DataTableDef.read(din);
    }
    else {
      // Legacy no longer supported...
      throw new IOException(
                  "Couldn't find magic number for table definition data.");
    }

    return def;

  }

  synchronized void saveDataTableDef(DataTableDef def) throws IOException {

    ByteArrayOutputStream bout = new ByteArrayOutputStream();
    DataOutputStream dout = new DataOutputStream(bout);

    dout.writeInt(0x0bebb);
    def.write(dout);

    // Write the byte array to the data store,

    byte[] d = bout.toByteArray();
    int rindex = data_store.write(d, 0, d.length);

    // rindex MUST be 0, otherwise the store is corrupt.
    if (rindex != 0) {
      throw new IOException("Couldn't write table fields to record 0.");
    }

  }
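
  // Row record layout in the data store (written by 'internalAddRow' and
  // consumed by 'internalGetCellContents' / 'rawRecordSize'):
  //   short  reserved for future use (currently 0)
  //   ....   'column_count' cells, each written by DataCellSerialization
  //          (a 4-byte cell header followed by the cell data)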

  synchronized int internalAddRow(RowData data) throws IOException {

    OutputStream out = data_store.getRecordOutputStream();
    DataOutputStream temp_out = new DataOutputStream(out);

    // Reserved for future use.
    temp_out.writeShort(0);

    int row_cells = data.getColumnCount();

    // Write out the data,
    for (int i = 0; i < row_cells; ++i) {
      TObject cell = data.getCellData(i);
      data_cell_serializer.setToSerialize(cell);
      data_cell_serializer.writeSerialization(temp_out);
    }

    // Close the stream and complete it.
    temp_out.close();
    int record_index = data_store.completeRecordStreamWrite();

    // Update the cell cache as appropriate
    if (DATA_CELL_CACHING) {
      for (int i = 0; i < row_cells; ++i) {
        // Put the row/column/TObject into the cache.
        cache.put(table_id, record_index, i, data.getCellData(i));
      }
    }

    // The row number is the record index minus 1 because record 0 is
    // reserved for the DataTableDef.
    int row_number = record_index - 1;

    // If we have a rid_list for any of the columns, then update the indexing
    // there,
    for (int i = 0; i < column_count; ++i) {
      RIDList rid_list = column_rid_list[i];
      if (rid_list != null) {
        rid_list.insertRID(data.getCellData(i), row_number);
      }
    }

    // Return the record index of the new data in the table
    return row_number;

  }

  // ---- getCellContents ----

  private short s_run_total_hits = 0;
  private short s_run_file_hits = 0;

  // ---- Optimization that saves some cycles -----

  /**
   * Some variables that are used for optimization in the 'getCellContents'
   * method.
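   * <p>
   * OPT_last_skip_offset holds the byte offset of the start of column
   * OPT_last_col within row OPT_last_row. For example, after reading
   * column 2 of a row, a request for column 5 of the same row can seek
   * straight to that offset and skip forward from column 2, instead of
   * re-skipping from the start of the row.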
   */
  private int OPT_last_row = -1;
  private int OPT_last_col = -1;
  private int OPT_last_skip_offset = -1;

  synchronized TObject internalGetCellContents(int column, int row) {

    // NOTES:
    // This is called *A LOT*. It's a key part of the 20% of the program
    // that's run 80% of the time.
    // This performs very nicely for rows that are completely contained
    // within 1 sector. However, a row that contains large cells (e.g. a
    // large binary or a large string) and spans many sectors will not
    // utilize memory as well as it could.
    // The reason is because all the data for a row is read from the store
    // even if only 1 cell of the column is requested. This will have a big
    // impact on column scans and searches. The cell cache takes some of
    // this performance bottleneck away.
    // However, a better implementation of this method is made difficult by
    // the fact that sector spans can be compressed. We should perhaps
    // revise the low level data storage so only sectors can be compressed.

    // If the database stats need updating then do so now.
    if (s_run_total_hits >= 1600) {
      getSystem().stats().add(s_run_total_hits, total_hits_key);
      getSystem().stats().add(s_run_file_hits, file_hits_key);
      s_run_total_hits = 0;
      s_run_file_hits = 0;
    }

    // Increment the total hits counter
    ++s_run_total_hits;

    // Row 0 is reserved for DataTableDef
    ++row;

    // First check if this is within the cache before we continue.
    TObject cell;
    if (DATA_CELL_CACHING) {
      cell = cache.get(table_id, row, column);
      if (cell != null) {
        return cell;
      }
    }

    // Increment the file hits counter
    ++s_run_file_hits;

    // We maintain a cache of byte[] arrays that contain the rows read in
    // from the file. If consecutive reads are made to the same row, then
    // this will cause lots of fast cache hits.

    try {

      // Open a stream for this row.
      InputStream in = data_store.getRecordInputStream(row);
      cell_in.setParentStream(in);

      // NOTE: This is an optimization for a common sequence of pulling
      //   cells from a row. It remembers the index of the last column read
      //   in, and if the next column requested is >= the last column read,
      //   then it trivially skips the file pointer to the old point.
      //   Variables starting with 'OPT_' are member variables used for
      //   keeping the optimization state information.

      int start_col;
      if (OPT_last_row == row && column >= OPT_last_col) {
        cell_in.skip(OPT_last_skip_offset);
        start_col = OPT_last_col;
      }
      else {
        cell_in.skip(2);
        OPT_last_row = row;
        OPT_last_skip_offset = 2;
        OPT_last_col = 0;
        start_col = 0;
      }

      for (int i = start_col; i < column; ++i) {
        int len = data_cell_serializer.skipSerialization(cell_in);
        if (len <= 0) {
          throw new Error("Corrupt data - cell size is <= 0");
        }
        cell_in.skip(len);
        ++OPT_last_col;
        OPT_last_skip_offset += len + 4;   // ( +4 for the header )
      }
      // Read the cell
      Object ob = data_cell_serializer.readSerialization(cell_in);
      // Get the TType for this column
      // NOTE: It's possible this call may need optimizing?
      TType ttype = getDataTableDef().columnAt(column).getTType();
      // Wrap it around a TObject
      cell = new TObject(ttype, ob);

      // And close the reader.
      cell_in.close();

      // And put in the cache and return it.
      if (DATA_CELL_CACHING) {
        cache.put(table_id, row, column, cell);
      }
      return cell;

    }
    catch (IOException e) {
      Debug().writeException(e);
      throw new Error("IOError getting cell at (" + column + ", " +
                      row + ").");
    }

  }

  synchronized long currentUniqueID() {
    return index_store.currentUniqueID();
  }

  synchronized long nextUniqueID() {
    return index_store.nextUniqueID();
  }

  synchronized void setUniqueID(long value) {
    index_store.setUniqueID(value);
  }

  synchronized void dispose(boolean pending_close) throws IOException {
    close();
  }

  synchronized boolean drop() throws IOException {
    if (!is_closed) {
      close();
    }

    Debug().write(Lvl.MESSAGE, this, "Dropping: " + getFileName());
    data_store.delete();
    index_store.delete();

    return true;
  }

  void shutdownHookCleanup() {
    // This does nothing...
  }

  /**
   * For diagnostic purposes.
   */
  public String toString() {
    return "[V1MasterTableDataSource: " + file_name + "]";
  }

}
|