0001: /**
0002: * com.mckoi.database.MasterTableDataSource 19 Nov 2000
0003: *
0004: * Mckoi SQL Database ( http://www.mckoi.com/database )
0005: * Copyright (C) 2000, 2001, 2002 Diehl and Associates, Inc.
0006: *
0007: * This program is free software; you can redistribute it and/or
0008: * modify it under the terms of the GNU General Public License
0009: * Version 2 as published by the Free Software Foundation.
0010: *
0011: * This program is distributed in the hope that it will be useful,
0012: * but WITHOUT ANY WARRANTY; without even the implied warranty of
0013: * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
0014: * GNU General Public License Version 2 for more details.
0015: *
0016: * You should have received a copy of the GNU General Public License
0017: * Version 2 along with this program; if not, write to the Free Software
0018: * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
0019: *
0020: * Change Log:
0021: *
0022: *
0023: */package com.mckoi.database;
0024:
0025: import java.util.ArrayList;
0026: import java.io.*;
0027: import com.mckoi.util.IntegerListInterface;
0028: import com.mckoi.util.IntegerIterator;
0029: import com.mckoi.util.IntegerVector;
0030: import com.mckoi.util.ByteArrayUtil;
0031: import com.mckoi.util.UserTerminal;
0032: import com.mckoi.util.Cache;
0033: import com.mckoi.debug.*;
0034:
0035: /**
0036: * A master table data source provides facilities for read/writing and
0037: * maintaining low level data in a table. It provides primitive table
0038: * operations such as retrieving a cell from a table, accessing the table's
0039: * DataTableDef, accessing indexes, and providing views of transactional
0040: * versions of the data.
0041: * <p>
0042: * Logically, a master table data source contains a dynamic number of rows and
0043: * a fixed number of columns. Each row has an associated state - either
0044: * DELETED, UNCOMMITTED, COMMITTED_ADDED or COMMITTED_REMOVED. A DELETED
0045: * row is a row that can be reused by a new row added to the table.
0046: * <p>
0047: * When a new row is added to the table, it is marked as UNCOMMITTED. It is
0048: * later tagged as COMMITTED_ADDED when the transaction that caused the row
0049: * addition is committed. If a row commits a row removal, the row is tagged
0050: * as COMMITTED_REMOVED and later the row garbage collector marks the row as
0051: * DELETED when there are no remaining references to the row.
0052: * <p>
0053: * A master table also maintains a list of indexes for the table.
0054: * <p>
0055: * How the master table logical structure is translated to a form that is
0056: * stored persistantly is implementation specific. This allows us flexibility
0057: * with different types of storage schemes.
0058: *
0059: * @author Tobias Downer
0060: */
0061:
abstract class MasterTableDataSource {

// ---------- System information ----------

/**
 * The global TransactionSystem object that points to the global system
 * that this table source belongs to.
 */
private TransactionSystem system;

/**
 * The StoreSystem implementation that represents the data persistence
 * layer.
 */
private StoreSystem store_system;

// ---------- State information ----------

/**
 * An integer that uniquely identifies this data source within the
 * conglomerate.
 */
protected int table_id;

/**
 * True if this table source is closed.  Set to true in the constructor
 * and cleared by 'loadInternal' when the source is opened.
 */
protected boolean is_closed;

// ---------- Root locking ----------

/**
 * The number of root locks this table data source has on it.
 * <p>
 * While a MasterTableDataSource has at least 1 root lock, it may not
 * reclaim deleted space in the data store.  A root lock means that data
 * is still being pointed to in this file (even possibly committed deleted
 * data).
 */
private int root_lock;

// ---------- Persistent data ----------

/**
 * A DataTableDef object that describes the table topology.  This includes
 * the name and columns of the table.
 */
protected DataTableDef table_def;

/**
 * A DataIndexSetDef object that describes the indexes on the table.
 */
protected DataIndexSetDef index_def;

/**
 * A cached TableName for this data source, lazily resolved by
 * 'cachedTableName'.
 */
private TableName cached_table_name;

/**
 * A multi-version representation of the table indices kept for this table
 * including the row list and the scheme indices.  This contains the
 * transaction journals.
 */
protected MultiVersionTableIndices table_indices;

/**
 * The list of RIDList objects for each column in this table.  This is
 * a sorting optimization.
 */
protected RIDList[] column_rid_list;

// ---------- Cached information ----------

/**
 * Set to false to disable cell caching.  NOTE: the constructor clears this
 * flag when the system does not provide a DataCellCache.
 */
protected boolean DATA_CELL_CACHING = true;

/**
 * A reference to the DataCellCache object (may be null - see
 * DATA_CELL_CACHING).
 */
protected final DataCellCache cache;

/**
 * The number of columns in this table.  This is a cached optimization
 * set up by 'loadInternal'.
 */
protected int column_count;

// --------- Parent information ----------

/**
 * The list of all open transactions managed by the parent conglomerate.
 * This is a thread safe object, and is updated whenever new transactions
 * are created, or transactions are closed.
 */
private OpenTransactionList open_transactions;

// ---------- Row garbage collection ----------

/**
 * Manages scanning and deleting of rows marked as deleted within this
 * data source.
 */
protected MasterTableGarbageCollector garbage_collector;

// ---------- Blob management ----------

/**
 * An abstracted reference to a BlobStore for managing blob references and
 * blob data.
 */
protected BlobStoreInterface blob_store_interface;

// ---------- Stat keys ----------

/**
 * The keys we use for Database.stats() for information for this table.
 * These are initialized by 'loadInternal'.
 */
protected String root_lock_key;
protected String total_hits_key;
protected String file_hits_key;
protected String delete_hits_key;
protected String insert_hits_key;
0186:
/**
 * Constructs the MasterTableDataSource.  The 'open_transactions' argument
 * is a reference to an object that manages the list of open transactions in
 * the conglomerate; this information determines how journal entries are
 * merged with the master indices.
 */
MasterTableDataSource(TransactionSystem system,
                      StoreSystem store_system,
                      OpenTransactionList open_transactions,
                      BlobStoreInterface blob_store_interface) {
  this.system = system;
  this.store_system = store_system;
  this.open_transactions = open_transactions;
  this.blob_store_interface = blob_store_interface;
  this.garbage_collector = new MasterTableGarbageCollector(this);
  this.cache = system.getDataCellCache();
  // The source starts closed; 'loadInternal' opens it.
  this.is_closed = true;

  // Cell caching is only possible when the system provides a cache.
  DATA_CELL_CACHING = DATA_CELL_CACHING && (cache != null);
}
0211:
/**
 * Returns the global TransactionSystem that this table source belongs to.
 */
public final TransactionSystem getSystem() {
return system;
}
0218:
0219: /**
0220: * Returns the DebugLogger object that can be used to log debug messages.
0221: */
0222: public final DebugLogger Debug() {
0223: return getSystem().Debug();
0224: }
0225:
0226: /**
0227: * Returns the TableName of this table source.
0228: */
0229: public TableName getTableName() {
0230: return getDataTableDef().getTableName();
0231: }
0232:
0233: /**
0234: * Returns the name of this table source.
0235: */
0236: public String getName() {
0237: return getDataTableDef().getName();
0238: }
0239:
0240: /**
0241: * Returns the schema name of this table source.
0242: */
0243: public String getSchema() {
0244: return getDataTableDef().getSchema();
0245: }
0246:
0247: /**
0248: * Returns a cached TableName for this data source.
0249: */
0250: synchronized TableName cachedTableName() {
0251: if (cached_table_name != null) {
0252: return cached_table_name;
0253: }
0254: cached_table_name = getTableName();
0255: return cached_table_name;
0256: }
0257:
0258: /**
0259: * Updates the master records from the journal logs up to the given
0260: * 'commit_id'. This could be a fairly expensive operation if there are
0261: * a lot of modifications because each change could require a lookup
0262: * of records in the data source.
0263: * <p>
0264: * NOTE: It's extremely important that when this is called, there are no
0265: * transaction open that are using the merged journal. If there is, then
0266: * a transaction may be able to see changes in a table that were made
0267: * after the transaction started.
0268: * <p>
0269: * After this method is called, it's best to update the index file
0270: * with a call to 'synchronizeIndexFiles'
0271: */
0272: synchronized void mergeJournalChanges(long commit_id) {
0273:
0274: boolean all_merged = table_indices
0275: .mergeJournalChanges(commit_id);
0276: // If all journal entries merged then schedule deleted row collection.
0277: if (all_merged && !isReadOnly()) {
0278: checkForCleanup();
0279: }
0280:
0281: }
0282:
/**
 * Returns a list of all MasterTableJournal objects that have been
 * successfully committed against this table with a 'commit_id' that is
 * greater or equal to the given value.
 * <p>
 * This is part of the conglomerate commit check phase and will be called
 * under a commit_lock.
 */
synchronized MasterTableJournal[] findAllJournalsSince(
long commit_id) {
return table_indices.findAllJournalsSince(commit_id);
}
0295:
// ---------- Getters ----------

/**
 * Returns table_id - the unique identifier for this data source within the
 * conglomerate.
 */
int getTableID() {
return table_id;
}
0304:
/**
 * Returns the DataTableDef object that represents the topology of this
 * table data source (name, columns, etc).  Note that this information
 * can't be changed during the lifetime of a data source.
 */
DataTableDef getDataTableDef() {
return table_def;
}
0313:
/**
 * Returns the DataIndexSetDef object that represents the indexes on this
 * table.
 */
DataIndexSetDef getDataIndexSetDef() {
return index_def;
}
0321:
0322: // ---------- Convenient statics ----------
0323:
0324: /**
0325: * Creates a unique table name to give a file. This could be changed to suit
0326: * a particular OS's style of filesystem namespace. Or it could return some
0327: * arbitarily unique number. However, for debugging purposes it's often
0328: * a good idea to return a name that a user can recognise.
0329: * <p>
0330: * The 'table_id' is a guarenteed unique number between all tables.
0331: */
0332: protected static String makeTableFileName(TransactionSystem system,
0333: int table_id, TableName table_name) {
0334:
0335: // NOTE: We may want to change this for different file systems.
0336: // For example DOS is not able to handle more than 8 characters
0337: // and is case insensitive.
0338: String tid = Integer.toString(table_id);
0339: int pad = 3 - tid.length();
0340: StringBuffer buf = new StringBuffer();
0341: for (int i = 0; i < pad; ++i) {
0342: buf.append('0');
0343: }
0344:
0345: String str = table_name.toString().replace('.', '_');
0346:
0347: // Go through each character and remove each non a-z,A-Z,0-9,_ character.
0348: // This ensure there are no strange characters in the file name that the
0349: // underlying OS may not like.
0350: StringBuffer osified_name = new StringBuffer();
0351: int count = 0;
0352: for (int i = 0; i < str.length() || count > 64; ++i) {
0353: char c = str.charAt(i);
0354: if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
0355: || (c >= '0' && c <= '9') || c == '_') {
0356: osified_name.append(c);
0357: ++count;
0358: }
0359: }
0360:
0361: return new String(buf) + tid + new String(osified_name);
0362: }
0363:
// ---------- Abstract methods ----------

/**
 * Returns a string that uniquely identifies this table within the
 * conglomerate context.  For example, the filename of the table.  This
 * string can also be used to open and initialize the table.
 */
abstract String getSourceIdent();

/**
 * Sets the record type for the given record in the table and returns the
 * previous state of the record.  This is used to change the state of a
 * row in the table.
 */
abstract int writeRecordType(int row_index, int row_state)
throws IOException;

/**
 * Reads the record state for the given record in the table.
 */
abstract int readRecordType(int row_index) throws IOException;

/**
 * Returns true if the record with the given index is deleted from the table.
 * A deleted row can not be read.
 */
abstract boolean recordDeleted(int row_index) throws IOException;

/**
 * Returns the raw count of rows in the table, including uncommitted,
 * committed and deleted rows.  This is basically the maximum number of rows
 * we can iterate through.
 */
abstract int rawRowCount() throws IOException;

/**
 * Removes the row at the given index so that any resources associated with
 * the row may be immediately available to be recycled.
 */
abstract void internalDeleteRow(int row_index) throws IOException;

/**
 * Creates and returns an IndexSet object that is used to create indices
 * for this table source.  The IndexSet represents a snapshot of the
 * table at the given point in time.
 * <p>
 * NOTE: Not synchronized because we synchronize in the IndexStore object.
 */
abstract IndexSet createIndexSet();

/**
 * Commits changes made to an IndexSet returned by the 'createIndexSet'
 * method.  This method also disposes the IndexSet so it is no longer
 * valid.
 */
abstract void commitIndexSet(IndexSet index_set);

/**
 * Adds a new row to this table and returns an index that is used to
 * reference this row by the 'getCellContents' method.
 * <p>
 * Note that this method will not affect the master index or column schemes.
 * This is a low level mechanism for adding unreferenced data into a
 * conglomerate.  The data is referenced by committing the change where it
 * eventually migrates into the master index and schemes.
 */
abstract int internalAddRow(RowData data) throws IOException;

/**
 * Returns the cell contents of the given cell in the table.  It is the
 * responsibility of the implemented method to perform caching as it deems
 * fit.  Some representations may not require such extensive caching as
 * others.
 */
abstract TObject internalGetCellContents(int column, int row);

/**
 * Atomically returns the current 'unique_id' value for this table.
 */
abstract long currentUniqueID();

/**
 * Atomically returns the next 'unique_id' value from this table.
 */
abstract long nextUniqueID();

/**
 * Sets the unique id for this store.  This must only be used under
 * extraordinary circumstances, such as restoring from a backup, or
 * converting from one file to another.
 */
abstract void setUniqueID(long value);

/**
 * Disposes of all in-memory resources associated with this table and
 * invalidates this object.  If 'pending_drop' is true then the table is
 * to be disposed pending a call to 'drop', and any persistent resources
 * that are allocated may be freed.
 */
abstract void dispose(boolean pending_drop) throws IOException;

/**
 * Disposes and drops this table.  If the dispose failed for any reason,
 * it returns false, otherwise true.  If the drop failed, it should be
 * retried at a later time.
 */
abstract boolean drop() throws IOException;

/**
 * Called by the 'shutdown hook' on the conglomerate.  This method should
 * block until the table can be put into a safe mode and then prevent any
 * further access to the object after it returns.  It must operate very
 * quickly.
 */
abstract void shutdownHookCleanup();
0479:
/**
 * Returns true if a compact of this table is worthwhile.  By default we
 * conservatively return true; implementations are encouraged to override
 * this and test the table's actual state.
 */
boolean isWorthCompacting() {
return true;
}
0488:
0489: /**
0490: * Creates a SelectableScheme object for the given column in this table.
0491: * This reads the index from the index set (if there is one) then wraps
0492: * it around the selectable schema as appropriate.
0493: * <p>
0494: * NOTE: This needs to be deprecated in support of composite indexes.
0495: */
0496: synchronized SelectableScheme createSelectableSchemeForColumn(
0497: IndexSet index_set, TableDataSource table, int column) {
0498: // What's the type of scheme for this column?
0499: DataTableColumnDef column_def = getDataTableDef().columnAt(
0500: column);
0501:
0502: // If the column isn't indexable then return a BlindSearch object
0503: if (!column_def.isIndexableType()) {
0504: return new BlindSearch(table, column);
0505: }
0506:
0507: String scheme_type = column_def.getIndexScheme();
0508: if (scheme_type.equals("InsertSearch")) {
0509: // Search the TableIndexDef for this column
0510: DataIndexSetDef index_set_def = getDataIndexSetDef();
0511: int index_i = index_set_def
0512: .findIndexForColumns(new String[] { column_def
0513: .getName() });
0514: return createSelectableSchemeForIndex(index_set, table,
0515: index_i);
0516: } else if (scheme_type.equals("BlindSearch")) {
0517: return new BlindSearch(table, column);
0518: } else {
0519: throw new Error("Unknown scheme type");
0520: }
0521: }
0522:
0523: /**
0524: * Creates a SelectableScheme object for the given index in the index set def
0525: * in this table.
0526: * This reads the index from the index set (if there is one) then wraps
0527: * it around the selectable schema as appropriate.
0528: */
0529: synchronized SelectableScheme createSelectableSchemeForIndex(
0530: IndexSet index_set, TableDataSource table, int index_i) {
0531:
0532: // Get the IndexDef object
0533: DataIndexDef index_def = getDataIndexSetDef().indexAt(index_i);
0534:
0535: if (index_def.getType().equals("BLIST")) {
0536: String[] cols = index_def.getColumnNames();
0537: DataTableDef table_def = getDataTableDef();
0538: if (cols.length == 1) {
0539: // If a single column
0540: int col_index = table_def.findColumnName(cols[0]);
0541: // Get the index from the index set and set up the new InsertSearch
0542: // scheme.
0543: IntegerListInterface index_list = index_set
0544: .getIndex(index_def.getPointer());
0545: InsertSearch iis = new InsertSearch(table, col_index,
0546: index_list);
0547: return iis;
0548: } else {
0549: throw new RuntimeException(
0550: "Multi-column indexes not supported at this time.");
0551: }
0552: } else {
0553: throw new RuntimeException("Unrecognised type.");
0554: }
0555:
0556: }
0557:
/**
 * Creates a minimal TableDataSource object that represents this
 * MasterTableDataSource.  The returned source reflects the given master
 * index BEFORE any journal entries are applied.  It does not implement the
 * 'getColumnScheme' method (used only where column schemes are not needed,
 * such as index building).
 */
protected TableDataSource minimalTableDataSource(
final IntegerListInterface master_index) {
// Make a TableDataSource that represents the master table over this
// index.
return new TableDataSource() {
public TransactionSystem getSystem() {
return system;
}

public DataTableDef getDataTableDef() {
return MasterTableDataSource.this .getDataTableDef();
}

public int getRowCount() {
// NOTE: Returns the number of rows in the master index before journal
// entries have been made.
return master_index.size();
}

public RowEnumeration rowEnumeration() {
// NOTE: Returns iterator across master index before journal entry
// changes.
// Get an iterator across the row list.
final IntegerIterator iterator = master_index
.iterator();
// Wrap it around a RowEnumeration object.
return new RowEnumeration() {
public boolean hasMoreRows() {
return iterator.hasNext();
}

public int nextRowIndex() {
return iterator.next();
}
};
}

public SelectableScheme getColumnScheme(int column) {
// Deliberately unsupported in this minimal view.
throw new Error("Not implemented.");
}

public TObject getCellContents(int column, int row) {
return MasterTableDataSource.this .getCellContents(
column, row);
}
};
}
0610:
0611: /**
0612: * Builds a complete index set on the data in this table. This must only be
0613: * called when either, a) we are under a commit lock, or b) there is a
0614: * guarentee that no concurrect access to the indexing information can happen
0615: * (such as when we are creating the table).
0616: * <p>
0617: * NOTE: We assume that the index information for this table is blank before
0618: * this method is called.
0619: */
0620: synchronized void buildIndexes() throws IOException {
0621: IndexSet index_set = createIndexSet();
0622:
0623: DataIndexSetDef index_set_def = getDataIndexSetDef();
0624:
0625: final int row_count = rawRowCount();
0626:
0627: // Master index is always on index position 0
0628: IntegerListInterface master_index = index_set.getIndex(0);
0629:
0630: // First, update the master index
0631: for (int row_index = 0; row_index < row_count; ++row_index) {
0632: // If this row isn't deleted, set the index information for it,
0633: if (!recordDeleted(row_index)) {
0634: // First add to master index
0635: boolean inserted = master_index
0636: .uniqueInsertSort(row_index);
0637: if (!inserted) {
0638: throw new RuntimeException(
0639: "Assertion failed: Master index entry was duplicated.");
0640: }
0641: }
0642: }
0643:
0644: // Commit the master index
0645: commitIndexSet(index_set);
0646:
0647: // Now go ahead and build each index in this table
0648: int index_count = index_set_def.indexCount();
0649: for (int i = 0; i < index_count; ++i) {
0650: buildIndex(i);
0651: }
0652:
0653: }
0654:
0655: /**
0656: * Builds the given index number (from the DataIndexSetDef). This must only
0657: * be called when either, a) we are under a commit lock, or b) there is a
0658: * guarentee that no concurrect access to the indexing information can happen
0659: * (such as when we are creating the table).
0660: * <p>
0661: * NOTE: We assume that the index number in this table is blank before this
0662: * method is called.
0663: */
0664: synchronized void buildIndex(final int index_number)
0665: throws IOException {
0666: DataIndexSetDef index_set_def = getDataIndexSetDef();
0667:
0668: IndexSet index_set = createIndexSet();
0669:
0670: // Master index is always on index position 0
0671: IntegerListInterface master_index = index_set.getIndex(0);
0672: // A minimal TableDataSource for constructing the indexes
0673: TableDataSource min_table_source = minimalTableDataSource(master_index);
0674:
0675: // Set up schemes for the index,
0676: SelectableScheme scheme = createSelectableSchemeForIndex(
0677: index_set, min_table_source, index_number);
0678:
0679: // Rebuild the entire index
0680: int row_count = rawRowCount();
0681: for (int row_index = 0; row_index < row_count; ++row_index) {
0682:
0683: // If this row isn't deleted, set the index information for it,
0684: if (!recordDeleted(row_index)) {
0685: scheme.insert(row_index);
0686: }
0687:
0688: }
0689:
0690: // Commit the index
0691: commitIndexSet(index_set);
0692:
0693: }
0694:
/**
 * Adds a new transaction modification to this master table source.  This
 * information represents the information that was added/removed in the
 * table in this transaction.  The IndexSet object represents the changed
 * index information to commit to this table.
 * <p>
 * It's guaranteed that 'commit_id' additions will be sequential.
 */
synchronized void commitTransactionChange(long commit_id,
MasterTableJournal change, IndexSet index_set) {
// ASSERT: Can't do this if source is read only.
if (isReadOnly()) {
throw new Error(
"Can't commit transaction journal, table is read only.");
}

// Tag the journal with the commit id it is committed under.
change.setCommitID(commit_id);

try {

// Add this journal to the multi version table indices log
table_indices.addTransactionJournal(change);

// Write the modified index set to the index store
// (Updates the index file)
commitIndexSet(index_set);

// Update the state of the committed added data to the file system.
// (Updates data to the allocation file)
//
// ISSUE: This can add up to a lot of changes to the allocation file and
// the Java runtime could potentially be terminated in the middle of
// the update.  If an interruption happens the allocation information
// may be incorrectly flagged.  The type of corruption this would
// result in would be;
// + From an 'update' the updated record may disappear.
// + From a 'delete' the deleted record may not delete.
// + From an 'insert' the inserted record may not insert.
//
// Note, the possibility of this type of corruption occurring has been
// minimized as best as possible given the current architecture.
// Also note that it is not possible for a table file to become corrupted
// beyond recovery from this issue.

int size = change.entries();
for (int i = 0; i < size; ++i) {
byte b = change.getCommand(i);
int row_index = change.getRowIndex(i);
// Was a row added or removed?
if (MasterTableJournal.isAddCommand(b)) {

// Record commit added (0x010 in the high nibble appears to be the
// COMMITTED_ADDED state - see the class javadoc on row states).
int old_type = writeRecordType(row_index, 0x010);
// Check the record was in an uncommitted state before we changed
// it.  If not, restore the previous state and fail loudly.
if ((old_type & 0x0F0) != 0) {
writeRecordType(row_index, old_type & 0x0F0);
throw new Error("Record " + row_index
+ " of table " + this
+ " was not in an uncommitted state!");
}

} else if (MasterTableJournal.isRemoveCommand(b)) {

// Record commit removed (0x020 - COMMITTED_REMOVED).
int old_type = writeRecordType(row_index, 0x020);
// Check the record was in an added state before we removed it.
// If not, restore the previous state and fail loudly.
if ((old_type & 0x0F0) != 0x010) {
writeRecordType(row_index, old_type & 0x0F0);
// System.out.println(change);
throw new Error("Record " + row_index
+ " of table " + this
+ " was not in an added state!");
}
// Notify collector that this row has been marked as deleted.
garbage_collector.markRowAsDeleted(row_index);

}
}

} catch (IOException e) {
// Wrap the IO failure - callers of commit do not expect IOException.
Debug().writeException(e);
throw new Error("IO Error: " + e.getMessage());
}

}
0781:
/**
 * Rolls back a transaction change in this table source.  Any rows added
 * to the table will be uncommitted rows (type_key = 0).  Those rows must be
 * marked as committed deleted.
 */
synchronized void rollbackTransactionChange(
MasterTableJournal change) {

// ASSERT: Can't do this if source is read only.
if (isReadOnly()) {
throw new Error(
"Can't rollback transaction journal, table is read only.");
}

// Any rows added in the journal are marked as committed deleted and the
// journal is then discarded.

try {
// Mark all rows in the data_store as appropriate to the changes.
int size = change.entries();
for (int i = 0; i < size; ++i) {
byte b = change.getCommand(i);
int row_index = change.getRowIndex(i);
// Mark row as added or removed.
if (MasterTableJournal.isAddCommand(b)) {
// Record commit removed (we are rolling back remember).
// (0x020 - COMMITTED_REMOVED state flag.)
// int old_type = data_store.writeRecordType(row_index + 1, 0x020);
int old_type = writeRecordType(row_index, 0x020);
// Check the record was in an uncommitted state before we changed
// it.  If not, restore the previous state and fail loudly.
if ((old_type & 0x0F0) != 0) {
// data_store.writeRecordType(row_index + 1, old_type & 0x0F0);
writeRecordType(row_index, old_type & 0x0F0);
throw new Error("Record " + row_index
+ " was not in an "
+ "uncommitted state!");
}
// Notify collector that this row has been marked as deleted.
garbage_collector.markRowAsDeleted(row_index);
} else if (MasterTableJournal.isRemoveCommand(b)) {
// Any journal entries marked as TABLE_REMOVE are ignored because
// we are rolling back.  This means the row is not logically changed.
}
}

// The journal entry is discarded, the indices do not need to be updated
// to reflect this rollback.
} catch (IOException e) {
// Wrap the IO failure - callers of rollback do not expect IOException.
Debug().writeException(e);
throw new Error("IO Error: " + e.getMessage());
}
}
0834:
/**
 * Returns a MutableTableDataSource object that represents this data source
 * at the time the given transaction started.  Any modifications to the
 * returned table are logged in a fresh table journal.
 * <p>
 * This is a key method in this object because it allows us to get a data
 * source that represents the data in the table before any modifications
 * may have been committed.
 */
MutableTableDataSource createTableDataSourceAtCommit(
SimpleTransaction transaction) {
return createTableDataSourceAtCommit(transaction,
new MasterTableJournal(getTableID()));
}
0849:
/**
 * Returns a MutableTableDataSource object that represents this data source
 * at the time the given transaction started, and also makes any
 * modifications that are described by the given journal in the table.
 * <p>
 * This method is useful for merging the changes made by a transaction into
 * a view of the table.
 */
MutableTableDataSource createTableDataSourceAtCommit(
SimpleTransaction transaction, MasterTableJournal journal) {
return new MMutableTableDataSource(transaction, journal);
}
0862:
0863: // ---------- File IO level table modification ----------
0864:
0865: /**
0866: * Sets up the DataIndexSetDef object from the information set in this object.
0867: * This will only setup a default IndexSetDef on the information in the
0868: * DataTableDef.
0869: */
0870: protected synchronized void setupDataIndexSetDef() {
0871: // Create the initial DataIndexSetDef object.
0872: index_def = new DataIndexSetDef(table_def.getTableName());
0873: for (int i = 0; i < table_def.columnCount(); ++i) {
0874: DataTableColumnDef col_def = table_def.columnAt(i);
0875: if (col_def.isIndexableType()
0876: && col_def.getIndexScheme().equals("InsertSearch")) {
0877: index_def.addDataIndexDef(new DataIndexDef(
0878: "ANON-COLUMN:" + i, new String[] { col_def
0879: .getName() }, i + 1, "BLIST", false));
0880: }
0881: }
0882: }
0883:
0884: /**
0885: * Sets up the DataTableDef. This would typically only ever be called from
0886: * the 'create' method.
0887: */
0888: protected synchronized void setupDataTableDef(DataTableDef table_def) {
0889:
0890: // Check table_id isn't too large.
0891: if ((table_id & 0x0F0000000) != 0) {
0892: throw new Error("'table_id' exceeds maximum possible keys.");
0893: }
0894:
0895: this .table_def = table_def;
0896:
0897: // The name of the table to create,
0898: TableName table_name = table_def.getTableName();
0899:
0900: // Create table indices
0901: table_indices = new MultiVersionTableIndices(getSystem(),
0902: table_name, table_def.columnCount());
0903: // The column rid list cache
0904: column_rid_list = new RIDList[table_def.columnCount()];
0905:
0906: // Setup the DataIndexSetDef
0907: setupDataIndexSetDef();
0908: }
0909:
0910: /**
0911: * Loads the internal variables.
0912: */
0913: protected synchronized void loadInternal() {
0914: // Set up the stat keys.
0915: String table_name = table_def.getName();
0916: String schema_name = table_def.getSchema();
0917: String n = table_name;
0918: if (schema_name.length() > 0) {
0919: n = schema_name + "." + table_name;
0920: }
0921: root_lock_key = "MasterTableDataSource.RootLocks." + n;
0922: total_hits_key = "MasterTableDataSource.Hits.Total." + n;
0923: file_hits_key = "MasterTableDataSource.Hits.File." + n;
0924: delete_hits_key = "MasterTableDataSource.Hits.Delete." + n;
0925: insert_hits_key = "MasterTableDataSource.Hits.Insert." + n;
0926:
0927: column_count = table_def.columnCount();
0928:
0929: is_closed = false;
0930:
0931: }
0932:
0933: /**
0934: * Returns true if this table source is closed.
0935: */
0936: synchronized boolean isClosed() {
0937: return is_closed;
0938: }
0939:
0940: /**
0941: * Returns true if the source is read only.
0942: */
0943: boolean isReadOnly() {
0944: return system.readOnlyAccess();
0945: }
0946:
0947: /**
0948: * Returns the StoreSystem object used to manage stores in the persistence
0949: * system.
0950: */
0951: protected StoreSystem storeSystem() {
0952: return store_system;
0953: }
0954:
0955: /**
0956: * Adds a new row to this table and returns an index that is used to
0957: * reference this row by the 'getCellContents' method.
0958: * <p>
0959: * Note that this method will not effect the master index or column schemes.
0960: * This is a low level mechanism for adding unreferenced data into a
0961: * conglomerate. The data is referenced by committing the change where it
0962: * eventually migrates into the master index and schemes.
0963: */
0964: int addRow(RowData data) throws IOException {
0965: int row_number;
0966:
0967: synchronized (this ) {
0968:
0969: row_number = internalAddRow(data);
0970:
0971: } // synchronized
0972:
0973: // Update stats
0974: getSystem().stats().increment(insert_hits_key);
0975:
0976: // Return the record index of the new data in the table
0977: return row_number;
0978: }
0979:
0980: /**
0981: * Actually deletes the row from the table. This is a permanent removal of
0982: * the row from the table. After this method is called, the row can not
0983: * be retrieved again. This is generally only used by the row garbage
0984: * collector.
0985: * <p>
0986: * There is no checking in this method.
0987: */
0988: private synchronized void doHardRowRemove(int row_index)
0989: throws IOException {
0990:
0991: // If we have a rid_list for any of the columns, then update the indexing
0992: // there,
0993: for (int i = 0; i < column_count; ++i) {
0994: RIDList rid_list = column_rid_list[i];
0995: if (rid_list != null) {
0996: rid_list.removeRID(row_index);
0997: }
0998: }
0999:
1000: // Internally delete the row,
1001: internalDeleteRow(row_index);
1002:
1003: // Update stats
1004: system.stats().increment(delete_hits_key);
1005:
1006: }
1007:
1008: /**
1009: * Permanently removes a row from this table. This must only be used when
1010: * it is determined that a transaction does not reference this row, and
1011: * that an open result set does not reference this row. This will remove
1012: * the row permanently from the underlying file representation. Calls to
1013: * 'getCellContents(col, row)' where row is deleted will be undefined after
1014: * this method is called.
1015: * <p>
1016: * Note that the removed row must not be contained within the master index,
1017: * or be referenced by the index schemes, or be referenced in the
1018: * transaction modification list.
1019: */
1020: synchronized void hardRemoveRow(final int record_index)
1021: throws IOException {
1022: // ASSERTION: We are not under a root lock.
1023: if (!isRootLocked()) {
1024: // int type_key = data_store.readRecordType(record_index + 1);
1025: int type_key = readRecordType(record_index);
1026: // Check this record is marked as committed removed.
1027: if ((type_key & 0x0F0) == 0x020) {
1028: doHardRowRemove(record_index);
1029: } else {
1030: throw new Error(
1031: "Row isn't marked as committed removed: "
1032: + record_index);
1033: }
1034: } else {
1035: throw new Error("Assertion failed: "
1036: + "Can't remove row, table is under a root lock.");
1037: }
1038: }
1039:
1040: /**
1041: * Checks the given record index, and if it's possible to reclaim it then
1042: * it does so here. Rows are only removed if they are marked as committed
1043: * removed.
1044: */
1045: synchronized boolean hardCheckAndReclaimRow(final int record_index)
1046: throws IOException {
1047: // ASSERTION: We are not under a root lock.
1048: if (!isRootLocked()) {
1049: // Row already deleted?
1050: if (!recordDeleted(record_index)) {
1051: int type_key = readRecordType(record_index);
1052: // Check this record is marked as committed removed.
1053: if ((type_key & 0x0F0) == 0x020) {
1054: // System.out.println("[" + getName() + "] " +
1055: // "Hard Removing: " + record_index);
1056: doHardRowRemove(record_index);
1057: return true;
1058: }
1059: }
1060: return false;
1061: } else {
1062: throw new Error("Assertion failed: "
1063: + "Can't remove row, table is under a root lock.");
1064: }
1065: }
1066:
1067: /**
1068: * Returns the record type of the given record index. Returns a type that
1069: * is compatible with RawDiagnosticTable record type.
1070: */
1071: synchronized int recordTypeInfo(int record_index)
1072: throws IOException {
1073: // ++record_index;
1074: if (recordDeleted(record_index)) {
1075: return RawDiagnosticTable.DELETED;
1076: }
1077: int type_key = readRecordType(record_index) & 0x0F0;
1078: if (type_key == 0) {
1079: return RawDiagnosticTable.UNCOMMITTED;
1080: } else if (type_key == 0x010) {
1081: return RawDiagnosticTable.COMMITTED_ADDED;
1082: } else if (type_key == 0x020) {
1083: return RawDiagnosticTable.COMMITTED_REMOVED;
1084: }
1085: return RawDiagnosticTable.RECORD_STATE_ERROR;
1086:
1087: }
1088:
1089: /**
1090: * This is called by the 'open' method. It performs a scan of the records
1091: * and marks any rows that are uncommitted as deleted. It also checks
1092: * that the row is not within the master index.
1093: */
1094: protected synchronized void doOpeningScan() throws IOException {
1095: long in_time = System.currentTimeMillis();
1096:
1097: // ASSERTION: No root locks and no pending transaction changes,
1098: // VERY important we assert there's no pending transactions.
1099: if (isRootLocked() || hasTransactionChangesPending()) {
1100: // This shouldn't happen if we are calling from 'open'.
1101: throw new RuntimeException(
1102: "Odd, we are root locked or have pending journal changes.");
1103: }
1104:
1105: // This is pointless if we are in read only mode.
1106: if (!isReadOnly()) {
1107: // A journal of index changes during this scan...
1108: MasterTableJournal journal = new MasterTableJournal();
1109:
1110: // Get the master index of rows in this table
1111: IndexSet index_set = createIndexSet();
1112: IntegerListInterface master_index = index_set.getIndex(0);
1113:
1114: // NOTE: We assume the index information is correct and that the
1115: // allocation information is potentially bad.
1116:
1117: int row_count = rawRowCount();
1118: for (int i = 0; i < row_count; ++i) {
1119: // Is this record marked as deleted?
1120: if (!recordDeleted(i)) {
1121: // Get the type flags for this record.
1122: int type = recordTypeInfo(i);
1123: // Check if this record is marked as committed removed, or is an
1124: // uncommitted record.
1125: if (type == RawDiagnosticTable.COMMITTED_REMOVED
1126: || type == RawDiagnosticTable.UNCOMMITTED) {
1127: // Check it's not in the master index...
1128: if (!master_index.contains(i)) {
1129: // Delete it.
1130: doHardRowRemove(i);
1131: } else {
1132: Debug()
1133: .write(
1134: Lvl.ERROR,
1135: this ,
1136: "Inconsistant: Row is indexed but marked as "
1137: + "removed or uncommitted.");
1138: Debug().write(
1139: Lvl.ERROR,
1140: this ,
1141: "Row: " + i + " Type: " + type
1142: + " Table: "
1143: + getTableName());
1144: // Mark the row as committed added because it is in the index.
1145: writeRecordType(i, 0x010);
1146:
1147: }
1148: } else {
1149: // Must be committed added. Check it's indexed.
1150: if (!master_index.contains(i)) {
1151: // Not indexed, so data is inconsistant.
1152: Debug()
1153: .write(Lvl.ERROR, this ,
1154: "Inconsistant: Row committed added but not in master index.");
1155: Debug().write(
1156: Lvl.ERROR,
1157: this ,
1158: "Row: " + i + " Type: " + type
1159: + " Table: "
1160: + getTableName());
1161: // Mark the row as committed removed because it is not in the
1162: // index.
1163: writeRecordType(i, 0x020);
1164:
1165: }
1166: }
1167: } else { // if deleted
1168: // Check this record isn't in the master index.
1169: if (master_index.contains(i)) {
1170: // It's in the master index which is wrong! We should remake the
1171: // indices.
1172: Debug()
1173: .write(Lvl.ERROR, this ,
1174: "Inconsistant: Row is removed but in index.");
1175: Debug().write(
1176: Lvl.ERROR,
1177: this ,
1178: "Row: " + i + " Table: "
1179: + getTableName());
1180: // Mark the row as committed added because it is in the index.
1181: writeRecordType(i, 0x010);
1182:
1183: }
1184: }
1185: } // for (int i = 0 ; i < row_count; ++i)
1186:
1187: // Dispose the index set
1188: index_set.dispose();
1189:
1190: }
1191:
1192: long bench_time = System.currentTimeMillis() - in_time;
1193: if (Debug().isInterestedIn(Lvl.INFORMATION)) {
1194: Debug().write(
1195: Lvl.INFORMATION,
1196: this ,
1197: "Opening scan for " + toString() + " ("
1198: + getTableName() + ") took " + bench_time
1199: + "ms.");
1200: }
1201:
1202: }
1203:
1204: /**
1205: * Returns an implementation of RawDiagnosticTable that we can use to
1206: * diagnose problems with the data in this source.
1207: */
1208: RawDiagnosticTable getRawDiagnosticTable() {
1209: return new MRawDiagnosticTable();
1210: }
1211:
1212: /**
1213: * Returns the cell contents of the given cell in the table. This will
1214: * look up the cell in the file if it can't be found in the cell cache. This
1215: * method is undefined if row has been removed or was not returned by
1216: * the 'addRow' method.
1217: */
1218: TObject getCellContents(int column, int row) {
1219: if (row < 0) {
1220: throw new Error("'row' is < 0");
1221: }
1222: return internalGetCellContents(column, row);
1223: }
1224:
1225: /**
1226: * Grabs a root lock on this table.
1227: * <p>
1228: * While a MasterTableDataSource has at least 1 root lock, it may not
1229: * reclaim deleted space in the data store. A root lock means that data
1230: * is still being pointed to in this file (even possibly committed deleted
1231: * data).
1232: */
1233: synchronized void addRootLock() {
1234: system.stats().increment(root_lock_key);
1235: ++root_lock;
1236: }
1237:
1238: /**
1239: * Removes a root lock from this table.
1240: * <p>
1241: * While a MasterTableDataSource has at least 1 root lock, it may not
1242: * reclaim deleted space in the data store. A root lock means that data
1243: * is still being pointed to in this file (even possibly committed deleted
1244: * data).
1245: */
1246: synchronized void removeRootLock() {
1247: if (!is_closed) {
1248: system.stats().decrement(root_lock_key);
1249: if (root_lock == 0) {
1250: throw new Error("Too many root locks removed!");
1251: }
1252: --root_lock;
1253: // If the last lock is removed, schedule a possible collection.
1254: if (root_lock == 0) {
1255: checkForCleanup();
1256: }
1257: }
1258: }
1259:
1260: /**
1261: * Returns true if the table is currently under a root lock (has 1 or more
1262: * root locks on it).
1263: */
1264: synchronized boolean isRootLocked() {
1265: return root_lock > 0;
1266: }
1267:
1268: /**
1269: * Clears all root locks on the table. Should only be used during cleanup
1270: * of the table and will by definition invalidate the table.
1271: */
1272: protected synchronized void clearAllRootLocks() {
1273: root_lock = 0;
1274: }
1275:
1276: /**
1277: * Checks to determine if it is safe to clean up any resources in the
1278: * table, and if it is safe to do so, the space is reclaimed.
1279: */
1280: abstract void checkForCleanup();
1281:
	/**
	 * Returns a string describing the transaction changes on the table
	 * indices (delegates to MultiVersionTableIndices).
	 */
	synchronized String transactionChangeString() {
		return table_indices.transactionChangeString();
	}
1285:
1286: /**
1287: * Returns true if this table has any journal modifications that have not
1288: * yet been incorporated into master index.
1289: */
1290: synchronized boolean hasTransactionChangesPending() {
1291: return table_indices.hasTransactionChangesPending();
1292: }
1293:
1294: // ---------- Inner classes ----------
1295:
1296: /**
1297: * A RawDiagnosticTable implementation that provides direct access to the
1298: * root data of this table source bypassing any indexing schemes. This
1299: * interface allows for the inspection and repair of data files.
1300: */
1301: private final class MRawDiagnosticTable implements
1302: RawDiagnosticTable {
1303:
1304: // ---------- Implemented from RawDiagnosticTable -----------
1305:
1306: public int physicalRecordCount() {
1307: try {
1308: return rawRowCount();
1309: } catch (IOException e) {
1310: throw new Error(e.getMessage());
1311: }
1312: }
1313:
1314: public DataTableDef getDataTableDef() {
1315: return MasterTableDataSource.this .getDataTableDef();
1316: }
1317:
1318: public int recordState(int record_index) {
1319: try {
1320: return recordTypeInfo(record_index);
1321: } catch (IOException e) {
1322: throw new Error(e.getMessage());
1323: }
1324: }
1325:
1326: public int recordSize(int record_index) {
1327: return -1;
1328: }
1329:
1330: public TObject getCellContents(int column, int record_index) {
1331: return MasterTableDataSource.this .getCellContents(column,
1332: record_index);
1333: }
1334:
1335: public String recordMiscInformation(int record_index) {
1336: return null;
1337: }
1338:
1339: }
1340:
1341: /**
1342: * A MutableTableDataSource object as returned by the
1343: * 'createTableDataSourceAtCommit' method.
1344: * <p>
1345: * NOTE: This object is NOT thread-safe and it is assumed any use of this
1346: * object will be thread exclusive. This is okay because multiple
1347: * instances of this object can be created on the same MasterTableDataSource
1348: * if multi-thread access to a MasterTableDataSource is desirable.
1349: */
1350: private final class MMutableTableDataSource implements
1351: MutableTableDataSource {
1352:
1353: /**
1354: * The Transaction object that this MutableTableDataSource was
1355: * generated from. This reference should be used only to query
1356: * database constraint information.
1357: */
1358: private SimpleTransaction transaction;
1359:
1360: /**
1361: * True if the transaction is read-only.
1362: */
1363: private boolean tran_read_only;
1364:
1365: /**
1366: * The name of this table.
1367: */
1368: private TableName table_name;
1369:
1370: /**
1371: * The 'recovery point' to which the row index in this table source has
1372: * rebuilt to.
1373: */
1374: private int row_list_rebuild;
1375:
1376: /**
1377: * The index that represents the rows that are within this
1378: * table data source within this transaction.
1379: */
1380: private IntegerListInterface row_list;
1381:
1382: /**
1383: * The 'recovery point' to which the schemes in this table source have
1384: * rebuilt to.
1385: */
1386: private int[] scheme_rebuilds;
1387:
1388: /**
1389: * The IndexSet for this mutable table source.
1390: */
1391: private IndexSet index_set;
1392:
1393: /**
1394: * The SelectableScheme array that represents the schemes for the
1395: * columns within this transaction.
1396: */
1397: private SelectableScheme[] column_schemes;
1398:
1399: /**
1400: * A journal of changes to this source since it was created.
1401: */
1402: private MasterTableJournal table_journal;
1403:
1404: /**
1405: * The last time any changes to the journal were check for referential
1406: * integrity violations.
1407: */
1408: private int last_entry_ri_check;
1409:
1410: /**
1411: * Constructs the data source.
1412: */
1413: public MMutableTableDataSource(SimpleTransaction transaction,
1414: MasterTableJournal journal) {
1415: this .transaction = transaction;
1416: this .index_set = transaction
1417: .getIndexSetForTable(MasterTableDataSource.this );
1418: int col_count = getDataTableDef().columnCount();
1419: this .table_name = getDataTableDef().getTableName();
1420: this .tran_read_only = transaction.isReadOnly();
1421: row_list_rebuild = 0;
1422: scheme_rebuilds = new int[col_count];
1423: column_schemes = new SelectableScheme[col_count];
1424: table_journal = journal;
1425: last_entry_ri_check = table_journal.entries();
1426: }
1427:
1428: /**
1429: * Executes an update referential action. If the update action is
1430: * "NO ACTION", and the constraint is INITIALLY_IMMEDIATE, and the new key
1431: * doesn't exist in the referral table, an exception is thrown.
1432: */
1433: private void executeUpdateReferentialAction(
1434: Transaction.ColumnGroupReference constraint,
1435: TObject[] original_key, TObject[] new_key,
1436: QueryContext context) {
1437:
1438: final String update_rule = constraint.update_rule;
1439: if (update_rule.equals("NO ACTION")
1440: && constraint.deferred != Transaction.INITIALLY_IMMEDIATE) {
1441: // Constraint check is deferred
1442: return;
1443: }
1444:
1445: // So either update rule is not NO ACTION, or if it is we are initially
1446: // immediate.
1447: MutableTableDataSource key_table = transaction
1448: .getTable(constraint.key_table_name);
1449: DataTableDef table_def = key_table.getDataTableDef();
1450: int[] key_cols = TableDataConglomerate.findColumnIndices(
1451: table_def, constraint.key_columns);
1452: IntegerVector key_entries = TableDataConglomerate.findKeys(
1453: key_table, key_cols, original_key);
1454:
1455: // Are there keys effected?
1456: if (key_entries.size() > 0) {
1457: if (update_rule.equals("NO ACTION")) {
1458: // Throw an exception;
1459: throw new DatabaseConstraintViolationException(
1460: DatabaseConstraintViolationException.FOREIGN_KEY_VIOLATION,
1461: TableDataConglomerate
1462: .deferredString(constraint.deferred)
1463: + " foreign key constraint violation on update ("
1464: + constraint.name
1465: + ") Columns = "
1466: + constraint.key_table_name
1467: .toString()
1468: + "( "
1469: + TableDataConglomerate
1470: .stringColumnList(constraint.key_columns)
1471: + " ) -> "
1472: + constraint.ref_table_name
1473: .toString()
1474: + "( "
1475: + TableDataConglomerate
1476: .stringColumnList(constraint.ref_columns)
1477: + " )");
1478: } else {
1479: // Perform a referential action on each updated key
1480: int sz = key_entries.size();
1481: for (int i = 0; i < sz; ++i) {
1482: int row_index = key_entries.intAt(i);
1483: RowData row_data = new RowData(key_table);
1484: row_data.setFromRow(row_index);
1485: if (update_rule.equals("CASCADE")) {
1486: // Update the keys
1487: for (int n = 0; n < key_cols.length; ++n) {
1488: row_data.setColumnData(key_cols[n],
1489: new_key[n]);
1490: }
1491: key_table.updateRow(row_index, row_data);
1492: } else if (update_rule.equals("SET NULL")) {
1493: for (int n = 0; n < key_cols.length; ++n) {
1494: row_data.setColumnToNull(key_cols[n]);
1495: }
1496: key_table.updateRow(row_index, row_data);
1497: } else if (update_rule.equals("SET DEFAULT")) {
1498: for (int n = 0; n < key_cols.length; ++n) {
1499: row_data.setColumnToDefault(
1500: key_cols[n], context);
1501: }
1502: key_table.updateRow(row_index, row_data);
1503: } else {
1504: throw new RuntimeException(
1505: "Do not understand referential action: "
1506: + update_rule);
1507: }
1508: }
1509: // Check referential integrity of modified table,
1510: key_table.constraintIntegrityCheck();
1511: }
1512: }
1513: }
1514:
1515: /**
1516: * Executes a delete referential action. If the delete action is
1517: * "NO ACTION", and the constraint is INITIALLY_IMMEDIATE, and the new key
1518: * doesn't exist in the referral table, an exception is thrown.
1519: */
1520: private void executeDeleteReferentialAction(
1521: Transaction.ColumnGroupReference constraint,
1522: TObject[] original_key, QueryContext context) {
1523:
1524: final String delete_rule = constraint.delete_rule;
1525: if (delete_rule.equals("NO ACTION")
1526: && constraint.deferred != Transaction.INITIALLY_IMMEDIATE) {
1527: // Constraint check is deferred
1528: return;
1529: }
1530:
1531: // So either delete rule is not NO ACTION, or if it is we are initially
1532: // immediate.
1533: MutableTableDataSource key_table = transaction
1534: .getTable(constraint.key_table_name);
1535: DataTableDef table_def = key_table.getDataTableDef();
1536: int[] key_cols = TableDataConglomerate.findColumnIndices(
1537: table_def, constraint.key_columns);
1538: IntegerVector key_entries = TableDataConglomerate.findKeys(
1539: key_table, key_cols, original_key);
1540:
1541: // Are there keys effected?
1542: if (key_entries.size() > 0) {
1543: if (delete_rule.equals("NO ACTION")) {
1544: // Throw an exception;
1545: throw new DatabaseConstraintViolationException(
1546: DatabaseConstraintViolationException.FOREIGN_KEY_VIOLATION,
1547: TableDataConglomerate
1548: .deferredString(constraint.deferred)
1549: + " foreign key constraint violation on delete ("
1550: + constraint.name
1551: + ") Columns = "
1552: + constraint.key_table_name
1553: .toString()
1554: + "( "
1555: + TableDataConglomerate
1556: .stringColumnList(constraint.key_columns)
1557: + " ) -> "
1558: + constraint.ref_table_name
1559: .toString()
1560: + "( "
1561: + TableDataConglomerate
1562: .stringColumnList(constraint.ref_columns)
1563: + " )");
1564: } else {
1565: // Perform a referential action on each updated key
1566: int sz = key_entries.size();
1567: for (int i = 0; i < sz; ++i) {
1568: int row_index = key_entries.intAt(i);
1569: RowData row_data = new RowData(key_table);
1570: row_data.setFromRow(row_index);
1571: if (delete_rule.equals("CASCADE")) {
1572: // Cascade the removal of the referenced rows
1573: key_table.removeRow(row_index);
1574: } else if (delete_rule.equals("SET NULL")) {
1575: for (int n = 0; n < key_cols.length; ++n) {
1576: row_data.setColumnToNull(key_cols[n]);
1577: }
1578: key_table.updateRow(row_index, row_data);
1579: } else if (delete_rule.equals("SET DEFAULT")) {
1580: for (int n = 0; n < key_cols.length; ++n) {
1581: row_data.setColumnToDefault(
1582: key_cols[n], context);
1583: }
1584: key_table.updateRow(row_index, row_data);
1585: } else {
1586: throw new RuntimeException(
1587: "Do not understand referential action: "
1588: + delete_rule);
1589: }
1590: }
1591: // Check referential integrity of modified table,
1592: key_table.constraintIntegrityCheck();
1593: }
1594: }
1595: }
1596:
1597: /**
1598: * Returns the entire row list for this table. This will request this
1599: * information from the master source.
1600: */
1601: private IntegerListInterface getRowIndexList() {
1602: if (row_list == null) {
1603: row_list = index_set.getIndex(0);
1604: }
1605: return row_list;
1606: }
1607:
1608: /**
1609: * Ensures that the row list is as current as the latest journal change.
1610: * We can be assured that when this is called, no journal changes will
1611: * occur concurrently. However we still need to synchronize because
1612: * multiple reads are valid.
1613: */
1614: private void ensureRowIndexListCurrent() {
1615: int rebuild_index = row_list_rebuild;
1616: int journal_count = table_journal.entries();
1617: while (rebuild_index < journal_count) {
1618: byte command = table_journal.getCommand(rebuild_index);
1619: int row_index = table_journal
1620: .getRowIndex(rebuild_index);
1621: if (MasterTableJournal.isAddCommand(command)) {
1622: // Add to 'row_list'.
1623: boolean b = getRowIndexList().uniqueInsertSort(
1624: row_index);
1625: if (b == false) {
1626: throw new Error(
1627: "Row index already used in this table ("
1628: + row_index + ")");
1629: }
1630: } else if (MasterTableJournal.isRemoveCommand(command)) {
1631: // Remove from 'row_list'
1632: boolean b = getRowIndexList().removeSort(row_index);
1633: if (b == false) {
1634: throw new Error(
1635: "Row index removed that wasn't in this table!");
1636: }
1637: } else {
1638: throw new Error("Unrecognised journal command.");
1639: }
1640: ++rebuild_index;
1641: }
1642: // It's now current (row_list_rebuild == journal_count);
1643: row_list_rebuild = rebuild_index;
1644: }
1645:
1646: /**
1647: * Ensures that the scheme column index is as current as the latest
1648: * journal change.
1649: */
1650: private void ensureColumnSchemeCurrent(int column) {
1651: SelectableScheme scheme = column_schemes[column];
1652: // NOTE: We should be assured that no write operations can occur over
1653: // this section of code because writes are exclusive operations
1654: // within a transaction.
1655: // Are there journal entries pending on this scheme since?
1656: int rebuild_index = scheme_rebuilds[column];
1657: int journal_count = table_journal.entries();
1658: while (rebuild_index < journal_count) {
1659: byte command = table_journal.getCommand(rebuild_index);
1660: int row_index = table_journal
1661: .getRowIndex(rebuild_index);
1662: if (MasterTableJournal.isAddCommand(command)) {
1663: scheme.insert(row_index);
1664: } else if (MasterTableJournal.isRemoveCommand(command)) {
1665: scheme.remove(row_index);
1666: } else {
1667: throw new Error("Unrecognised journal command.");
1668: }
1669: ++rebuild_index;
1670: }
1671: scheme_rebuilds[column] = rebuild_index;
1672: }
1673:
		// ---------- Implemented from MutableTableDataSource ----------

		/**
		 * Returns the TransactionSystem of the enclosing master source.
		 */
		public TransactionSystem getSystem() {
			return MasterTableDataSource.this.getSystem();
		}
1679:
		/**
		 * Returns the DataTableDef of the enclosing master source.
		 */
		public DataTableDef getDataTableDef() {
			return MasterTableDataSource.this.getDataTableDef();
		}
1683:
		/**
		 * Returns the number of rows visible to this transaction.
		 */
		public int getRowCount() {
			// Ensure the row list is up to date.
			ensureRowIndexListCurrent();
			return getRowIndexList().size();
		}
1689:
1690: public RowEnumeration rowEnumeration() {
1691: // Ensure the row list is up to date.
1692: ensureRowIndexListCurrent();
1693: // Get an iterator across the row list.
1694: final IntegerIterator iterator = getRowIndexList()
1695: .iterator();
1696: // Wrap it around a RowEnumeration object.
1697: return new RowEnumeration() {
1698: public boolean hasMoreRows() {
1699: return iterator.hasNext();
1700: }
1701:
1702: public int nextRowIndex() {
1703: return iterator.next();
1704: }
1705: };
1706: }
1707:
		/**
		 * Returns the cell at (column, row), delegating to the master source.
		 */
		public TObject getCellContents(int column, int row) {
			return MasterTableDataSource.this.getCellContents(column,
					row);
		}
1712:
1713: // NOTE: Returns an immutable version of the scheme...
1714: public SelectableScheme getColumnScheme(int column) {
1715: SelectableScheme scheme = column_schemes[column];
1716: // Cache the scheme in this object.
1717: if (scheme == null) {
1718: scheme = createSelectableSchemeForColumn(index_set,
1719: this , column);
1720: column_schemes[column] = scheme;
1721: }
1722:
1723: // Update the underlying scheme to the most current version.
1724: ensureColumnSchemeCurrent(column);
1725:
1726: return scheme;
1727: }
1728:
1729: // ---------- Table Modification ----------
1730:
1731: public int addRow(RowData row_data) {
1732:
1733: // Check the transaction isn't read only.
1734: if (tran_read_only) {
1735: throw new RuntimeException("Transaction is read only.");
1736: }
1737:
1738: // Check this isn't a read only source
1739: if (isReadOnly()) {
1740: throw new Error("Can not add row - table is read only.");
1741: }
1742:
1743: // Add to the master.
1744: int row_index;
1745: try {
1746: row_index = MasterTableDataSource.this .addRow(row_data);
1747: } catch (IOException e) {
1748: Debug().writeException(e);
1749: throw new Error("IO Error: " + e.getMessage());
1750: }
1751:
1752: // Note this doesn't need to be synchronized because we are exclusive on
1753: // this table.
1754: // Add this change to the table journal.
1755: table_journal.addEntry(MasterTableJournal.TABLE_ADD,
1756: row_index);
1757:
1758: return row_index;
1759: }
1760:
1761: public void removeRow(int row_index) {
1762:
1763: // Check the transaction isn't read only.
1764: if (tran_read_only) {
1765: throw new RuntimeException("Transaction is read only.");
1766: }
1767:
1768: // Check this isn't a read only source
1769: if (isReadOnly()) {
1770: throw new Error(
1771: "Can not remove row - table is read only.");
1772: }
1773:
1774: // NOTE: This must <b>NOT</b> call 'removeRow' in MasterTableDataSource.
1775: // We do not want to delete a row permanently from the underlying
1776: // file because the transaction using this data source may yet decide
1777: // to roll back the change and not delete the row.
1778:
1779: // Note this doesn't need to be synchronized because we are exclusive on
1780: // this table.
1781: // Add this change to the table journal.
1782: table_journal.addEntry(MasterTableJournal.TABLE_REMOVE,
1783: row_index);
1784:
1785: }
1786:
1787: public int updateRow(int row_index, RowData row_data) {
1788:
1789: // Check the transaction isn't read only.
1790: if (tran_read_only) {
1791: throw new RuntimeException("Transaction is read only.");
1792: }
1793:
1794: // Check this isn't a read only source
1795: if (isReadOnly()) {
1796: throw new Error(
1797: "Can not update row - table is read only.");
1798: }
1799:
1800: // Note this doesn't need to be synchronized because we are exclusive on
1801: // this table.
1802: // Add this change to the table journal.
1803: table_journal.addEntry(
1804: MasterTableJournal.TABLE_UPDATE_REMOVE, row_index);
1805:
1806: // Add to the master.
1807: int new_row_index;
1808: try {
1809: new_row_index = MasterTableDataSource.this
1810: .addRow(row_data);
1811: } catch (IOException e) {
1812: Debug().writeException(e);
1813: throw new Error("IO Error: " + e.getMessage());
1814: }
1815:
1816: // Note this doesn't need to be synchronized because we are exclusive on
1817: // this table.
1818: // Add this change to the table journal.
1819: table_journal.addEntry(MasterTableJournal.TABLE_UPDATE_ADD,
1820: new_row_index);
1821:
1822: return new_row_index;
1823: }
1824:
1825: public void flushIndexChanges() {
1826: ensureRowIndexListCurrent();
1827: // This will flush all of the column schemes
1828: for (int i = 0; i < column_schemes.length; ++i) {
1829: getColumnScheme(i);
1830: }
1831: }
1832:
/**
 * Performs a referential/field constraint integrity check over every row
 * that has been added, updated or deleted in the table journal since the
 * last time this method ran (tracked by 'last_entry_ri_check').
 * <p>
 * If a DatabaseConstraintViolationException is raised, the journal
 * entries made since the last check are rolled back (when the row list
 * rebuild pointer permits) and the exception is rethrown.  In all cases
 * 'last_entry_ri_check' is advanced to the current journal size.
 */
public void constraintIntegrityCheck() {
    try {

        // Early exit condition: no new journal entries since the last
        // check, so there is nothing to verify.
        if (last_entry_ri_check == table_journal.entries()) {
            return;
        }

        // This table name
        DataTableDef table_def = getDataTableDef();
        TableName table_name = table_def.getTableName();
        // Query context used to evaluate referential actions below.
        QueryContext context = new SystemQueryContext(
            transaction, table_name.getSchema());

        // Are there any added, deleted or updated entries in the journal since
        // we last checked?
        IntegerVector rows_updated = new IntegerVector();
        IntegerVector rows_deleted = new IntegerVector();
        IntegerVector rows_added = new IntegerVector();

        // Classify each new journal entry.  An UPDATE is recorded in the
        // journal as a REMOVE/ADD pair, so updated rows appear in both the
        // deleted/added lists and in 'rows_updated'.
        int size = table_journal.entries();
        for (int i = last_entry_ri_check; i < size; ++i) {
            byte tc = table_journal.getCommand(i);
            int row_index = table_journal.getRowIndex(i);
            if (tc == MasterTableJournal.TABLE_REMOVE
                || tc == MasterTableJournal.TABLE_UPDATE_REMOVE) {
                rows_deleted.addInt(row_index);
                // If this is in the rows_added list, remove it from rows_added
                // (a row added then removed in the same window nets out).
                int ra_i = rows_added.indexOf(row_index);
                if (ra_i != -1) {
                    rows_added.removeIntAt(ra_i);
                }
            } else if (tc == MasterTableJournal.TABLE_ADD
                || tc == MasterTableJournal.TABLE_UPDATE_ADD) {
                rows_added.addInt(row_index);
            }

            // Record both halves of an update pair in 'rows_updated' in
            // journal order: (removed_row_index, added_row_index, ...).
            if (tc == MasterTableJournal.TABLE_UPDATE_REMOVE) {
                rows_updated.addInt(row_index);
            } else if (tc == MasterTableJournal.TABLE_UPDATE_ADD) {
                rows_updated.addInt(row_index);
            }
        }

        // Were there any updates or deletes?
        if (rows_deleted.size() > 0) {
            // Get all references on this table (foreign keys in other
            // tables that reference keys in this table).
            Transaction.ColumnGroupReference[] foreign_constraints = Transaction
                .queryTableImportedForeignKeyReferences(
                    transaction, table_name);

            // For each foreign constraint
            for (int n = 0; n < foreign_constraints.length; ++n) {
                Transaction.ColumnGroupReference constraint = foreign_constraints[n];
                // For each deleted/updated record in the table,
                for (int i = 0; i < rows_deleted.size(); ++i) {
                    int row_index = rows_deleted.intAt(i);
                    // What was the key before it was updated/deleted
                    int[] cols = TableDataConglomerate
                        .findColumnIndices(table_def,
                            constraint.ref_columns);
                    TObject[] original_key = new TObject[cols.length];
                    int null_count = 0;
                    for (int p = 0; p < cols.length; ++p) {
                        original_key[p] = getCellContents(
                            cols[p], row_index);
                        if (original_key[p].isNull()) {
                            ++null_count;
                        }
                    }
                    // Check the original key isn't null (an all-NULL key
                    // cannot be referenced, so no action is needed).
                    if (null_count != cols.length) {
                        // Is is an update?
                        int update_index = rows_updated
                            .indexOf(row_index);
                        if (update_index != -1) {
                            // Yes, this is an update
                            // NOTE(review): assumes the UPDATE_REMOVE entry is
                            // immediately followed by its paired UPDATE_ADD in
                            // 'rows_updated', so the added row index is at
                            // update_index + 1 — confirm against journal
                            // write order.
                            int row_index_add = rows_updated
                                .intAt(update_index + 1);
                            // It must be an update, so first see if the change caused any
                            // of the keys to change.
                            boolean key_changed = false;
                            TObject[] key_updated_to = new TObject[cols.length];
                            for (int p = 0; p < cols.length; ++p) {
                                key_updated_to[p] = getCellContents(
                                    cols[p], row_index_add);
                                if (original_key[p]
                                    .compareTo(key_updated_to[p]) != 0) {
                                    key_changed = true;
                                }
                            }
                            if (key_changed) {
                                // Allow the delete, and execute the action,
                                // What did the key update to?
                                executeUpdateReferentialAction(
                                    constraint,
                                    original_key,
                                    key_updated_to, context);
                            }
                            // If the key didn't change, we don't need to do anything.
                        } else {
                            // No, so it must be a delete,
                            // This will look at the referencee table and if it contains
                            // the key, work out what to do with it.
                            executeDeleteReferentialAction(
                                constraint, original_key,
                                context);
                        }

                    } // If the key isn't null

                } // for each deleted rows

            } // for each foreign key reference to this table

        }

        // Were there any rows added (that weren't deleted)?
        if (rows_added.size() > 0) {
            int[] row_indices = rows_added.toIntArray();

            // Check for any field constraint violations in the added rows
            TableDataConglomerate
                .checkFieldConstraintViolations(
                    transaction, this , row_indices);
            // Check this table, adding the given row_index, immediate
            TableDataConglomerate.checkAddConstraintViolations(
                transaction, this , row_indices,
                Transaction.INITIALLY_IMMEDIATE);
        }

    } catch (DatabaseConstraintViolationException e) {

        // If a constraint violation, roll back the changes since the last
        // check.
        int rollback_point = table_journal.entries()
            - last_entry_ri_check;
        // Only safe to roll back if the row list hasn't been rebuilt past
        // the rollback point since those entries were written.
        if (row_list_rebuild <= rollback_point) {
            table_journal.rollbackEntries(rollback_point);
        } else {
            System.out
                .println("WARNING: rebuild_pointer is after rollback point so we can't "
                    + "rollback to the point before the constraint violation.");
        }

        throw e;

    } finally {
        // Make sure we update the 'last_entry_ri_check' variable so the
        // next call only examines entries written after this point.
        last_entry_ri_check = table_journal.entries();
    }

}
1986:
1987: public MasterTableJournal getJournal() {
1988: return table_journal;
1989: }
1990:
1991: public void dispose() {
1992: // Dispose and invalidate the schemes
1993: // This is really a safety measure to ensure the schemes can't be
1994: // used outside the scope of the lifetime of this object.
1995: for (int i = 0; i < column_schemes.length; ++i) {
1996: SelectableScheme scheme = column_schemes[i];
1997: if (scheme != null) {
1998: scheme.dispose();
1999: column_schemes[i] = null;
2000: }
2001: }
2002: row_list = null;
2003: table_journal = null;
2004: scheme_rebuilds = null;
2005: index_set = null;
2006: transaction = null;
2007: }
2008:
2009: public void addRootLock() {
2010: MasterTableDataSource.this .addRootLock();
2011: }
2012:
/**
 * Removes a root lock by delegating to the enclosing master table data
 * source.
 */
public void removeRootLock() {
    MasterTableDataSource.this.removeRootLock();
}
2016:
2017: }
2018:
2019: }
|