0001: /**
0002: * com.mckoi.database.TableDataConglomerate 18 Nov 2000
0003: *
0004: * Mckoi SQL Database ( http://www.mckoi.com/database )
0005: * Copyright (C) 2000, 2001, 2002 Diehl and Associates, Inc.
0006: *
0007: * This program is free software; you can redistribute it and/or
0008: * modify it under the terms of the GNU General Public License
0009: * Version 2 as published by the Free Software Foundation.
0010: *
0011: * This program is distributed in the hope that it will be useful,
0012: * but WITHOUT ANY WARRANTY; without even the implied warranty of
0013: * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
0014: * GNU General Public License Version 2 for more details.
0015: *
0016: * You should have received a copy of the GNU General Public License
0017: * Version 2 along with this program; if not, write to the Free Software
0018: * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
0019: *
0020: * Change Log:
0021: *
0022: *
0023: */package com.mckoi.database;
0024:
0025: import java.io.*;
0026: import java.util.Iterator;
0027: import java.util.ArrayList;
0028: import java.util.HashMap;
0029: import java.util.List;
0030: import com.mckoi.util.IntegerListInterface;
0031: import com.mckoi.util.IntegerIterator;
0032: import com.mckoi.util.IntegerVector;
0033: import com.mckoi.util.ByteArrayUtil;
0034: import com.mckoi.util.UserTerminal;
0035: import com.mckoi.util.BigNumber;
0036: import com.mckoi.debug.*;
0037:
0038: import com.mckoi.store.Store;
0039: import com.mckoi.store.MutableArea;
0040: import com.mckoi.store.Area;
0041:
0042: import com.mckoi.database.StateStore.StateResource;
0043:
0044: import com.mckoi.database.global.ByteLongObject;
0045: import com.mckoi.database.global.ObjectTranslator;
0046: import com.mckoi.database.global.Ref;
0047:
/**
 * A conglomerate of data that represents the contents of all tables in a
 * complete database. This object handles all data persistence management
 * (storage, retrieval, removal) issues. It is a transactional manager for
 * both data and indices in the database.
 *
 * @author Tobias Downer
 */
0056:
0057: public class TableDataConglomerate {
0058:
/**
 * The postfix on the name of the state file for the database store name.
 */
public static final String STATE_POST = "_sf";

// ---------- The standard constraint/schema tables ----------

/**
 * The name of the system schema where persistent conglomerate state is
 * stored.
 */
public static final String SYSTEM_SCHEMA = "SYS_INFO";

/**
 * The schema info table.
 */
public static final TableName SCHEMA_INFO_TABLE = new TableName(
        SYSTEM_SCHEMA, "sUSRSchemaInfo");

/**
 * The table of persistent database variables (variable/value pairs).
 */
public static final TableName PERSISTENT_VAR_TABLE = new TableName(
        SYSTEM_SCHEMA, "sUSRDatabaseVars");

/**
 * The foreign key columns table (rows refer to entries in
 * FOREIGN_INFO_TABLE via 'fk_id').
 */
public static final TableName FOREIGN_COLS_TABLE = new TableName(
        SYSTEM_SCHEMA, "sUSRForeignColumns");

/**
 * The unique constraint columns table (rows refer to entries in
 * UNIQUE_INFO_TABLE via 'un_id').
 */
public static final TableName UNIQUE_COLS_TABLE = new TableName(
        SYSTEM_SCHEMA, "sUSRUniqueColumns");

/**
 * The primary key columns table (rows refer to entries in
 * PRIMARY_INFO_TABLE via 'pk_id').
 */
public static final TableName PRIMARY_COLS_TABLE = new TableName(
        SYSTEM_SCHEMA, "sUSRPrimaryColumns");

/**
 * The check constraint information table.
 */
public static final TableName CHECK_INFO_TABLE = new TableName(
        SYSTEM_SCHEMA, "sUSRCheckInfo");

/**
 * The unique constraint information table.
 */
public static final TableName UNIQUE_INFO_TABLE = new TableName(
        SYSTEM_SCHEMA, "sUSRUniqueInfo");

/**
 * The foreign key constraint information table.
 */
public static final TableName FOREIGN_INFO_TABLE = new TableName(
        SYSTEM_SCHEMA, "sUSRFKeyInfo");

/**
 * The primary key constraint information table.
 */
public static final TableName PRIMARY_INFO_TABLE = new TableName(
        SYSTEM_SCHEMA, "sUSRPKeyInfo");

/**
 * The sequence generator information table (id, schema, name, type).
 */
public static final TableName SYS_SEQUENCE_INFO = new TableName(
        SYSTEM_SCHEMA, "sUSRSequenceInfo");

/**
 * The sequence generator state table (last_value, increment, min/max,
 * start, cache, cycle per sequence).
 */
public static final TableName SYS_SEQUENCE = new TableName(
        SYSTEM_SCHEMA, "sUSRSequence");
0107:
/**
 * The TransactionSystem that this Conglomerate is a child of.
 */
private final TransactionSystem system;

/**
 * The StoreSystem object used by this conglomerate to store the underlying
 * representation.
 */
private final StoreSystem store_system;

/**
 * The name given to this conglomerate.
 */
private String name;

/**
 * The actual store that backs the state store.
 */
private Store act_state_store;

/**
 * A store for the conglomerate state container. This
 * file stores information persistently about the state of this object.
 */
private StateStore state_store;

/**
 * The current commit id for committed transactions. Whenever transactional
 * changes are committed to the conglomerate, this id is incremented.
 */
private long commit_id;

/**
 * The list of all tables that are currently open in this conglomerate
 * (elements are MasterTableDataSource). This includes tables that are
 * not committed.
 */
private ArrayList table_list;

/**
 * The actual Store implementation that maintains the BlobStore information
 * for this conglomerate (if there is one).
 */
private Store act_blob_store;

/**
 * The BlobStore object for this conglomerate.
 */
private BlobStore blob_store;

/**
 * The SequenceManager object for this conglomerate.
 */
private SequenceManager sequence_manager;

/**
 * The list of transactions that are currently open over this conglomerate.
 * This list is ordered from lowest commit_id to highest. This object is
 * shared with all the children MasterTableDataSource objects.
 */
private OpenTransactionList open_transactions;

/**
 * The list of all name space journals for the history of committed
 * transactions.
 */
private ArrayList namespace_journal_list;

// ---------- Table event listener ----------

/**
 * All listeners for modification events on tables in this conglomerate.
 * This is a mapping from TableName -> ArrayList of listeners.
 */
private final HashMap modification_listeners;

// ---------- Locks ----------

/**
 * This lock is obtained when we go to commit a change to the table.
 * Grabbing this lock ensures that no other commits can occur at the same
 * time on this conglomerate.
 */
final Object commit_lock = new Object();

// // ---------- Shutdown hook thread ----------
//
// /**
// * The ConglomerateShutdownHookThread object which we create when the
// * conglomerate is opened, and removed when we close the conglomerate.
// */
// private ConglomerateShutdownHookThread shutdown_hook = null;
0200:
/**
 * Constructs the conglomerate as a child of the given TransactionSystem,
 * backed by the given StoreSystem. The conglomerate is not opened or
 * created by construction.
 */
public TableDataConglomerate(TransactionSystem system,
        StoreSystem store_system) {
    this.system = system;
    this.store_system = store_system;
    // Shared bookkeeping structures for transactions and listeners.
    this.open_transactions = new OpenTransactionList(system);
    this.modification_listeners = new HashMap();
    this.namespace_journal_list = new ArrayList();
    // The sequence manager is bound to this conglomerate for its lifetime.
    this.sequence_manager = new SequenceManager(this);
}
0215:
0216: /**
0217: * Returns the TransactionSystem that this conglomerate is part of.
0218: */
0219: public final TransactionSystem getSystem() {
0220: return system;
0221: }
0222:
0223: /**
0224: * Returns the StoreSystem used by this conglomerate to manage the
0225: * persistent state of the database.
0226: */
0227: public final StoreSystem storeSystem() {
0228: return store_system;
0229: }
0230:
0231: /**
0232: * Returns the SequenceManager object for this conglomerate.
0233: */
0234: final SequenceManager getSequenceManager() {
0235: return sequence_manager;
0236: }
0237:
0238: /**
0239: * Returns the BlobStore for this conglomerate.
0240: */
0241: final BlobStore getBlobStore() {
0242: return blob_store;
0243: }
0244:
0245: /**
0246: * Returns the DebugLogger object that we use to log debug messages to.
0247: */
0248: public final DebugLogger Debug() {
0249: return getSystem().Debug();
0250: }
0251:
0252: /**
0253: * Returns the name given to this conglomerate.
0254: */
0255: String getName() {
0256: return name;
0257: }
0258:
0259: // ---------- Conglomerate state methods ----------
0260:
0261: /**
0262: * Marks the given table id as committed dropped.
0263: */
0264: private void markAsCommittedDropped(int table_id) {
0265: MasterTableDataSource master_table = getMasterTable(table_id);
0266: state_store.addDeleteResource(new StateResource(table_id,
0267: createEncodedTableFile(master_table)));
0268: }
0269:
0270: /**
0271: * Loads the master table given the table_id and the name of the table
0272: * resource in the database path. The table_string is a specially formatted
0273: * string that we parse to determine the file structure of the table.
0274: */
0275: private MasterTableDataSource loadMasterTable(int table_id,
0276: String table_str, int table_type) throws IOException {
0277:
0278: // Open the table
0279: if (table_type == 1) {
0280: V1MasterTableDataSource master = new V1MasterTableDataSource(
0281: getSystem(), storeSystem(), open_transactions);
0282: if (master.exists(table_str)) {
0283: return master;
0284: }
0285: } else if (table_type == 2) {
0286: V2MasterTableDataSource master = new V2MasterTableDataSource(
0287: getSystem(), storeSystem(), open_transactions,
0288: blob_store);
0289: if (master.exists(table_str)) {
0290: return master;
0291: }
0292: }
0293:
0294: // If not exists, then generate an error message
0295: Debug().write(
0296: Lvl.ERROR,
0297: this ,
0298: "Couldn't find table source - resource name: "
0299: + table_str + " table_id: " + table_id);
0300:
0301: return null;
0302: }
0303:
0304: /**
0305: * Returns a string that is an encoded table file name. An encoded table
0306: * file name includes information about the table type with the name of the
0307: * table. For example, ":1ThisTable" represents a V1MasterTableDataSource
0308: * table with file name "ThisTable".
0309: */
0310: private static String createEncodedTableFile(
0311: MasterTableDataSource table) {
0312: char type;
0313: if (table instanceof V1MasterTableDataSource) {
0314: type = '1';
0315: } else if (table instanceof V2MasterTableDataSource) {
0316: type = '2';
0317: } else {
0318: throw new RuntimeException(
0319: "Unrecognised MasterTableDataSource class.");
0320: }
0321: StringBuffer buf = new StringBuffer();
0322: buf.append(':');
0323: buf.append(type);
0324: buf.append(table.getSourceIdent());
0325: return new String(buf);
0326: }
0327:
/**
 * Reads in the list of committed (visible) tables in this conglomerate
 * from the state store. This should only be called during an 'open' like
 * method. This method opens each visible table and adds it to the
 * 'table_list' list.
 *
 * @throws Error if a visible table's backing file can not be found.
 */
private void readVisibleTables() throws IOException {

    // The list of all visible tables from the state file
    StateResource[] tables = state_store.getVisibleList();
    // For each visible table
    for (int i = 0; i < tables.length; ++i) {
        StateResource resource = tables[i];

        int master_table_id = (int) resource.table_id;
        String file_name = resource.name;

        // Parse the file name string and determine the table type.
        // An encoded name of the form ":1Name" or ":2Name" carries the
        // storage version (see createEncodedTableFile); a name with no
        // ':' prefix defaults to type 1.
        int table_type = 1;
        if (file_name.startsWith(":")) {
            if (file_name.charAt(1) == '1') {
                table_type = 1;
            } else if (file_name.charAt(1) == '2') {
                table_type = 2;
            } else {
                throw new RuntimeException(
                        "Table type is not known.");
            }
            // Strip the ":N" prefix to recover the raw file name.
            file_name = file_name.substring(2);
        }

        // Load the master table from the resource information
        MasterTableDataSource master = loadMasterTable(
                master_table_id, file_name, table_type);

        // A missing file for a visible table is fatal for an open.
        if (master == null) {
            throw new Error("Table file for " + file_name
                    + " was not found.");
        }

        // Open the table via the version-specific implementation.
        if (master instanceof V1MasterTableDataSource) {
            V1MasterTableDataSource v1_master = (V1MasterTableDataSource) master;
            v1_master.open(file_name);
        } else if (master instanceof V2MasterTableDataSource) {
            V2MasterTableDataSource v2_master = (V2MasterTableDataSource) master;
            v2_master.open(file_name);
        } else {
            throw new Error("Unknown master table type: "
                    + master.getClass());
        }

        // Add the table to the table list
        table_list.add(master);

    }

}
0385:
0386: /**
0387: * Checks the list of committed tables in this conglomerate. This should
0388: * only be called during an 'check' like method. This method fills the
0389: * 'committed_tables' and 'table_list' lists with the tables in this
0390: * conglomerate.
0391: */
0392: public void checkVisibleTables(UserTerminal terminal)
0393: throws IOException {
0394:
0395: // The list of all visible tables from the state file
0396: StateResource[] tables = state_store.getVisibleList();
0397: // For each visible table
0398: for (int i = 0; i < tables.length; ++i) {
0399: StateResource resource = tables[i];
0400:
0401: int master_table_id = (int) resource.table_id;
0402: String file_name = resource.name;
0403:
0404: // Parse the file name string and determine the table type.
0405: int table_type = 1;
0406: if (file_name.startsWith(":")) {
0407: if (file_name.charAt(1) == '1') {
0408: table_type = 1;
0409: } else if (file_name.charAt(1) == '2') {
0410: table_type = 2;
0411: } else {
0412: throw new RuntimeException(
0413: "Table type is not known.");
0414: }
0415: file_name = file_name.substring(2);
0416: }
0417:
0418: // Load the master table from the resource information
0419: MasterTableDataSource master = loadMasterTable(
0420: master_table_id, file_name, table_type);
0421:
0422: if (master instanceof V1MasterTableDataSource) {
0423: V1MasterTableDataSource v1_master = (V1MasterTableDataSource) master;
0424: v1_master.checkAndRepair(file_name, terminal);
0425: } else if (master instanceof V2MasterTableDataSource) {
0426: V2MasterTableDataSource v2_master = (V2MasterTableDataSource) master;
0427: v2_master.checkAndRepair(file_name, terminal);
0428: } else {
0429: throw new Error("Unknown master table type: "
0430: + master.getClass());
0431: }
0432:
0433: // Add the table to the table list
0434: table_list.add(master);
0435:
0436: // Set a check point
0437: store_system.setCheckPoint();
0438:
0439: }
0440:
0441: }
0442:
/**
 * Reads in the list of committed dropped tables on this conglomerate.
 * This should only be called during an 'open' like method. Each dropped
 * table whose backing file still exists is opened and added to the
 * 'table_list' list; delete resources whose files no longer exist are
 * removed from the state store. Any state store changes are committed
 * before returning.
 */
private void readDroppedTables() throws IOException {

    // The list of all dropped tables from the state file
    StateResource[] tables = state_store.getDeleteList();
    // For each visible table
    for (int i = 0; i < tables.length; ++i) {
        StateResource resource = tables[i];

        int master_table_id = (int) resource.table_id;
        String file_name = resource.name;

        // Parse the file name string and determine the table type.
        // ":1Name" = V1 storage, ":2Name" = V2 storage; a name with no
        // ':' prefix defaults to type 1.
        int table_type = 1;
        if (file_name.startsWith(":")) {
            if (file_name.charAt(1) == '1') {
                table_type = 1;
            } else if (file_name.charAt(1) == '2') {
                table_type = 2;
            } else {
                throw new RuntimeException(
                        "Table type is not known.");
            }
            file_name = file_name.substring(2);
        }

        // Load the master table from the resource information
        MasterTableDataSource master = loadMasterTable(
                master_table_id, file_name, table_type);

        // File wasn't found so remove from the delete resources
        if (master == null) {
            state_store.removeDeleteResource(resource.name);
        } else {
            // Open the table via the version-specific implementation.
            if (master instanceof V1MasterTableDataSource) {
                V1MasterTableDataSource v1_master = (V1MasterTableDataSource) master;
                v1_master.open(file_name);
            } else if (master instanceof V2MasterTableDataSource) {
                V2MasterTableDataSource v2_master = (V2MasterTableDataSource) master;
                v2_master.open(file_name);
            } else {
                throw new Error("Unknown master table type: "
                        + master.getClass());
            }

            // Add the table to the table list
            table_list.add(master);
        }

    }

    // Commit any changes to the state store
    state_store.commit();

}
0506:
0507: /**
 * Create the system tables that must be present in a conglomerate. These
 * tables consist of constraint and table management data.
0510: * <p>
0511: * <pre>
0512: * sUSRPKeyInfo - Primary key constraint information.
0513: * sUSRFKeyInfo - Foreign key constraint information.
0514: * sUSRUniqueInfo - Unique set constraint information.
0515: * sUSRCheckInfo - Check constraint information.
0516: * sUSRPrimaryColumns - Primary columns information (refers to PKeyInfo)
0517: * sUSRUniqueColumns - Unique columns information (refers to UniqueInfo)
0518: * sUSRForeignColumns1 - Foreign column information (refers to FKeyInfo)
0519: * sUSRForeignColumns2 - Secondary Foreign column information (refers to
0520: * FKeyInfo).
0521: * </pre>
0522: * These tables handle data for referential integrity. There are also some
0523: * additional tables containing general table information.
0524: * <pre>
0525: * sUSRTableColumnInfo - All table and column information.
0526: * </pre>
0527: * The design is fairly elegant in that we are using the database to store
0528: * information to maintain referential integrity.
0529: * <p><pre>
0530: * The schema layout for these tables;
0531: *
0532: * CREATE TABLE sUSRPKeyInfo (
0533: * id NUMERIC NOT NULL,
0534: * name TEXT NOT NULL, // The name of the primary key constraint
0535: * schema TEXT NOT NULL, // The name of the schema
0536: * table TEXT NOT NULL, // The name of the table
0537: * deferred BIT NOT NULL, // Whether deferred or immediate
0538: * PRIMARY KEY (id),
0539: * UNIQUE (schema, table)
0540: * );
0541: * CREATE TABLE sUSRFKeyInfo (
0542: * id NUMERIC NOT NULL,
0543: * name TEXT NOT NULL, // The name of the foreign key constraint
0544: * schema TEXT NOT NULL, // The name of the schema
0545: * table TEXT NOT NULL, // The name of the table
0546: * ref_schema TEXT NOT NULL, // The name of the schema referenced
0547: * ref_table TEXT NOT NULL, // The name of the table referenced
0548: * update_rule TEXT NOT NULL, // The rule for updating to table
0549: * delete_rule TEXT NOT NULL, // The rule for deleting from table
0550: * deferred BIT NOT NULL, // Whether deferred or immediate
0551: * PRIMARY KEY (id)
0552: * );
0553: * CREATE TABLE sUSRUniqueInfo (
0554: * id NUMERIC NOT NULL,
0555: * name TEXT NOT NULL, // The name of the unique constraint
0556: * schema TEXT NOT NULL, // The name of the schema
0557: * table TEXT NOT NULL, // The name of the table
0558: * deferred BIT NOT NULL, // Whether deferred or immediate
0559: * PRIMARY KEY (id)
0560: * );
0561: * CREATE TABLE sUSRCheckInfo (
0562: * id NUMERIC NOT NULL,
0563: * name TEXT NOT NULL, // The name of the check constraint
0564: * schema TEXT NOT NULL, // The name of the schema
0565: * table TEXT NOT NULL, // The name of the table
0566: * expression TEXT NOT NULL, // The check expression
0567: * deferred BIT NOT NULL, // Whether deferred or immediate
0568: * PRIMARY KEY (id)
0569: * );
0570: * CREATE TABLE sUSRPrimaryColumns (
0571: * pk_id NUMERIC NOT NULL, // The primary key constraint id
0572: * column TEXT NOT NULL, // The name of the primary
0573: * seq_no INTEGER NOT NULL, // The sequence number of this constraint
0574: * FOREIGN KEY pk_id REFERENCES sUSRPKeyInfo
0575: * );
0576: * CREATE TABLE sUSRUniqueColumns (
0577: * un_id NUMERIC NOT NULL, // The unique constraint id
0578: * column TEXT NOT NULL, // The column that is unique
0579: * seq_no INTEGER NOT NULL, // The sequence number of this constraint
0580: * FOREIGN KEY un_id REFERENCES sUSRUniqueInfo
0581: * );
0582: * CREATE TABLE sUSRForeignColumns (
0583: * fk_id NUMERIC NOT NULL, // The foreign key constraint id
0584: * fcolumn TEXT NOT NULL, // The column in the foreign key
0585: * pcolumn TEXT NOT NULL, // The column in the primary key
0586: * // (referenced)
0587: * seq_no INTEGER NOT NULL, // The sequence number of this constraint
0588: * FOREIGN KEY fk_id REFERENCES sUSRFKeyInfo
0589: * );
0590: * CREATE TABLE sUSRSchemaInfo (
0591: * id NUMERIC NOT NULL,
0592: * name TEXT NOT NULL,
0593: * type TEXT, // Schema type (system, etc)
0594: * other TEXT,
0595: *
0596: * UNIQUE ( name )
0597: * );
0598: * CREATE TABLE sUSRTableInfo (
0599: * id NUMERIC NOT NULL,
0600: * name TEXT NOT NULL, // The name of the table
0601: * schema TEXT NOT NULL, // The name of the schema of this table
0602: * type TEXT, // Table type (temporary, system, etc)
0603: * other TEXT, // Notes, etc
0604: *
0605: * UNIQUE ( name )
0606: * );
0607: * CREATE TABLE sUSRColumnColumns (
0608: * t_id NUMERIC NOT NULL, // Foreign key to sUSRTableInfo
0609: * column TEXT NOT NULL, // The column name
0610: * seq_no INTEGER NOT NULL, // The sequence in the table
0611: * type TEXT NOT NULL, // The SQL type of this column
0612: * size NUMERIC, // The size of the column if applicable
0613: * scale NUMERIC, // The scale of the column if applicable
0614: * default TEXT NOT NULL, // The default expression
0615: * constraints TEXT NOT NULL, // The constraints of this column
0616: * other TEXT, // Notes, etc
0617: *
0618: * FOREIGN KEY t_id REFERENCES sUSRTableInfo,
0619: * UNIQUE ( t_id, column )
0620: * );
0621: *
0622: * </pre>
0623: */
0624: void updateSystemTableSchema() {
0625: // Create the transaction
0626: Transaction transaction = createTransaction();
0627:
0628: DataTableDef table;
0629:
0630: table = new DataTableDef();
0631: table.setTableName(SYS_SEQUENCE_INFO);
0632: table.addColumn(DataTableColumnDef.createNumericColumn("id"));
0633: table
0634: .addColumn(DataTableColumnDef
0635: .createStringColumn("schema"));
0636: table.addColumn(DataTableColumnDef.createStringColumn("name"));
0637: table.addColumn(DataTableColumnDef.createNumericColumn("type"));
0638: transaction.alterCreateTable(table, 187, 128);
0639:
0640: table = new DataTableDef();
0641: table.setTableName(SYS_SEQUENCE);
0642: table.addColumn(DataTableColumnDef
0643: .createNumericColumn("seq_id"));
0644: table.addColumn(DataTableColumnDef
0645: .createNumericColumn("last_value"));
0646: table.addColumn(DataTableColumnDef
0647: .createNumericColumn("increment"));
0648: table.addColumn(DataTableColumnDef
0649: .createNumericColumn("minvalue"));
0650: table.addColumn(DataTableColumnDef
0651: .createNumericColumn("maxvalue"));
0652: table
0653: .addColumn(DataTableColumnDef
0654: .createNumericColumn("start"));
0655: table
0656: .addColumn(DataTableColumnDef
0657: .createNumericColumn("cache"));
0658: table
0659: .addColumn(DataTableColumnDef
0660: .createBooleanColumn("cycle"));
0661: transaction.alterCreateTable(table, 187, 128);
0662:
0663: table = new DataTableDef();
0664: table.setTableName(PRIMARY_INFO_TABLE);
0665: table.addColumn(DataTableColumnDef.createNumericColumn("id"));
0666: table.addColumn(DataTableColumnDef.createStringColumn("name"));
0667: table
0668: .addColumn(DataTableColumnDef
0669: .createStringColumn("schema"));
0670: table.addColumn(DataTableColumnDef.createStringColumn("table"));
0671: table.addColumn(DataTableColumnDef
0672: .createNumericColumn("deferred"));
0673: transaction.alterCreateTable(table, 187, 128);
0674:
0675: table = new DataTableDef();
0676: table.setTableName(FOREIGN_INFO_TABLE);
0677: table.addColumn(DataTableColumnDef.createNumericColumn("id"));
0678: table.addColumn(DataTableColumnDef.createStringColumn("name"));
0679: table
0680: .addColumn(DataTableColumnDef
0681: .createStringColumn("schema"));
0682: table.addColumn(DataTableColumnDef.createStringColumn("table"));
0683: table.addColumn(DataTableColumnDef
0684: .createStringColumn("ref_schema"));
0685: table.addColumn(DataTableColumnDef
0686: .createStringColumn("ref_table"));
0687: table.addColumn(DataTableColumnDef
0688: .createStringColumn("update_rule"));
0689: table.addColumn(DataTableColumnDef
0690: .createStringColumn("delete_rule"));
0691: table.addColumn(DataTableColumnDef
0692: .createNumericColumn("deferred"));
0693: transaction.alterCreateTable(table, 187, 128);
0694:
0695: table = new DataTableDef();
0696: table.setTableName(UNIQUE_INFO_TABLE);
0697: table.addColumn(DataTableColumnDef.createNumericColumn("id"));
0698: table.addColumn(DataTableColumnDef.createStringColumn("name"));
0699: table
0700: .addColumn(DataTableColumnDef
0701: .createStringColumn("schema"));
0702: table.addColumn(DataTableColumnDef.createStringColumn("table"));
0703: table.addColumn(DataTableColumnDef
0704: .createNumericColumn("deferred"));
0705: transaction.alterCreateTable(table, 187, 128);
0706:
0707: table = new DataTableDef();
0708: table.setTableName(CHECK_INFO_TABLE);
0709: table.addColumn(DataTableColumnDef.createNumericColumn("id"));
0710: table.addColumn(DataTableColumnDef.createStringColumn("name"));
0711: table
0712: .addColumn(DataTableColumnDef
0713: .createStringColumn("schema"));
0714: table.addColumn(DataTableColumnDef.createStringColumn("table"));
0715: table.addColumn(DataTableColumnDef
0716: .createStringColumn("expression"));
0717: table.addColumn(DataTableColumnDef
0718: .createNumericColumn("deferred"));
0719: table.addColumn(DataTableColumnDef
0720: .createBinaryColumn("serialized_expression"));
0721: transaction.alterCreateTable(table, 187, 128);
0722:
0723: table = new DataTableDef();
0724: table.setTableName(PRIMARY_COLS_TABLE);
0725: table
0726: .addColumn(DataTableColumnDef
0727: .createNumericColumn("pk_id"));
0728: table
0729: .addColumn(DataTableColumnDef
0730: .createStringColumn("column"));
0731: table.addColumn(DataTableColumnDef
0732: .createNumericColumn("seq_no"));
0733: transaction.alterCreateTable(table, 91, 128);
0734:
0735: table = new DataTableDef();
0736: table.setTableName(UNIQUE_COLS_TABLE);
0737: table
0738: .addColumn(DataTableColumnDef
0739: .createNumericColumn("un_id"));
0740: table
0741: .addColumn(DataTableColumnDef
0742: .createStringColumn("column"));
0743: table.addColumn(DataTableColumnDef
0744: .createNumericColumn("seq_no"));
0745: transaction.alterCreateTable(table, 91, 128);
0746:
0747: table = new DataTableDef();
0748: table.setTableName(FOREIGN_COLS_TABLE);
0749: table
0750: .addColumn(DataTableColumnDef
0751: .createNumericColumn("fk_id"));
0752: table.addColumn(DataTableColumnDef
0753: .createStringColumn("fcolumn"));
0754: table.addColumn(DataTableColumnDef
0755: .createStringColumn("pcolumn"));
0756: table.addColumn(DataTableColumnDef
0757: .createNumericColumn("seq_no"));
0758: transaction.alterCreateTable(table, 91, 128);
0759:
0760: table = new DataTableDef();
0761: table.setTableName(SCHEMA_INFO_TABLE);
0762: table.addColumn(DataTableColumnDef.createNumericColumn("id"));
0763: table.addColumn(DataTableColumnDef.createStringColumn("name"));
0764: table.addColumn(DataTableColumnDef.createStringColumn("type"));
0765: table.addColumn(DataTableColumnDef.createStringColumn("other"));
0766: transaction.alterCreateTable(table, 91, 128);
0767:
0768: // Stores misc variables of the database,
0769: table = new DataTableDef();
0770: table.setTableName(PERSISTENT_VAR_TABLE);
0771: table.addColumn(DataTableColumnDef
0772: .createStringColumn("variable"));
0773: table.addColumn(DataTableColumnDef.createStringColumn("value"));
0774: transaction.alterCreateTable(table, 91, 128);
0775:
0776: // Commit and close the transaction.
0777: try {
0778: transaction.closeAndCommit();
0779: } catch (TransactionException e) {
0780: Debug().writeException(e);
0781: throw new Error(
0782: "Transaction Exception creating conglomerate.");
0783: }
0784:
0785: }
0786:
0787: /**
0788: * Given a table with a 'id' field, this will check that the sequence
0789: * value for the table is at least greater than the maximum id in the column.
0790: */
0791: void resetTableID(TableName tname) {
0792: // Create the transaction
0793: Transaction transaction = createTransaction();
0794: // Get the table
0795: MutableTableDataSource table = transaction.getTable(tname);
0796: // Find the index of the column name called 'id'
0797: DataTableDef table_def = table.getDataTableDef();
0798: int col_index = table_def.findColumnName("id");
0799: if (col_index == -1) {
0800: throw new Error("Column name 'id' not found.");
0801: }
0802: // Find the maximum 'id' value.
0803: SelectableScheme scheme = table.getColumnScheme(col_index);
0804: IntegerVector ivec = scheme.selectLast();
0805: if (ivec.size() > 0) {
0806: TObject ob = table
0807: .getCellContents(col_index, ivec.intAt(0));
0808: BigNumber b_num = ob.toBigNumber();
0809: if (b_num != null) {
0810: // Set the unique id to +1 the maximum id value in the column
0811: transaction.setUniqueID(tname, b_num.longValue() + 1L);
0812: }
0813: }
0814:
0815: // Commit and close the transaction.
0816: try {
0817: transaction.closeAndCommit();
0818: } catch (TransactionException e) {
0819: Debug().writeException(e);
0820: throw new Error(
0821: "Transaction Exception creating conglomerate.");
0822: }
0823: }
0824:
0825: /**
0826: * Resets the table sequence id for all the system tables managed by the
0827: * conglomerate.
0828: */
0829: void resetAllSystemTableID() {
0830: resetTableID(PRIMARY_INFO_TABLE);
0831: resetTableID(FOREIGN_INFO_TABLE);
0832: resetTableID(UNIQUE_INFO_TABLE);
0833: resetTableID(CHECK_INFO_TABLE);
0834: resetTableID(SCHEMA_INFO_TABLE);
0835: }
0836:
0837: /**
0838: * Populates the system table schema with initial data for an empty
0839: * conglomerate. This sets up the standard variables and table
0840: * constraint data.
0841: */
0842: private void initializeSystemTableSchema() {
0843: // Create the transaction
0844: Transaction transaction = createTransaction();
0845:
0846: // Insert the two default schema names,
0847: transaction.createSchema(SYSTEM_SCHEMA, "SYSTEM");
0848:
0849: // -- Primary Keys --
0850: // The 'id' columns are primary keys on all the system tables,
0851: final String[] id_col = new String[] { "id" };
0852: transaction.addPrimaryKeyConstraint(PRIMARY_INFO_TABLE, id_col,
0853: Transaction.INITIALLY_IMMEDIATE, "SYSTEM_PK_PK");
0854: transaction.addPrimaryKeyConstraint(FOREIGN_INFO_TABLE, id_col,
0855: Transaction.INITIALLY_IMMEDIATE, "SYSTEM_FK_PK");
0856: transaction.addPrimaryKeyConstraint(UNIQUE_INFO_TABLE, id_col,
0857: Transaction.INITIALLY_IMMEDIATE, "SYSTEM_UNIQUE_PK");
0858: transaction.addPrimaryKeyConstraint(CHECK_INFO_TABLE, id_col,
0859: Transaction.INITIALLY_IMMEDIATE, "SYSTEM_CHECK_PK");
0860: transaction.addPrimaryKeyConstraint(SCHEMA_INFO_TABLE, id_col,
0861: Transaction.INITIALLY_IMMEDIATE, "SYSTEM_SCHEMA_PK");
0862:
0863: // -- Foreign Keys --
0864: // Create the foreign key references,
0865: final String[] fk_col = new String[1];
0866: final String[] fk_ref_col = new String[] { "id" };
0867: fk_col[0] = "pk_id";
0868: transaction.addForeignKeyConstraint(PRIMARY_COLS_TABLE, fk_col,
0869: PRIMARY_INFO_TABLE, fk_ref_col, Transaction.NO_ACTION,
0870: Transaction.NO_ACTION, Transaction.INITIALLY_IMMEDIATE,
0871: "SYSTEM_PK_FK");
0872: fk_col[0] = "fk_id";
0873: transaction.addForeignKeyConstraint(FOREIGN_COLS_TABLE, fk_col,
0874: FOREIGN_INFO_TABLE, fk_ref_col, Transaction.NO_ACTION,
0875: Transaction.NO_ACTION, Transaction.INITIALLY_IMMEDIATE,
0876: "SYSTEM_FK_FK");
0877: fk_col[0] = "un_id";
0878: transaction.addForeignKeyConstraint(UNIQUE_COLS_TABLE, fk_col,
0879: UNIQUE_INFO_TABLE, fk_ref_col, Transaction.NO_ACTION,
0880: Transaction.NO_ACTION, Transaction.INITIALLY_IMMEDIATE,
0881: "SYSTEM_UNIQUE_FK");
0882:
0883: // sUSRPKeyInfo 'schema', 'table' column is a unique set,
0884: // (You are only allowed one primary key per table).
0885: String[] columns = new String[] { "schema", "table" };
0886: transaction.addUniqueConstraint(PRIMARY_INFO_TABLE, columns,
0887: Transaction.INITIALLY_IMMEDIATE,
0888: "SYSTEM_PKEY_ST_UNIQUE");
0889: // sUSRSchemaInfo 'name' column is a unique column,
0890: columns = new String[] { "name" };
0891: transaction
0892: .addUniqueConstraint(SCHEMA_INFO_TABLE, columns,
0893: Transaction.INITIALLY_IMMEDIATE,
0894: "SYSTEM_SCHEMA_UNIQUE");
0895: // columns = new String[] { "name" };
0896: columns = new String[] { "name", "schema" };
0897: // sUSRPKeyInfo 'name' column is a unique column,
0898: transaction.addUniqueConstraint(PRIMARY_INFO_TABLE, columns,
0899: Transaction.INITIALLY_IMMEDIATE, "SYSTEM_PKEY_UNIQUE");
0900: // sUSRFKeyInfo 'name' column is a unique column,
0901: transaction.addUniqueConstraint(FOREIGN_INFO_TABLE, columns,
0902: Transaction.INITIALLY_IMMEDIATE, "SYSTEM_FKEY_UNIQUE");
0903: // sUSRUniqueInfo 'name' column is a unique column,
0904: transaction
0905: .addUniqueConstraint(UNIQUE_INFO_TABLE, columns,
0906: Transaction.INITIALLY_IMMEDIATE,
0907: "SYSTEM_UNIQUE_UNIQUE");
0908: // sUSRCheckInfo 'name' column is a unique column,
0909: transaction.addUniqueConstraint(CHECK_INFO_TABLE, columns,
0910: Transaction.INITIALLY_IMMEDIATE, "SYSTEM_CHECK_UNIQUE");
0911:
0912: // sUSRDatabaseVars 'variable' is unique
0913: columns = new String[] { "variable" };
0914: transaction.addUniqueConstraint(PERSISTENT_VAR_TABLE, columns,
0915: Transaction.INITIALLY_IMMEDIATE,
0916: "SYSTEM_DATABASEVARS_UNIQUE");
0917:
0918: // Insert the version number of the database
0919: transaction.setPersistentVar("database.version", "1.4");
0920:
0921: // Commit and close the transaction.
0922: try {
0923: transaction.closeAndCommit();
0924: } catch (TransactionException e) {
0925: Debug().writeException(e);
0926: throw new Error(
0927: "Transaction Exception initializing conglomerate.");
0928: }
0929:
0930: }
0931:
0932: /**
0933: * Initializes the BlobStore. If the BlobStore doesn't exist it will be
0934: * created, and if it does exist it will be initialized.
0935: */
0936: private void initializeBlobStore() throws IOException {
0937:
0938: // Does the file already exist?
0939: boolean blob_store_exists = storeSystem().storeExists(
0940: "BlobStore");
0941: // If the blob store doesn't exist and we are read_only, we can't do
0942: // anything further so simply return.
0943: if (!blob_store_exists && isReadOnly()) {
0944: return;
0945: }
0946:
0947: // The blob store,
0948: if (blob_store_exists) {
0949: act_blob_store = storeSystem().openStore("BlobStore");
0950: } else {
0951: act_blob_store = storeSystem().createStore("BlobStore");
0952: }
0953:
0954: try {
0955: act_blob_store.lockForWrite();
0956:
0957: // Create the BlobStore object
0958: blob_store = new BlobStore(act_blob_store);
0959:
0960: // Get the 64 byte fixed area
0961: MutableArea fixed_area = act_blob_store.getMutableArea(-1);
0962: // If the blob store didn't exist then we need to create it here,
0963: if (!blob_store_exists) {
0964: long header_p = blob_store.create();
0965: fixed_area.putLong(header_p);
0966: fixed_area.checkOut();
0967: } else {
0968: // Otherwise we need to initialize the blob store
0969: long header_p = fixed_area.getLong();
0970: blob_store.init(header_p);
0971: }
0972: } finally {
0973: act_blob_store.unlockForWrite();
0974: }
0975:
0976: }
0977:
0978: // ---------- Private methods ----------
0979:
0980: /**
0981: * Returns true if the system is in read only mode.
0982: */
0983: private boolean isReadOnly() {
0984: return system.readOnlyAccess();
0985: }
0986:
0987: /**
0988: * Returns the path of the database.
0989: */
0990: private File getPath() {
0991: return system.getDatabasePath();
0992: }
0993:
0994: /**
0995: * Returns the next unique table_id value for a new table and updates the
0996: * conglomerate state information as appropriate.
0997: */
0998: private int nextUniqueTableID() throws IOException {
0999: return state_store.nextTableID();
1000: }
1001:
1002: /**
1003: * Sets up the internal state of this object.
1004: */
1005: private void setupInternal() {
1006: commit_id = 0;
1007: table_list = new ArrayList();
1008:
1009: // // If the VM supports shutdown hook,
1010: // try {
1011: // shutdown_hook = new ConglomerateShutdownHookThread();
1012: // Runtime.getRuntime().addShutdownHook(shutdown_hook);
1013: // }
1014: // catch (Throwable e) {
1015: // // Catch instantiation/access errors
1016: // system.Debug().write(Lvl.MESSAGE, this,
1017: // "Unable to register shutdown hook.");
1018: // }
1019:
1020: }
1021:
1022: // ---------- Public methods ----------
1023:
1024: /**
1025: * Minimally creates a new conglomerate but does NOT initialize any of the
1026: * system tables. This is a useful feature for a copy function that requires
1027: * a TableDataConglomerate object to copy data into but does not require any
1028: * initial system tables (because this information is copied from the source
1029: * conglomerate.
1030: */
1031: void minimalCreate(String name) throws IOException {
1032: this .name = name;
1033:
1034: if (exists(name)) {
1035: throw new IOException("Conglomerate already exists: "
1036: + name);
1037: }
1038:
1039: // Lock the store system (generates an IOException if exclusive lock
1040: // can not be made).
1041: if (!isReadOnly()) {
1042: storeSystem().lock(name);
1043: }
1044:
1045: // Create/Open the state store
1046: act_state_store = storeSystem().createStore(name + STATE_POST);
1047: try {
1048: act_state_store.lockForWrite();
1049:
1050: state_store = new StateStore(act_state_store);
1051: long head_p = state_store.create();
1052: // Get the fixed area
1053: MutableArea fixed_area = act_state_store.getMutableArea(-1);
1054: fixed_area.putLong(head_p);
1055: fixed_area.checkOut();
1056: } finally {
1057: act_state_store.unlockForWrite();
1058: }
1059:
1060: setupInternal();
1061:
1062: // Init the conglomerate blob store
1063: initializeBlobStore();
1064:
1065: // Create the system table (but don't initialize)
1066: updateSystemTableSchema();
1067:
1068: }
1069:
1070: /**
1071: * Creates a new conglomerate at the given path in the file system. This
1072: * must be an empty directory where files can be stored. This will create
1073: * the conglomerate and exit in an open (read/write) state.
1074: */
1075: public void create(String name) throws IOException {
1076: minimalCreate(name);
1077:
1078: // Initialize the conglomerate system tables.
1079: initializeSystemTableSchema();
1080:
1081: // Commit the state
1082: state_store.commit();
1083:
1084: }
1085:
1086: /**
1087: * Opens a conglomerate. If the conglomerate does not exist then an
1088: * IOException is generated. Once a conglomerate is open, we may start
1089: * opening transactions and altering the data within it.
1090: */
1091: public void open(String name) throws IOException {
1092: this .name = name;
1093:
1094: if (!exists(name)) {
1095: throw new IOException("Conglomerate doesn't exists: "
1096: + name);
1097: }
1098:
1099: // Check the file lock
1100: if (!isReadOnly()) {
1101: // Obtain the lock (generate error if this is not possible)
1102: storeSystem().lock(name);
1103: }
1104:
1105: // Open the state store
1106: act_state_store = storeSystem().openStore(name + STATE_POST);
1107: state_store = new StateStore(act_state_store);
1108: // Get the fixed 64 byte area.
1109: Area fixed_area = act_state_store.getArea(-1);
1110: long head_p = fixed_area.getLong();
1111: state_store.init(head_p);
1112:
1113: setupInternal();
1114:
1115: // Init the conglomerate blob store
1116: initializeBlobStore();
1117:
1118: readVisibleTables();
1119: readDroppedTables();
1120:
1121: // We possibly have things to clean up if there are deleted columns.
1122: cleanUpConglomerate();
1123:
1124: }
1125:
1126: /**
1127: * Closes this conglomerate. The conglomerate must be open for it to be
1128: * closed. When closed, any use of this object is undefined.
1129: */
1130: public void close() throws IOException {
1131: synchronized (commit_lock) {
1132:
1133: // We possibly have things to clean up.
1134: cleanUpConglomerate();
1135:
1136: // Set a check point
1137: store_system.setCheckPoint();
1138:
1139: // Go through and close all the committed tables.
1140: int size = table_list.size();
1141: for (int i = 0; i < size; ++i) {
1142: MasterTableDataSource master = (MasterTableDataSource) table_list
1143: .get(i);
1144: master.dispose(false);
1145: }
1146:
1147: state_store.commit();
1148: storeSystem().closeStore(act_state_store);
1149:
1150: table_list = null;
1151:
1152: }
1153:
1154: // Unlock the storage system
1155: storeSystem().unlock(name);
1156:
1157: if (blob_store != null) {
1158: storeSystem().closeStore(act_blob_store);
1159: }
1160:
1161: // removeShutdownHook();
1162: }
1163:
1164: // /**
1165: // * Removes the shutdown hook.
1166: // */
1167: // private void removeShutdownHook() {
1168: // // If the VM supports shutdown hook, remove it,
1169: // try {
1170: // if (shutdown_hook != null) {
1171: //// System.out.println("REMOVING: " + this);
1172: // Runtime.getRuntime().removeShutdownHook(shutdown_hook);
1173: // // We have no start it otherwise the ThreadGroup won't remove its
1174: // // reference to it and it causes GC problems.
1175: // shutdown_hook.start();
1176: // shutdown_hook.waitUntilComplete();
1177: // shutdown_hook = null;
1178: // }
1179: // }
1180: // catch (Throwable e) {
1181: // // Catch (and ignore) instantiation/access errors
1182: // }
1183: // }
1184:
1185: /**
1186: * Deletes and closes the conglomerate. This will delete all the files in
1187: * the file system associated with this conglomerate, so this method should
1188: * be used with care.
1189: * <p>
1190: * WARNING: Will result in total loss of all data stored in the conglomerate.
1191: */
1192: public void delete() throws IOException {
1193: synchronized (commit_lock) {
1194:
1195: // We possibly have things to clean up.
1196: cleanUpConglomerate();
1197:
1198: // Go through and delete and close all the committed tables.
1199: int size = table_list.size();
1200: for (int i = 0; i < size; ++i) {
1201: MasterTableDataSource master = (MasterTableDataSource) table_list
1202: .get(i);
1203: master.drop();
1204: }
1205:
1206: // Delete the state file
1207: state_store.commit();
1208: storeSystem().closeStore(act_state_store);
1209: storeSystem().deleteStore(act_state_store);
1210:
1211: // Delete the blob store
1212: if (blob_store != null) {
1213: storeSystem().closeStore(act_blob_store);
1214: storeSystem().deleteStore(act_blob_store);
1215: }
1216:
1217: // Invalidate this object
1218: table_list = null;
1219:
1220: }
1221:
1222: // Unlock the storage system.
1223: storeSystem().unlock(name);
1224: }
1225:
1226: /**
1227: * Returns true if the conglomerate is closed.
1228: */
1229: public boolean isClosed() {
1230: synchronized (commit_lock) {
1231: return table_list == null;
1232: }
1233: }
1234:
1235: /**
1236: * Returns true if the conglomerate exists in the file system and can
1237: * be opened.
1238: */
1239: public boolean exists(String name) throws IOException {
1240: return storeSystem().storeExists(name + STATE_POST);
1241: }
1242:
1243: /**
1244: * Makes a complete copy of this database to the position represented by the
1245: * given TableDataConglomerate object. The given TableDataConglomerate
1246: * object must NOT be being used by another database running in the JVM.
1247: * This may take a while to complete. The backup operation occurs within its
1248: * own transaction and the copy transaction is read-only meaning there is no
1249: * way for the copy process to interfere with other transactions running
1250: * concurrently.
1251: * <p>
1252: * The conglomerate must be open before this method is called.
1253: */
1254: public void liveCopyTo(TableDataConglomerate dest_conglomerate)
1255: throws IOException {
1256:
1257: // The destination store system
1258: StoreSystem dest_store_system = dest_conglomerate.storeSystem();
1259:
1260: // Copy all the blob data from the given blob store to the current blob
1261: // store.
1262: dest_conglomerate.blob_store.copyFrom(dest_store_system,
1263: blob_store);
1264:
1265: // Open new transaction - this is the current view we are going to copy.
1266: Transaction transaction = createTransaction();
1267:
1268: try {
1269:
1270: // Copy the data in this transaction to the given destination store system.
1271: transaction.liveCopyAllDataTo(dest_conglomerate);
1272:
1273: } finally {
1274: // Make sure we close the transaction
1275: try {
1276: transaction.closeAndCommit();
1277: } catch (TransactionException e) {
1278: throw new RuntimeException("Transaction Error: "
1279: + e.getMessage());
1280: }
1281: }
1282:
1283: // Finished - increment the live copies counter.
1284: getSystem().stats().increment(
1285: "TableDataConglomerate.liveCopies");
1286:
1287: }
1288:
1289: // ---------- Diagnostic and repair ----------
1290:
1291: /**
1292: * Returns a RawDiagnosticTable object that is used for diagnostics of the
1293: * table with the given file name.
1294: */
1295: public RawDiagnosticTable getDiagnosticTable(String table_file_name) {
1296: synchronized (commit_lock) {
1297: for (int i = 0; i < table_list.size(); ++i) {
1298: MasterTableDataSource master = (MasterTableDataSource) table_list
1299: .get(i);
1300: if (master.getSourceIdent().equals(table_file_name)) {
1301: return master.getRawDiagnosticTable();
1302: }
1303: }
1304: }
1305: return null;
1306: }
1307:
1308: /**
1309: * Returns the list of file names for all tables in this conglomerate.
1310: */
1311: public String[] getAllTableFileNames() {
1312: synchronized (commit_lock) {
1313: String[] list = new String[table_list.size()];
1314: for (int i = 0; i < table_list.size(); ++i) {
1315: MasterTableDataSource master = (MasterTableDataSource) table_list
1316: .get(i);
1317: list[i] = master.getSourceIdent();
1318: }
1319: return list;
1320: }
1321: }
1322:
1323: // ---------- Conglomerate event notification ----------
1324:
1325: /**
1326: * Adds a listener for transactional modification events that occur on the
1327: * given table in this conglomerate. A transactional modification event is
1328: * an event fired immediately upon the modification of a table by a
1329: * transaction, either immediately before the modification or immediately
1330: * after. Also an event is fired when a modification to a table is
1331: * successfully committed.
1332: * <p>
1333: * The BEFORE_* type triggers are given the opportunity to modify the
1334: * contents of the RowData before the update or insert occurs. All triggers
1335: * may generate an exception which will cause the transaction to rollback.
1336: * <p>
1337: * The event carries with it the event type, the transaction that the event
1338: * occurred in, and any information regarding the modification itself.
1339: * <p>
1340: * This event/listener mechanism is intended to be used to implement higher
1341: * layer database triggering systems. Note that care must be taken with
1342: * the commit level events because they occur inside a commit lock on this
1343: * conglomerate and so synchronization and deadlock issues need to be
1344: * carefully considered.
1345: * <p>
1346: * NOTE: A listener on the given table will be notified of ALL table
1347: * modification events by all transactions at the time they happen.
1348: *
1349: * @param table_name the name of the table in the conglomerate to listen for
1350: * events from.
1351: * @param listener the listener to be notified of events.
1352: */
1353: public void addTransactionModificationListener(
1354: TableName table_name,
1355: TransactionModificationListener listener) {
1356: synchronized (modification_listeners) {
1357: ArrayList list = (ArrayList) modification_listeners
1358: .get(table_name);
1359: if (list == null) {
1360: // If the mapping doesn't exist then create the list for the table
1361: // here.
1362: list = new ArrayList();
1363: modification_listeners.put(table_name, list);
1364: }
1365:
1366: list.add(listener);
1367: }
1368: }
1369:
1370: /**
1371: * Removes a listener for transaction modification events on the given table
1372: * in this conglomerate as previously set by the
1373: * 'addTransactionModificationListener' method.
1374: *
1375: * @param table_name the name of the table in the conglomerate to remove from
1376: * the listener list.
1377: * @param listener the listener to be removed.
1378: */
1379: public void removeTransactionModificationListener(
1380: TableName table_name,
1381: TransactionModificationListener listener) {
1382: synchronized (modification_listeners) {
1383: ArrayList list = (ArrayList) modification_listeners
1384: .get(table_name);
1385: if (list != null) {
1386: int sz = list.size();
1387: for (int i = sz - 1; i >= 0; --i) {
1388: if (list.get(i) == listener) {
1389: list.remove(i);
1390: }
1391: }
1392: }
1393: }
1394: }
1395:
1396: // ---------- Transactional management ----------
1397:
1398: /**
1399: * Starts a new transaction. The Transaction object returned by this
1400: * method is used to read the contents of the database at the time
1401: * the transaction was started. It is also used if any modifications are
1402: * required to be made.
1403: */
1404: public Transaction createTransaction() {
1405: long this _commit_id;
1406: ArrayList this _committed_tables = new ArrayList();
1407:
1408: // Don't let a commit happen while we are looking at this.
1409: synchronized (commit_lock) {
1410:
1411: this _commit_id = commit_id;
1412: StateResource[] committed_table_list = state_store
1413: .getVisibleList();
1414: for (int i = 0; i < committed_table_list.length; ++i) {
1415: this _committed_tables
1416: .add(getMasterTable((int) committed_table_list[i].table_id));
1417: }
1418:
1419: // Create a set of IndexSet for all the tables in this transaction.
1420: int sz = this _committed_tables.size();
1421: ArrayList index_info = new ArrayList(sz);
1422: for (int i = 0; i < sz; ++i) {
1423: MasterTableDataSource mtable = (MasterTableDataSource) this _committed_tables
1424: .get(i);
1425: index_info.add(mtable.createIndexSet());
1426: }
1427:
1428: // Create the transaction and record it in the open transactions list.
1429: Transaction t = new Transaction(this , this _commit_id,
1430: this _committed_tables, index_info);
1431: open_transactions.addTransaction(t);
1432: return t;
1433:
1434: }
1435:
1436: }
1437:
1438: /**
1439: * This is called to notify the conglomerate that the transaction has
1440: * closed. This is always called from either the rollback or commit method
1441: * of the transaction object.
1442: * <p>
1443: * NOTE: This increments 'commit_id' and requires that the conglomerate is
1444: * commit locked.
1445: */
1446: private void closeTransaction(Transaction transaction) {
1447: boolean last_transaction = false;
1448: // Closing must happen under a commit lock.
1449: synchronized (commit_lock) {
1450: open_transactions.removeTransaction(transaction);
1451: // Increment the commit id.
1452: ++commit_id;
1453: // Was that the last transaction?
1454: last_transaction = open_transactions.count() == 0;
1455: }
1456:
1457: // If last transaction then schedule a clean up event.
1458: if (last_transaction) {
1459: try {
1460: cleanUpConglomerate();
1461: } catch (IOException e) {
1462: Debug().write(Lvl.ERROR, this ,
1463: "Error cleaning up conglomerate");
1464: Debug().writeException(Lvl.ERROR, e);
1465: }
1466: }
1467:
1468: }
1469:
1470: /**
1471: * Closes and drops the MasterTableDataSource. This should only be called
1472: * from the clean up method (cleanUpConglomerate()).
1473: * <p>
1474: * Returns true if the drop succeeded. A drop may fail if, for example, the
1475: * roots of the table are locked.
1476: * <p>
1477: * Note that the table_file_name will be encoded with the table type. For
1478: * example, ":2mighty.koi"
1479: */
1480: private boolean closeAndDropTable(String table_file_name)
1481: throws IOException {
1482: // Find the table with this file name.
1483: for (int i = 0; i < table_list.size(); ++i) {
1484: MasterTableDataSource t = (MasterTableDataSource) table_list
1485: .get(i);
1486: String enc_fn = table_file_name.substring(2);
1487: if (t.getSourceIdent().equals(enc_fn)) {
1488: // Close and remove from the list.
1489: if (t.isRootLocked()) {
1490: // We can't drop a table that has roots locked..
1491: return false;
1492: }
1493:
1494: // This drops if the table has been marked as being dropped.
1495: boolean b = t.drop();
1496: if (b) {
1497: table_list.remove(i);
1498: }
1499: return b;
1500: }
1501: }
1502: return false;
1503: }
1504:
1505: /**
1506: * Closes the MasterTableDataSource with the given source ident. This should
1507: * only be called from the clean up method (cleanUpConglomerate()).
1508: * <p>
1509: * Note that the table_file_name will be encoded with the table type. For
1510: * example, ":2mighty.koi"
1511: */
1512: private void closeTable(String table_file_name, boolean pending_drop)
1513: throws IOException {
1514: // Find the table with this file name.
1515: for (int i = 0; i < table_list.size(); ++i) {
1516: MasterTableDataSource t = (MasterTableDataSource) table_list
1517: .get(i);
1518: String enc_fn = table_file_name.substring(2);
1519: if (t.getSourceIdent().equals(enc_fn)) {
1520: // Close and remove from the list.
1521: if (t.isRootLocked()) {
1522: // We can't drop a table that has roots locked..
1523: return;
1524: }
1525:
1526: // This closes the table
1527: t.dispose(pending_drop);
1528: return;
1529: }
1530: }
1531: return;
1532: }
1533:
/**
 * Cleans up the conglomerate by deleting all tables marked as deleted.
 * This should be called when the conglomerate is opened, shutdown and
 * when there are no transactions open (a deleted table may still be
 * visible to an older open transaction).
 */
private void cleanUpConglomerate() throws IOException {
    synchronized (commit_lock) {
        // Nothing to do once the conglomerate is closed.
        if (isClosed()) {
            return;
        }

        // Only safe to physically remove tables when no open transaction
        // can still be reading them.
        if (open_transactions.count() == 0) {

            StateResource[] delete_list = state_store
                    .getDeleteList();
            if (delete_list.length > 0) {
                // Counts the tables actually dropped in the second pass.
                int drop_count = 0;

                // First pass: close every pending-drop table.  This is
                // deliberately a separate pass from the drop below - on
                // win32, memory mapped files can not be deleted until the
                // mapping is released, so close everything first.
                for (int i = delete_list.length - 1; i >= 0; --i) {
                    String fn = (String) delete_list[i].name;
                    closeTable(fn, true);
                }

                // Second pass: drop each closed table and, on success,
                // remove it from the state store's delete list.  A drop
                // can fail (e.g. roots locked) - the entry then stays on
                // the delete list for a later clean up.
                for (int i = delete_list.length - 1; i >= 0; --i) {
                    String fn = (String) delete_list[i].name;
                    boolean dropped = closeAndDropTable(fn);
                    // If we managed to drop the table, remove from the list.
                    if (dropped) {
                        state_store.removeDeleteResource(fn);
                        ++drop_count;
                    }
                }

                // If we dropped a table, commit the updated conglomerate
                // state.
                if (drop_count > 0) {
                    state_store.commit();
                }
            }

        }
    }
}
1593:
1594: // ---------- Detection of constraint violations ----------
1595:
1596: /**
1597: * A variable resolver for a single row of a table source. Used when
1598: * evaluating a check constraint for newly added row.
1599: */
1600: private static class TableRowVariableResolver implements
1601: VariableResolver {
1602:
1603: private TableDataSource table;
1604: private int row_index = -1;
1605:
1606: public TableRowVariableResolver(TableDataSource table, int row) {
1607: this .table = table;
1608: this .row_index = row;
1609: }
1610:
1611: private int findColumnName(Variable variable) {
1612: int col_index = table.getDataTableDef().findColumnName(
1613: variable.getName());
1614: if (col_index == -1) {
1615: throw new Error("Can't find column: " + variable);
1616: }
1617: return col_index;
1618: }
1619:
1620: // --- Implemented ---
1621:
1622: public int setID() {
1623: return row_index;
1624: }
1625:
1626: public TObject resolve(Variable variable) {
1627: int col_index = findColumnName(variable);
1628: return table.getCellContents(col_index, row_index);
1629: }
1630:
1631: public TType returnTType(Variable variable) {
1632: int col_index = findColumnName(variable);
1633: return table.getDataTableDef().columnAt(col_index)
1634: .getTType();
1635: }
1636:
1637: }
1638:
1639: /**
1640: * Convenience, converts a String[] array to a comma deliminated string
1641: * list.
1642: */
1643: static String stringColumnList(String[] list) {
1644: StringBuffer buf = new StringBuffer();
1645: for (int i = 0; i < list.length - 1; ++i) {
1646: buf.append(list[i]);
1647: }
1648: buf.append(list[list.length - 1]);
1649: return new String(buf);
1650: }
1651:
1652: /**
1653: * Convenience, returns either 'Immediate' or 'Deferred' dependant on the
1654: * deferred short.
1655: */
1656: static String deferredString(short deferred) {
1657: switch (deferred) {
1658: case (Transaction.INITIALLY_IMMEDIATE):
1659: return "Immediate";
1660: case (Transaction.INITIALLY_DEFERRED):
1661: return "Deferred";
1662: default:
1663: throw new Error("Unknown deferred string.");
1664: }
1665: }
1666:
1667: /**
1668: * Returns a list of column indices into the given DataTableDef for the
1669: * given column names.
1670: */
1671: static int[] findColumnIndices(DataTableDef table_def, String[] cols) {
1672: // Resolve the list of column names to column indexes
1673: int[] col_indexes = new int[cols.length];
1674: for (int i = 0; i < cols.length; ++i) {
1675: col_indexes[i] = table_def.findColumnName(cols[i]);
1676: }
1677: return col_indexes;
1678: }
1679:
/**
 * Checks the uniqueness of the columns in the row of the table.  If
 * the given column information in the row data is not unique then it
 * returns false.  We also check for NULL values - a PRIMARY KEY
 * constraint does not allow NULL values, whereas a UNIQUE constraint
 * does (controlled by 'nulls_are_allowed').
 */
private static boolean isUniqueColumns(TableDataSource table,
        int rindex, String[] cols, boolean nulls_are_allowed) {

    DataTableDef table_def = table.getDataTableDef();
    // 'identical_rows' keeps a running tally of the rows whose cells all
    // match the row being tested.  null until the first column is
    // processed.
    IntegerVector identical_rows = null;

    // Resolve the list of column names to column indexes
    int[] col_indexes = findColumnIndices(table_def, cols);

    // If the value being tested for uniqueness contains NULL, the result
    // is decided solely by whether the constraint permits NULLs.
    for (int i = 0; i < col_indexes.length; ++i) {
        TObject cell = table
                .getCellContents(col_indexes[i], rindex);
        if (cell.isNull()) {
            return nulls_are_allowed;
        }
    }

    for (int i = 0; i < col_indexes.length; ++i) {

        int col_index = col_indexes[i];

        // The cell of the tested row at this column.
        TObject cell = table.getCellContents(col_index, rindex);

        // We are assured of uniqueness if 'identical_rows != null &&
        // identical_rows.size() == 0' - once the tally is empty no
        // further column can make it non-empty, so skip the lookup.

        if (identical_rows == null || identical_rows.size() > 0) {

            // Ask SelectableScheme for the row(s) holding a cell equal
            // to this one in the current column.

            SelectableScheme ss = table.getColumnScheme(col_index);
            IntegerVector ivec = ss.selectEqual(cell);

            // First column: the matches seed the tally.  Subsequent
            // columns: intersect the tally with this column's matches,
            // keeping only rows that match on every column so far.

            if (identical_rows == null) {
                identical_rows = ivec;
            } else {
                // Sort so membership can be tested via binary search.
                ivec.quickSort();
                int row_index = identical_rows.size() - 1;
                while (row_index >= 0) {
                    int val = identical_rows.intAt(row_index);
                    int found_index = ivec.sortedIndexOf(val);
                    // If we _didn't_ find the index in the array, drop
                    // the row from the tally.
                    if (found_index >= ivec.size()
                            || ivec.intAt(found_index) != val) {
                        identical_rows.removeIntAt(row_index);
                    }
                    --row_index;
                }
            }

        }

    } // for each column

    // The tested row always matches itself, so a tally of exactly 1
    // means unique; more than 1 means a duplicate exists; 0 is
    // impossible and indicates corruption.
    if (identical_rows != null) {
        int sz = identical_rows.size();
        if (sz == 1) {
            return true;
        }
        if (sz > 1) {
            return false;
        } else if (sz == 0) {
            throw new Error(
                    "Assertion failed: We must be able to find the "
                            + "row we are testing uniqueness against!");
        }
    }
    // No columns given (identical_rows never set) - trivially unique.
    return true;

}
1774:
1775: /**
1776: * Returns the key indices found in the given table. The keys are
1777: * in the given column indices, and the key is in the 'key' array. This can
1778: * be used to count the number of keys found in a table for constraint
1779: * violation checking.
1780: */
1781: static IntegerVector findKeys(TableDataSource t2,
1782: int[] col2_indexes, TObject[] key_value) {
1783:
1784: int key_size = key_value.length;
1785: // Now query table 2 to determine if the key values are present.
1786: // Use index scan on first key.
1787: SelectableScheme ss = t2.getColumnScheme(col2_indexes[0]);
1788: IntegerVector list = ss.selectEqual(key_value[0]);
1789: if (key_size > 1) {
1790: // Full scan for the rest of the columns
1791: int sz = list.size();
1792: // For each element of the list
1793: for (int i = sz - 1; i >= 0; --i) {
1794: int r_index = list.intAt(i);
1795: // For each key in the column list
1796: for (int c = 1; c < key_size; ++c) {
1797: int col_index = col2_indexes[c];
1798: TObject c_value = key_value[c];
1799: if (c_value.compareTo(t2.getCellContents(col_index,
1800: r_index)) != 0) {
1801: // If any values in the key are not equal set this flag to false
1802: // and remove the index from the list.
1803: list.removeIntAt(i);
1804: // Break the for loop
1805: break;
1806: }
1807: }
1808: }
1809: }
1810:
1811: return list;
1812: }
1813:
/**
 * Finds the number of rows that are referenced between the given row of
 * table1 and that match table2.  This method is used to determine if
 * there are referential links.
 * <p>
 * If this method returns -1 it means the value being searched for is
 * NULL therefore we can't determine if there are any referenced links.
 * <p>
 * HACK: If 'check_source_table_key' is set then the key is checked for
 * in the source table and if it exists returns 0.  Otherwise it looks
 * for references to the key in table2.
 */
private static int rowCountOfReferenceTable(
        SimpleTransaction transaction, int row_index,
        TableName table1, String[] cols1, TableName table2,
        String[] cols2, boolean check_source_table_key) {

    // Get the tables
    TableDataSource t1 = transaction.getTableDataSource(table1);
    TableDataSource t2 = transaction.getTableDataSource(table2);
    // The table defs
    DataTableDef dtd1 = t1.getDataTableDef();
    DataTableDef dtd2 = t2.getDataTableDef();
    // Resolve the list of column names to column indexes
    int[] col1_indexes = findColumnIndices(dtd1, cols1);
    int[] col2_indexes = findColumnIndices(dtd2, cols2);

    int key_size = col1_indexes.length;
    // Read the key value out of the given row of table1, counting any
    // NULL components as we go.
    TObject[] key_value = new TObject[key_size];
    int null_count = 0;
    for (int n = 0; n < key_size; ++n) {
        key_value[n] = t1.getCellContents(col1_indexes[n],
                row_index);
        if (key_value[n].isNull()) {
            ++null_count;
        }
    }

    // A key containing NULL can not be matched - signal with -1.
    if (null_count > 0) {
        return -1;
    }

    // HACK: This is a hack.  The purpose is if the key exists in the
    // source table we return 0, indicating to the delete check that
    // there are no references and it's valid.  To the semantics of the
    // method this is incorrect.
    if (check_source_table_key) {
        IntegerVector keys = findKeys(t1, col1_indexes, key_value);
        int key_count = keys.size();
        if (key_count > 0) {
            return 0;
        }
    }

    // Count the rows of table2 whose cols2 columns equal the key.
    return findKeys(t2, col2_indexes, key_value).size();
}
1872:
1873: /**
 * Checks that the nullability and class of the fields in the given
1875: * rows are valid. Should be used as part of the insert procedure.
1876: */
1877: static void checkFieldConstraintViolations(
1878: SimpleTransaction transaction, TableDataSource table,
1879: int[] row_indices) {
1880:
1881: // Quick exit case
1882: if (row_indices == null || row_indices.length == 0) {
1883: return;
1884: }
1885:
1886: // Check for any bad cells - which are either cells that are 'null' in a
1887: // column declared as 'not null', or duplicated in a column declared as
1888: // unique.
1889:
1890: DataTableDef table_def = table.getDataTableDef();
1891: TableName table_name = table_def.getTableName();
1892:
1893: // Check not-null columns are not null. If they are null, throw an
1894: // error. Additionally check that JAVA_OBJECT columns are correctly
1895: // typed.
1896:
1897: // Check each field of the added rows
1898: int len = table_def.columnCount();
1899: for (int i = 0; i < len; ++i) {
1900:
1901: // Get the column definition and the cell being inserted,
1902: DataTableColumnDef column_def = table_def.columnAt(i);
1903: // For each row added to this column
1904: for (int rn = 0; rn < row_indices.length; ++rn) {
1905: TObject cell = table
1906: .getCellContents(i, row_indices[rn]);
1907:
1908: // Check: Column defined as not null and cell being inserted is
1909: // not null.
1910: if (column_def.isNotNull() && cell.isNull()) {
1911: throw new DatabaseConstraintViolationException(
1912: DatabaseConstraintViolationException.NULLABLE_VIOLATION,
1913: "You tried to add 'null' cell to column '"
1914: + table_def.columnAt(i).getName()
1915: + "' which is declared as 'not_null'");
1916: }
1917:
1918: // Check: If column is a java object, then deserialize and check the
1919: // object is an instance of the class constraint,
1920: if (!cell.isNull()
1921: && column_def.getSQLType() == com.mckoi.database.global.SQLTypes.JAVA_OBJECT) {
1922: String class_constraint = column_def
1923: .getClassConstraint();
1924: // Everything is derived from java.lang.Object so this optimization
1925: // will not cause an object deserialization.
1926: if (!class_constraint.equals("java.lang.Object")) {
1927: // Get the binary representation of the java object
1928: ByteLongObject serialized_jobject = (ByteLongObject) cell
1929: .getObject();
1930: // Deserialize the object
1931: Object ob = ObjectTranslator
1932: .deserialize(serialized_jobject);
1933: // Check it's assignable from the constraining class
1934: if (!ob.getClass().isAssignableFrom(
1935: column_def.getClassConstraintAsClass())) {
1936: throw new DatabaseConstraintViolationException(
1937: DatabaseConstraintViolationException.JAVA_TYPE_VIOLATION,
1938: "The Java object being inserted is not derived from the "
1939: + "class constraint defined for the column ("
1940: + class_constraint + ")");
1941: }
1942: }
1943: }
1944:
1945: } // For each row being added
1946:
1947: } // for each column
1948:
1949: }
1950:
1951: /**
1952: * Performs constraint violation checks on an addition of the given set of
1953: * row indices into the TableDataSource in the given transaction. If a
1954: * violation is detected a DatabaseConstraintViolationException is thrown.
1955: * <p>
1956: * If deferred = IMMEDIATE only immediate constraints are tested. If
1957: * deferred = DEFERRED all constraints are tested.
1958: *
1959: * @param transaction the Transaction instance used to determine table
1960: * constraints.
1961: * @param table the table to test
1962: * @param row_indices the list of rows that were added to the table.
1963: * @param deferred '1' indicates Transaction.IMMEDIATE,
1964: * '2' indicates Transaction.DEFERRED.
1965: */
static void checkAddConstraintViolations(
    SimpleTransaction transaction, TableDataSource table,
    int[] row_indices, short deferred) {

  // The schema of the table is the context CHECK expressions are
  // evaluated in.
  String cur_schema = table.getDataTableDef().getSchema();
  QueryContext context = new SystemQueryContext(transaction,
      cur_schema);

  // Quick exit case - nothing was added so there's nothing to check.
  if (row_indices == null || row_indices.length == 0) {
    return;
  }

  DataTableDef table_def = table.getDataTableDef();
  TableName table_name = table_def.getTableName();

  // ---- Constraint checking ----

  // Check any primary key constraint.
  // A constraint is tested here when all constraints are being tested
  // (deferred == INITIALLY_DEFERRED) or the constraint itself is declared
  // INITIALLY_IMMEDIATE.  The same gate applies to every constraint kind
  // below.
  Transaction.ColumnGroup primary_key = Transaction
      .queryTablePrimaryKeyGroup(transaction, table_name);
  if (primary_key != null
      && (deferred == Transaction.INITIALLY_DEFERRED || primary_key.deferred == Transaction.INITIALLY_IMMEDIATE)) {

    // For each row added to this column - each added row must be unique
    // over the primary key columns (NULLs not permitted to match).
    for (int rn = 0; rn < row_indices.length; ++rn) {
      if (!isUniqueColumns(table, row_indices[rn],
          primary_key.columns, false)) {
        throw new DatabaseConstraintViolationException(
            DatabaseConstraintViolationException.PRIMARY_KEY_VIOLATION,
            deferredString(deferred)
            + " primary Key constraint violation ("
            + primary_key.name
            + ") Columns = ( "
            + stringColumnList(primary_key.columns)
            + " ) Table = ( "
            + table_name.toString() + " )");
      }
    } // For each row being added

  }

  // Check any unique constraints.
  // Note: unlike the primary key check above, 'isUniqueColumns' is called
  // with nulls_allowed = true here.
  Transaction.ColumnGroup[] unique_constraints = Transaction
      .queryTableUniqueGroups(transaction, table_name);
  for (int i = 0; i < unique_constraints.length; ++i) {
    Transaction.ColumnGroup unique = unique_constraints[i];
    if (deferred == Transaction.INITIALLY_DEFERRED
        || unique.deferred == Transaction.INITIALLY_IMMEDIATE) {

      // For each row added to this column
      for (int rn = 0; rn < row_indices.length; ++rn) {
        if (!isUniqueColumns(table, row_indices[rn],
            unique.columns, true)) {
          throw new DatabaseConstraintViolationException(
              DatabaseConstraintViolationException.UNIQUE_VIOLATION,
              deferredString(deferred)
              + " unique constraint violation ("
              + unique.name
              + ") Columns = ( "
              + stringColumnList(unique.columns)
              + " ) Table = ( "
              + table_name.toString() + " )");
        }
      } // For each row being added

    }
  }

  // Check any foreign key constraints.
  // This ensures all foreign references in the table are referenced
  // to valid records.
  Transaction.ColumnGroupReference[] foreign_constraints = Transaction
      .queryTableForeignKeyReferences(transaction, table_name);
  for (int i = 0; i < foreign_constraints.length; ++i) {
    Transaction.ColumnGroupReference ref = foreign_constraints[i];
    if (deferred == Transaction.INITIALLY_DEFERRED
        || ref.deferred == Transaction.INITIALLY_IMMEDIATE) {
      // For each row added to this column
      for (int rn = 0; rn < row_indices.length; ++rn) {
        // Make sure the referenced record exists

        // Return the count of records where the given row of
        // table_name(columns, ...) IN
        // ref_table_name(ref_columns, ...)
        int row_count = rowCountOfReferenceTable(
            transaction, row_indices[rn],
            ref.key_table_name, ref.key_columns,
            ref.ref_table_name, ref.ref_columns, false);
        if (row_count == -1) {
          // foreign key is NULL - per SQL semantics a NULL key can't
          // violate the reference, so intentionally fall through without
          // raising an error.
        }
        if (row_count == 0) {
          // No matching record in the referenced table - violation.
          throw new DatabaseConstraintViolationException(
              DatabaseConstraintViolationException.FOREIGN_KEY_VIOLATION,
              deferredString(deferred)
              + " foreign key constraint violation ("
              + ref.name
              + ") Columns = "
              + ref.key_table_name.toString()
              + "( "
              + stringColumnList(ref.key_columns)
              + " ) -> "
              + ref.ref_table_name.toString()
              + "( "
              + stringColumnList(ref.ref_columns)
              + " )");
        }
      } // For each row being added.
    }
  }

  // Any general CHECK constraints over the inserted data.
  Transaction.CheckExpression[] check_constraints = Transaction
      .queryTableCheckExpressions(transaction, table_name);

  // The TransactionSystem object
  TransactionSystem system = transaction.getSystem();

  // For each check constraint, check that it evaluates to true.
  for (int i = 0; i < check_constraints.length; ++i) {
    Transaction.CheckExpression check = check_constraints[i];
    if (deferred == Transaction.INITIALLY_DEFERRED
        || check.deferred == Transaction.INITIALLY_IMMEDIATE) {

      // Prepare the expression against this table's definition before
      // evaluating it.
      check = system.prepareTransactionCheckConstraint(
          table_def, check);
      Expression exp = check.expression;

      // For each row being added to this column, evaluate the expression
      // with the row's columns available as variables.
      for (int rn = 0; rn < row_indices.length; ++rn) {
        TableRowVariableResolver resolver = new TableRowVariableResolver(
            table, row_indices[rn]);
        TObject ob = exp.evaluate(null, resolver, context);
        Boolean b = ob.toBoolean();

        if (b != null) {
          if (b.equals(Boolean.FALSE)) {
            // Evaluated to false so don't allow this row to be added.
            throw new DatabaseConstraintViolationException(
                DatabaseConstraintViolationException.CHECK_VIOLATION,
                deferredString(deferred)
                + " check constraint violation ("
                + check.name
                + ") - '"
                + exp.text()
                + "' evaluated to false for inserted/updated row.");
          }
        } else {
          // Expression returned a non-boolean or NULL result.
          // NOTE: This error will pass the row by default - it is only
          // logged, not thrown.
          transaction
              .Debug()
              .write(
                  Lvl.ERROR,
                  TableDataConglomerate.class,
                  deferredString(deferred)
                  + " check constraint violation ("
                  + check.name
                  + ") - '"
                  + exp.text()
                  + "' returned a non boolean or NULL result.");
        }
      } // For each row being added

    }
  }

}
2134:
2135: /**
2136: * Performs constraint violation checks on an addition of the given
2137: * row index into the TableDataSource in the given transaction. If a
2138: * violation is detected a DatabaseConstraintViolationException is thrown.
2139: * <p>
2140: * If deferred = IMMEDIATE only immediate constraints are tested. If
2141: * deferred = DEFERRED all constraints are tested.
2142: *
2143: * @param transaction the Transaction instance used to determine table
2144: * constraints.
2145: * @param table the table to test
2146: * @param row_index the row that was added to the table.
2147: * @param deferred '1' indicates Transaction.IMMEDIATE,
2148: * '2' indicates Transaction.DEFERRED.
2149: */
2150: static void checkAddConstraintViolations(
2151: SimpleTransaction transaction, TableDataSource table,
2152: int row_index, short deferred) {
2153: checkAddConstraintViolations(transaction, table,
2154: new int[] { row_index }, deferred);
2155: }
2156:
2157: /**
2158: * Performs constraint violation checks on a removal of the given set of
2159: * row indexes from the TableDataSource in the given transaction. If a
2160: * violation is detected a DatabaseConstraintViolationException is thrown.
2161: * <p>
2162: * If deferred = IMMEDIATE only immediate constraints are tested. If
2163: * deferred = DEFERRED all constraints are tested.
2164: *
2165: * @param transaction the Transaction instance used to determine table
2166: * constraints.
2167: * @param table the table to test
2168: * @param row_indices the set of rows that were removed from the table.
2169: * @param deferred '1' indicates Transaction.IMMEDIATE,
2170: * '2' indicates Transaction.DEFERRED.
2171: */
2172: static void checkRemoveConstraintViolations(
2173: SimpleTransaction transaction, TableDataSource table,
2174: int[] row_indices, short deferred) {
2175:
2176: // Quick exit case
2177: if (row_indices == null || row_indices.length == 0) {
2178: return;
2179: }
2180:
2181: DataTableDef table_def = table.getDataTableDef();
2182: TableName table_name = table_def.getTableName();
2183:
2184: // Check any imported foreign key constraints.
2185: // This ensures that a referential reference can not be removed making
2186: // it invalid.
2187: Transaction.ColumnGroupReference[] foreign_constraints = Transaction
2188: .queryTableImportedForeignKeyReferences(transaction,
2189: table_name);
2190: for (int i = 0; i < foreign_constraints.length; ++i) {
2191: Transaction.ColumnGroupReference ref = foreign_constraints[i];
2192: if (deferred == Transaction.INITIALLY_DEFERRED
2193: || ref.deferred == Transaction.INITIALLY_IMMEDIATE) {
2194: // For each row removed from this column
2195: for (int rn = 0; rn < row_indices.length; ++rn) {
2196: // Make sure the referenced record exists
2197:
2198: // Return the count of records where the given row of
2199: // ref_table_name(columns, ...) IN
2200: // table_name(ref_columns, ...)
2201: int row_count = rowCountOfReferenceTable(
2202: transaction, row_indices[rn],
2203: ref.ref_table_name, ref.ref_columns,
2204: ref.key_table_name, ref.key_columns, true);
2205: // There must be 0 references otherwise the delete isn't allowed to
2206: // happen.
2207: if (row_count > 0) {
2208: throw new DatabaseConstraintViolationException(
2209: DatabaseConstraintViolationException.FOREIGN_KEY_VIOLATION,
2210: deferredString(deferred)
2211: + " foreign key constraint violation "
2212: + "on delete ("
2213: + ref.name
2214: + ") Columns = "
2215: + ref.key_table_name.toString()
2216: + "( "
2217: + stringColumnList(ref.key_columns)
2218: + " ) -> "
2219: + ref.ref_table_name.toString()
2220: + "( "
2221: + stringColumnList(ref.ref_columns)
2222: + " )");
2223: }
2224: } // For each row being added.
2225: }
2226: }
2227:
2228: }
2229:
2230: /**
2231: * Performs constraint violation checks on a removal of the given
2232: * row index from the TableDataSource in the given transaction. If a
2233: * violation is detected a DatabaseConstraintViolationException is thrown.
2234: * <p>
2235: * If deferred = IMMEDIATE only immediate constraints are tested. If
2236: * deferred = DEFERRED all constraints are tested.
2237: *
2238: * @param transaction the Transaction instance used to determine table
2239: * constraints.
2240: * @param table the table to test
2241: * @param row_index the row that was removed from the table.
2242: * @param deferred '1' indicates Transaction.IMMEDIATE,
2243: * '2' indicates Transaction.DEFERRED.
2244: */
2245: static void checkRemoveConstraintViolations(
2246: SimpleTransaction transaction, TableDataSource table,
2247: int row_index, short deferred) {
2248: checkRemoveConstraintViolations(transaction, table,
2249: new int[] { row_index }, deferred);
2250: }
2251:
2252: /**
2253: * Performs constraint violation checks on all the rows in the given
2254: * table. If a violation is detected a DatabaseConstraintViolationException
2255: * is thrown.
2256: * <p>
2257: * This method is useful when the constraint schema of a table changes and
2258: * we need to check existing data in a table is conformant with the new
2259: * constraint changes.
2260: * <p>
2261: * If deferred = IMMEDIATE only immediate constraints are tested. If
2262: * deferred = DEFERRED all constraint are tested.
2263: */
2264: static void checkAllAddConstraintViolations(
2265: SimpleTransaction transaction, TableDataSource table,
2266: short deferred) {
2267: // Get all the rows in the table
2268: int[] rows = new int[table.getRowCount()];
2269: RowEnumeration row_enum = table.rowEnumeration();
2270: int p = 0;
2271: while (row_enum.hasMoreRows()) {
2272: rows[p] = row_enum.nextRowIndex();
2273: ++p;
2274: }
2275: // Check the constraints of all the rows in the table.
2276: checkAddConstraintViolations(transaction, table, rows,
2277: Transaction.INITIALLY_DEFERRED);
2278: }
2279:
2280: // ---------- Blob store and object management ----------
2281:
2282: /**
2283: * Creates and allocates storage for a new large object in the blob store.
2284: * This is called to create a new large object before filling it with data
2285: * sent from the client.
2286: */
2287: Ref createNewLargeObject(byte type, long size) {
2288: try {
2289: // If the conglomerate is read-only, a blob can not be created.
2290: if (isReadOnly()) {
2291: throw new RuntimeException(
2292: "A new large object can not be allocated "
2293: + "with a read-only conglomerate");
2294: }
2295: // Allocate the large object from the store
2296: Ref ref = blob_store.allocateLargeObject(type, size);
2297: // Return the large object reference
2298: return ref;
2299: } catch (IOException e) {
2300: Debug().writeException(e);
2301: throw new RuntimeException("IO Error when creating blob: "
2302: + e.getMessage());
2303: }
2304: }
2305:
2306: /**
2307: * Called when one or more blobs has been completed. This flushes the blob
2308: * to the blob store and completes the blob write procedure. It's important
2309: * this is called otherwise the BlobStore may not be correctly flushed to
2310: * disk with the changes and the data will not be recoverable if a crash
2311: * occurs.
2312: */
2313: void flushBlobStore() {
2314: // NOTE: no longer necessary - please deprecate
2315: }
2316:
2317: // ---------- Conglomerate diagnosis and repair methods ----------
2318:
2319: /**
2320: * Checks the conglomerate state file. The returned ErrorState object
2321: * contains information about any error generated.
2322: */
2323: public void fix(String name, UserTerminal terminal) {
2324: this .name = name;
2325:
2326: try {
2327:
2328: String state_fn = (name + STATE_POST);
2329: boolean state_exists = false;
2330: try {
2331: state_exists = exists(name);
2332: } catch (IOException e) {
2333: terminal
2334: .println("IO Error when checking if state store exists: "
2335: + e.getMessage());
2336: e.printStackTrace();
2337: }
2338:
2339: if (!state_exists) {
2340: terminal.println("Couldn't find store: " + state_fn);
2341: return;
2342: }
2343: terminal.println("+ Found state store: " + state_fn);
2344:
2345: // Open the state store
2346: try {
2347: act_state_store = storeSystem().openStore(
2348: name + STATE_POST);
2349: state_store = new StateStore(act_state_store);
2350: // Get the 64 byte fixed area
2351: Area fixed_area = act_state_store.getArea(-1);
2352: long head_p = fixed_area.getLong();
2353: state_store.init(head_p);
2354: terminal.println("+ Initialized the state store: "
2355: + state_fn);
2356: } catch (IOException e) {
2357: // Couldn't initialize the state file.
2358: terminal.println("Couldn't initialize the state file: "
2359: + state_fn + " Reason: " + e.getMessage());
2360: return;
2361: }
2362:
2363: // Initialize the blob store
2364: try {
2365: initializeBlobStore();
2366: } catch (IOException e) {
2367: terminal.println("Error intializing BlobStore: "
2368: + e.getMessage());
2369: e.printStackTrace();
2370: return;
2371: }
2372: // Setup internal
2373: setupInternal();
2374:
2375: try {
2376: checkVisibleTables(terminal);
2377:
2378: // Reset the sequence id's for the system tables
2379: terminal
2380: .println("+ RESETTING ALL SYSTEM TABLE UNIQUE ID VALUES.");
2381: resetAllSystemTableID();
2382:
2383: // Some diagnostic information
2384: StringBuffer buf = new StringBuffer();
2385: MasterTableDataSource t;
2386: StateResource[] committed_tables = state_store
2387: .getVisibleList();
2388: StateResource[] committed_dropped = state_store
2389: .getDeleteList();
2390: for (int i = 0; i < committed_tables.length; ++i) {
2391: terminal.println("+ COMMITTED TABLE: "
2392: + committed_tables[i].name);
2393: }
2394: for (int i = 0; i < committed_dropped.length; ++i) {
2395: terminal.println("+ COMMIT DROPPED TABLE: "
2396: + committed_dropped[i].name);
2397: }
2398:
2399: return;
2400:
2401: } catch (IOException e) {
2402: terminal.println("IOException: " + e.getMessage());
2403: e.printStackTrace();
2404: }
2405:
2406: } finally {
2407: try {
2408: close();
2409: } catch (IOException e) {
2410: terminal
2411: .println("Unable to close conglomerate after fix.");
2412: }
2413: }
2414:
2415: }
2416:
2417: // ---------- Conveniences for commit ----------
2418:
2419: /**
2420: * A static container class for information collected about a table during
2421: * the commit cycle.
2422: */
private static class CommitTableInfo {
  // The master table the commit information applies to.
  MasterTableDataSource master;
  // The immutable index set of the table.
  IndexSet index_set;
  // The journal describing the changes to this table by this
  // transaction.
  MasterTableJournal journal;
  // The journals describing changes made to this table committed since
  // this transaction started (populated via 'findAllJournalsSince').
  MasterTableJournal[] changes_since_commit;
  // Break down of changes to the table:
  // Normalized list of row ids that were added.
  int[] norm_added_rows;
  // Normalized list of row ids that were removed.
  int[] norm_removed_rows;
}
2440:
2441: /**
2442: * Returns true if the given List of 'CommitTableInfo' objects contains an
2443: * entry for the given master table.
2444: */
2445: private static boolean commitTableListContains(List list,
2446: MasterTableDataSource master) {
2447: int sz = list.size();
2448: for (int i = 0; i < sz; ++i) {
2449: CommitTableInfo info = (CommitTableInfo) list.get(i);
2450: if (info.master.equals(master)) {
2451: return true;
2452: }
2453: }
2454: return false;
2455: }
2456:
2457: // ---------- low level File IO level operations on a conglomerate ----------
2458: // These operations are low level IO operations on the contents of the
2459: // conglomerate. How the rows and tables are organised is up to the
// transaction management.  These methods deal with the low level
2461: // operations of creating/dropping tables and adding, deleting and querying
2462: // row in tables.
2463:
2464: /**
2465: * Tries to commit a transaction to the conglomerate. This is called
2466: * by the 'closeAndCommit' method in Transaction. An overview of how this
2467: * works follows:
2468: * <ul>
2469: * <li> Determine if any transactions have been committed since this
2470: * transaction was created.
2471: * <li> If no transactions committed then commit this transaction and exit.
2472: * <li> Otherwise, determine the tables that have been changed by the
2473: * committed transactions since this was created.
2474: * <li> If no tables changed in the tables changed by this transaction then
2475: * commit this transaction and exit.
2476: * <li> Determine if there are any rows that have been deleted that this
2477: * transaction read/deleted.
2478: * <li> If there are then rollback this transaction and throw an error.
2479: * <li> Determine if any rows have been added to the tables this transaction
2480: * read/changed.
2481: * <li> If there are then rollback this transaction and throw an error.
2482: * <li> Otherwise commit the transaction.
2483: * </ul>
2484: *
2485: * @param transaction the transaction to commit from.
2486: * @param visible_tables the list of visible tables at the end of the commit
2487: * (MasterTableDataSource)
 * @param selected_from_tables the list of tables that this transaction
2489: * performed 'select' like queries on (MasterTableDataSource)
2490: * @param touched_tables the list of tables touched by the transaction
2491: * (MutableTableDataSource)
2492: * @param journal the journal that describes all the changes within the
2493: * transaction.
2494: */
2495: void processCommit(Transaction transaction,
2496: ArrayList visible_tables, ArrayList selected_from_tables,
2497: ArrayList touched_tables, TransactionJournal journal)
2498: throws TransactionException {
2499:
2500: // Get individual journals for updates made to tables in this
2501: // transaction.
2502: // The list MasterTableJournal
2503: ArrayList journal_list = new ArrayList();
2504: for (int i = 0; i < touched_tables.size(); ++i) {
2505: MasterTableJournal table_journal = ((MutableTableDataSource) touched_tables
2506: .get(i)).getJournal();
2507: if (table_journal.entries() > 0) { // Check the journal has entries.
2508: journal_list.add(table_journal);
2509: }
2510: }
2511: MasterTableJournal[] changed_tables = (MasterTableJournal[]) journal_list
2512: .toArray(new MasterTableJournal[journal_list.size()]);
2513:
2514: // The list of tables created by this journal.
2515: IntegerVector created_tables = journal.getTablesCreated();
// The list of tables dropped by this journal.
2517: IntegerVector dropped_tables = journal.getTablesDropped();
2518: // The list of tables that constraints were alter by this journal
2519: IntegerVector constraint_altered_tables = journal
2520: .getTablesConstraintAltered();
2521:
2522: // Exit early if nothing changed (this is a read-only transaction)
2523: if (changed_tables.length == 0 && created_tables.size() == 0
2524: && dropped_tables.size() == 0
2525: && constraint_altered_tables.size() == 0) {
2526: closeTransaction(transaction);
2527: return;
2528: }
2529:
2530: // This flag is set to true when entries from the changes tables are
2531: // at a point of no return. If this is false it is safe to rollback
2532: // changes if necessary.
2533: boolean entries_committed = false;
2534:
2535: // The tables that were actually changed (MasterTableDataSource)
2536: ArrayList changed_tables_list = new ArrayList();
2537:
2538: // Grab the commit lock.
2539: synchronized (commit_lock) {
2540:
2541: // Get the list of all database objects that were created in the
2542: // transaction.
2543: ArrayList database_objects_created = transaction
2544: .getAllNamesCreated();
2545: // Get the list of all database objects that were dropped in the
2546: // transaction.
2547: ArrayList database_objects_dropped = transaction
2548: .getAllNamesDropped();
2549:
2550: // This is a transaction that will represent the view of the database
2551: // at the end of the commit
2552: Transaction check_transaction = null;
2553:
2554: try {
2555:
2556: // ---- Commit check stage ----
2557:
2558: long tran_commit_id = transaction.getCommitID();
2559:
2560: // We only perform this check if transaction error on dirty selects
2561: // are enabled.
2562: if (transaction.transactionErrorOnDirtySelect()) {
2563:
2564: // For each table that this transaction selected from, if there are
2565: // any committed changes then generate a transaction error.
2566: for (int i = 0; i < selected_from_tables.size(); ++i) {
2567: MasterTableDataSource selected_table = (MasterTableDataSource) selected_from_tables
2568: .get(i);
2569: // Find all committed journals equal to or greater than this
2570: // transaction's commit_id.
2571: MasterTableJournal[] journals_since = selected_table
2572: .findAllJournalsSince(tran_commit_id);
2573: if (journals_since.length > 0) {
2574: // Yes, there are changes so generate transaction error and
2575: // rollback.
2576: throw new TransactionException(
2577: TransactionException.DIRTY_TABLE_SELECT,
2578: "Concurrent Serializable Transaction Conflict(4): "
2579: + "Select from table that has committed changes: "
2580: + selected_table.getName());
2581: }
2582: }
2583: }
2584:
2585: // Check there isn't a namespace clash with database objects.
2586: // We need to create a list of all create and drop activity in the
2587: // conglomerate from when the transaction started.
2588: ArrayList all_dropped_obs = new ArrayList();
2589: ArrayList all_created_obs = new ArrayList();
2590: int nsj_sz = namespace_journal_list.size();
2591: for (int i = 0; i < nsj_sz; ++i) {
2592: NameSpaceJournal ns_journal = (NameSpaceJournal) namespace_journal_list
2593: .get(i);
2594: if (ns_journal.commit_id >= tran_commit_id) {
2595: all_dropped_obs
2596: .addAll(ns_journal.dropped_names);
2597: all_created_obs
2598: .addAll(ns_journal.created_names);
2599: }
2600: }
2601:
2602: // The list of all dropped objects since this transaction
2603: // began.
2604: int ado_sz = all_dropped_obs.size();
2605: boolean conflict5 = false;
2606: Object conflict_name = null;
2607: String conflict_desc = "";
2608: for (int n = 0; n < ado_sz; ++n) {
2609: if (database_objects_dropped
2610: .contains(all_dropped_obs.get(n))) {
2611: conflict5 = true;
2612: conflict_name = all_dropped_obs.get(n);
2613: conflict_desc = "Drop Clash";
2614: }
2615: }
2616: // The list of all created objects since this transaction
2617: // began.
2618: int aco_sz = all_created_obs.size();
2619: for (int n = 0; n < aco_sz; ++n) {
2620: if (database_objects_created
2621: .contains(all_created_obs.get(n))) {
2622: conflict5 = true;
2623: conflict_name = all_created_obs.get(n);
2624: conflict_desc = "Create Clash";
2625: }
2626: }
2627: if (conflict5) {
2628: // Namespace conflict...
2629: throw new TransactionException(
2630: TransactionException.DUPLICATE_TABLE,
2631: "Concurrent Serializable Transaction Conflict(5): "
2632: + "Namespace conflict: "
2633: + conflict_name.toString() + " "
2634: + conflict_desc);
2635: }
2636:
2637: // For each journal,
2638: for (int i = 0; i < changed_tables.length; ++i) {
2639: MasterTableJournal change_journal = changed_tables[i];
2640: // The table the change was made to.
2641: int table_id = change_journal.getTableID();
2642: // Get the master table with this table id.
2643: MasterTableDataSource master = getMasterTable(table_id);
2644:
2645: // True if the state contains a committed resource with the given name
2646: boolean committed_resource = state_store
2647: .containsVisibleResource(table_id);
2648:
2649: // Check this table is still in the committed tables list.
2650: if (!created_tables.contains(table_id)
2651: && !committed_resource) {
2652: // This table is no longer a committed table, so rollback
2653: throw new TransactionException(
2654: TransactionException.TABLE_DROPPED,
2655: "Concurrent Serializable Transaction Conflict(2): "
2656: + "Table altered/dropped: "
2657: + master.getName());
2658: }
2659:
2660: // Since this journal was created, check to see if any changes to the
2661: // tables have been committed since.
2662: // This will return all journals on the table with the same commit_id
2663: // or greater.
2664: MasterTableJournal[] journals_since = master
2665: .findAllJournalsSince(tran_commit_id);
2666:
2667: // For each journal, determine if there's any clashes.
2668: for (int n = 0; n < journals_since.length; ++n) {
// This will throw an exception if a commit clashes.
2670: change_journal.testCommitClash(master
2671: .getDataTableDef(), journals_since[n]);
2672: }
2673:
2674: }
2675:
2676: // Look at the transaction journal, if a table is dropped that has
2677: // journal entries since the last commit then we have an exception
2678: // case.
2679: for (int i = 0; i < dropped_tables.size(); ++i) {
2680: int table_id = dropped_tables.intAt(i);
2681: // Get the master table with this table id.
2682: MasterTableDataSource master = getMasterTable(table_id);
2683: // Any journal entries made to this dropped table?
2684: if (master.findAllJournalsSince(tran_commit_id).length > 0) {
2685: // Oops, yes, rollback!
2686: throw new TransactionException(
2687: TransactionException.TABLE_REMOVE_CLASH,
2688: "Concurrent Serializable Transaction Conflict(3): "
2689: + "Dropped table has modifications: "
2690: + master.getName());
2691: }
2692: }
2693:
2694: // Tests passed so go on to commit,
2695:
2696: // ---- Commit stage ----
2697:
2698: // Create a normalized list of MasterTableDataSource of all tables that
2699: // were either changed (and not dropped), and created (and not dropped).
2700: // This list represents all tables that are either new or changed in
2701: // this transaction.
2702:
2703: final int created_tables_count = created_tables.size();
2704: final int changed_tables_count = changed_tables.length;
2705: final ArrayList normalized_changed_tables = new ArrayList(
2706: 8);
2707: // Add all tables that were changed and not dropped in this transaction.
2708: for (int i = 0; i < changed_tables_count; ++i) {
2709: MasterTableJournal table_journal = changed_tables[i];
2710: // The table the changes were made to.
2711: int table_id = table_journal.getTableID();
2712: // If this table is not dropped in this transaction and is not
2713: // already in the normalized list then add it.
2714: if (!dropped_tables.contains(table_id)) {
2715: MasterTableDataSource master_table = getMasterTable(table_id);
2716:
2717: CommitTableInfo table_info = new CommitTableInfo();
2718: table_info.master = master_table;
2719: table_info.journal = table_journal;
2720: table_info.changes_since_commit = master_table
2721: .findAllJournalsSince(tran_commit_id);
2722:
2723: normalized_changed_tables.add(table_info);
2724: }
2725: }
2726:
2727: // Add all tables that were created and not dropped in this transaction.
2728: for (int i = 0; i < created_tables_count; ++i) {
2729: int table_id = created_tables.intAt(i);
2730: // If this table is not dropped in this transaction then this is a
2731: // new table in this transaction.
2732: if (!dropped_tables.contains(table_id)) {
2733: MasterTableDataSource master_table = getMasterTable(table_id);
2734: if (!commitTableListContains(
2735: normalized_changed_tables, master_table)) {
2736:
2737: // This is for entries that are created but modified (no journal).
2738: CommitTableInfo table_info = new CommitTableInfo();
2739: table_info.master = master_table;
2740:
2741: normalized_changed_tables.add(table_info);
2742: }
2743: }
2744: }
2745:
2746: // The final size of the normalized changed tables list
2747: final int norm_changed_tables_count = normalized_changed_tables
2748: .size();
2749:
2750: // Create a normalized list of MasterTableDataSource of all tables that
2751: // were dropped (and not created) in this transaction. This list
2752: // represents tables that will be dropped if the transaction
2753: // successfully commits.
2754:
2755: final int dropped_tables_count = dropped_tables.size();
2756: final ArrayList normalized_dropped_tables = new ArrayList(
2757: 8);
2758: for (int i = 0; i < dropped_tables_count; ++i) {
2759: // The dropped table
2760: int table_id = dropped_tables.intAt(i);
2761: // Was this dropped table also created? If it was created in this
2762: // transaction then we don't care about it.
2763: if (!created_tables.contains(table_id)) {
2764: MasterTableDataSource master_table = getMasterTable(table_id);
2765: normalized_dropped_tables.add(master_table);
2766: }
2767: }
2768:
2769: // We now need to create a SimpleTransaction object that we
2770: // use to send to the triggering mechanism. This
2771: // SimpleTransaction represents a very specific view of the
2772: // transaction. This view contains the latest version of changed
2773: // tables in this transaction. It also contains any tables that have
2774: // been created by this transaction and does not contain any tables
2775: // that have been dropped. Any tables that have not been touched by
2776: // this transaction are shown in their current committed state.
2777: // To summarize - this view is the current view of the database plus
2778: // any modifications made by the transaction that is being committed.
2779:
2780: // How this works - All changed tables are merged with the current
2781: // committed table. All created tables are added into check_transaction
2782: // and all dropped tables are removed from check_transaction. If
2783: // there were no other changes to a table between the time the
2784: // transaction was created and now, the view of the table in the
2785: // transaction is used, otherwise the latest changes are merged.
2786:
2787: // Note that this view will be the view that the database will
2788: // ultimately become if this transaction successfully commits. Also,
2789: // you should appreciate that this view is NOT exactly the same as
// the current transaction view because any changes that have been
2791: // committed by concurrent transactions will be reflected in this view.
2792:
2793: // Create a new transaction of the database which will represent the
2794: // committed view if this commit is successful.
2795: check_transaction = createTransaction();
2796:
2797: // Overwrite this view with tables from this transaction that have
2798: // changed or have been added or dropped.
2799:
2800: // (Note that order here is important). First drop any tables from
2801: // this view.
2802: for (int i = 0; i < normalized_dropped_tables.size(); ++i) {
2803: // Get the table
2804: MasterTableDataSource master_table = (MasterTableDataSource) normalized_dropped_tables
2805: .get(i);
2806: // Drop this table in the current view
2807: check_transaction.removeVisibleTable(master_table);
2808: }
2809:
2810: // Now add any changed tables to the view.
2811:
2812: // Represents view of the changed tables
2813: TableDataSource[] changed_table_source = new TableDataSource[norm_changed_tables_count];
2814: // Set up the above arrays
2815: for (int i = 0; i < norm_changed_tables_count; ++i) {
2816:
2817: // Get the information for this changed table
2818: CommitTableInfo table_info = (CommitTableInfo) normalized_changed_tables
2819: .get(i);
2820:
2821: // Get the master table that changed from the normalized list.
2822: MasterTableDataSource master = table_info.master;
2823: // Did this table change since the transaction started?
2824: MasterTableJournal[] all_table_changes = table_info.changes_since_commit;
2825:
2826: if (all_table_changes == null
2827: || all_table_changes.length == 0) {
2828: // No changes so we can pick the correct IndexSet from the current
2829: // transaction.
2830:
2831: // Get the state of the changed tables from the Transaction
2832: MutableTableDataSource mtable = transaction
2833: .getTable(master.getTableName());
2834: // Get the current index set of the changed table
2835: table_info.index_set = transaction
2836: .getIndexSetForTable(master);
2837: // Flush all index changes in the table
2838: mtable.flushIndexChanges();
2839:
2840: // Set the 'check_transaction' object with the latest version of the
2841: // table.
2842: check_transaction
2843: .updateVisibleTable(table_info.master,
2844: table_info.index_set);
2845:
2846: } else {
2847: // There were changes so we need to merge the changes with the
2848: // current view of the table.
2849:
2850: // It's not immediately obvious how this merge update works, but
2851: // basically what happens is we put the table journal with all the
2852: // changes into a new MutableTableDataSource of the current
2853: // committed state, and then we flush all the changes into the
2854: // index and then update the 'check_transaction' with this change.
2855:
2856: // Create the MutableTableDataSource with the changes from this
2857: // journal.
2858: MutableTableDataSource mtable = master
2859: .createTableDataSourceAtCommit(
2860: check_transaction,
2861: table_info.journal);
2862: // Get the current index set of the changed table
2863: table_info.index_set = check_transaction
2864: .getIndexSetForTable(master);
2865: // Flush all index changes in the table
2866: mtable.flushIndexChanges();
2867:
2868: // Dispose the table
2869: mtable.dispose();
2870:
2871: }
2872:
2873: // And now refresh the 'changed_table_source' entry
2874: changed_table_source[i] = check_transaction
2875: .getTable(master.getTableName());
2876:
2877: }
2878:
2879: // The 'check_transaction' now represents the view the database will be
2880: // if the commit succeeds. We lock 'check_transaction' so it is
2881: // read-only (the view is immutable).
2882: check_transaction.setReadOnly();
2883:
2884: // Any tables that the constraints were altered for we need to check
2885: // if any rows in the table violate the new constraints.
2886: for (int i = 0; i < constraint_altered_tables.size(); ++i) {
2887: // We need to check there are no constraint violations for all the
2888: // rows in the table.
2889: int table_id = constraint_altered_tables.intAt(i);
2890: for (int n = 0; n < norm_changed_tables_count; ++n) {
2891: CommitTableInfo table_info = (CommitTableInfo) normalized_changed_tables
2892: .get(n);
2893: if (table_info.master.getTableID() == table_id) {
2894: checkAllAddConstraintViolations(
2895: check_transaction,
2896: changed_table_source[n],
2897: Transaction.INITIALLY_DEFERRED);
2898: }
2899: }
2900: }
2901:
2902: // For each changed table we must determine the rows that
2903: // were deleted and perform the remove constraint checks on the
2904: // deleted rows. Note that this happens after the records are
2905: // removed from the index.
2906:
2907: // For each changed table,
2908: for (int i = 0; i < norm_changed_tables_count; ++i) {
2909: CommitTableInfo table_info = (CommitTableInfo) normalized_changed_tables
2910: .get(i);
2911: // Get the journal that details the change to the table.
2912: MasterTableJournal change_journal = table_info.journal;
2913: if (change_journal != null) {
2914: // Find the normalized deleted rows.
2915: int[] normalized_removed_rows = change_journal
2916: .normalizedRemovedRows();
2917: // Check removing any of the data doesn't cause a constraint
2918: // violation.
2919: checkRemoveConstraintViolations(
2920: check_transaction,
2921: changed_table_source[i],
2922: normalized_removed_rows,
2923: Transaction.INITIALLY_DEFERRED);
2924:
2925: // Find the normalized added rows.
2926: int[] normalized_added_rows = change_journal
2927: .normalizedAddedRows();
2928: // Check adding any of the data doesn't cause a constraint
2929: // violation.
2930: checkAddConstraintViolations(check_transaction,
2931: changed_table_source[i],
2932: normalized_added_rows,
2933: Transaction.INITIALLY_DEFERRED);
2934:
2935: // Set up the list of added and removed rows
2936: table_info.norm_added_rows = normalized_added_rows;
2937: table_info.norm_removed_rows = normalized_removed_rows;
2938:
2939: }
2940: }
2941:
2942: // Deferred trigger events.
2943: // For each changed table.
2944: n_loop: for (int i = 0; i < norm_changed_tables_count; ++i) {
2945: CommitTableInfo table_info = (CommitTableInfo) normalized_changed_tables
2946: .get(i);
2947: // Get the journal that details the change to the table.
2948: MasterTableJournal change_journal = table_info.journal;
2949: if (change_journal != null) {
2950: // Get the table name
2951: TableName table_name = table_info.master
2952: .getTableName();
2953: // The list of listeners to dispatch this event to
2954: TransactionModificationListener[] listeners;
2955: // Are there any listeners listening for events on this table?
2956: synchronized (modification_listeners) {
2957: ArrayList list = (ArrayList) modification_listeners
2958: .get(table_name);
2959: if (list == null || list.size() == 0) {
2960: // If no listeners on this table, continue to the next
2961: // table that was changed.
2962: continue n_loop;
2963: }
2964: // Generate the list of listeners,
2965: listeners = (TransactionModificationListener[]) list
2966: .toArray(new TransactionModificationListener[list
2967: .size()]);
2968: }
2969: // Generate the event
2970: TableCommitModificationEvent event = new TableCommitModificationEvent(
2971: check_transaction, table_name,
2972: table_info.norm_added_rows,
2973: table_info.norm_removed_rows);
2974: // Fire this event on the listeners
2975: for (int n = 0; n < listeners.length; ++n) {
2976: listeners[n].tableCommitChange(event);
2977: }
2978:
2979: } // if (change_journal != null)
2980: } // for each changed table
2981:
2982: // NOTE: This isn't as fail safe as it could be. We really need to
2983: // do the commit in two phases. The first writes updated indices to
2984: // the index files. The second updates the header pointer for the
2985: // respective table. Perhaps we can make the header update
2986: // procedure just one file write.
2987:
2988: // Finally, at this point all constraint checks have passed and the
2989: // changes are ready to finally be committed as permanent changes
2990: // to the conglomerate. All that needs to be done is to commit our
2991: // IndexSet indices for each changed table as final.
2992: // ISSUE: Should we separate the 'committing of indexes' changes and
2993: // 'committing of delete/add flags' to make the FS more robust?
2994: // It would be more robust if all indexes are committed in one go,
2995: // then all table flag data.
2996:
2997: // Set flag to indicate we have committed entries.
2998: entries_committed = true;
2999:
3000: // For each change to each table,
3001: for (int i = 0; i < norm_changed_tables_count; ++i) {
3002: CommitTableInfo table_info = (CommitTableInfo) normalized_changed_tables
3003: .get(i);
3004: // Get the journal that details the change to the table.
3005: MasterTableJournal change_journal = table_info.journal;
3006: if (change_journal != null) {
3007: // Get the master table with this table id.
3008: MasterTableDataSource master = table_info.master;
3009: // Commit the changes to the table.
3010: // We use 'this.commit_id' which is the current commit level we are
3011: // at.
3012: master.commitTransactionChange(this .commit_id,
3013: change_journal, table_info.index_set);
3014: // Add to 'changed_tables_list'
3015: changed_tables_list.add(master);
3016: }
3017: }
3018:
3019: // Only do this if we've created or dropped tables.
3020: if (created_tables.size() > 0
3021: || dropped_tables.size() > 0) {
3022: // Update the committed tables in the conglomerate state.
3023: // This will update and synchronize the headers in this conglomerate.
3024: commitToTables(created_tables, dropped_tables);
3025: }
3026:
3027: // Update the namespace clash list
3028: if (database_objects_created.size() > 0
3029: || database_objects_dropped.size() > 0) {
3030: NameSpaceJournal namespace_journal = new NameSpaceJournal(
3031: tran_commit_id, database_objects_created,
3032: database_objects_dropped);
3033: namespace_journal_list.add(namespace_journal);
3034: }
3035:
3036: } finally {
3037:
3038: try {
3039:
3040: // If entries_committed == false it means we didn't get to a point
3041: // where any changed tables were committed. Attempt to rollback the
3042: // changes in this transaction if they haven't been committed yet.
3043: if (entries_committed == false) {
3044: // For each change to each table,
3045: for (int i = 0; i < changed_tables.length; ++i) {
3046: // Get the journal that details the change to the table.
3047: MasterTableJournal change_journal = changed_tables[i];
3048: // The table the changes were made to.
3049: int table_id = change_journal.getTableID();
3050: // Get the master table with this table id.
3051: MasterTableDataSource master = getMasterTable(table_id);
3052: // Commit the rollback on the table.
3053: master
3054: .rollbackTransactionChange(change_journal);
3055: }
3056: if (Debug().isInterestedIn(Lvl.INFORMATION)) {
3057: Debug()
3058: .write(Lvl.INFORMATION, this ,
3059: "Rolled back transaction changes in a commit.");
3060: }
3061: }
3062:
3063: } finally {
3064: try {
3065: // Dispose the 'check_transaction'
3066: if (check_transaction != null) {
3067: check_transaction.dispose();
3068: closeTransaction(check_transaction);
3069: }
3070: // Always ensure a transaction close, even if we have an exception.
3071: // Notify the conglomerate that this transaction has closed.
3072: closeTransaction(transaction);
3073: } catch (Throwable e) {
3074: Debug().writeException(e);
3075: }
3076: }
3077:
3078: }
3079:
3080: // Flush the journals up to the minimum commit id for all the tables
3081: // that this transaction changed.
3082: long min_commit_id = open_transactions
3083: .minimumCommitID(null);
3084: int chsz = changed_tables_list.size();
3085: for (int i = 0; i < chsz; ++i) {
3086: MasterTableDataSource master = (MasterTableDataSource) changed_tables_list
3087: .get(i);
3088: master.mergeJournalChanges(min_commit_id);
3089: }
3090: int nsjsz = namespace_journal_list.size();
3091: for (int i = nsjsz - 1; i >= 0; --i) {
3092: NameSpaceJournal namespace_journal = (NameSpaceJournal) namespace_journal_list
3093: .get(i);
3094: // Remove if the commit id for the journal is less than the minimum
3095: // commit id
3096: if (namespace_journal.commit_id < min_commit_id) {
3097: namespace_journal_list.remove(i);
3098: }
3099: }
3100:
3101: // Set a check point in the store system. This means that the
// persistence state is now stable.
3103: store_system.setCheckPoint();
3104:
3105: } // synchronized (commit_lock)
3106:
3107: }
3108:
3109: /**
3110: * Rollbacks a transaction and invalidates any changes that the transaction
3111: * made to the database. The rows that this transaction changed are given
3112: * up as freely available rows. This is called by the 'closeAndRollback'
3113: * method in Transaction.
3114: */
3115: void processRollback(Transaction transaction,
3116: ArrayList touched_tables, TransactionJournal journal) {
3117:
3118: // Go through the journal. Any rows added should be marked as deleted
3119: // in the respective master table.
3120:
3121: // Get individual journals for updates made to tables in this
3122: // transaction.
3123: // The list MasterTableJournal
3124: ArrayList journal_list = new ArrayList();
3125: for (int i = 0; i < touched_tables.size(); ++i) {
3126: MasterTableJournal table_journal = ((MutableTableDataSource) touched_tables
3127: .get(i)).getJournal();
3128: if (table_journal.entries() > 0) { // Check the journal has entries.
3129: journal_list.add(table_journal);
3130: }
3131: }
3132: MasterTableJournal[] changed_tables = (MasterTableJournal[]) journal_list
3133: .toArray(new MasterTableJournal[journal_list.size()]);
3134:
3135: // The list of tables created by this journal.
3136: IntegerVector created_tables = journal.getTablesCreated();
3137:
3138: synchronized (commit_lock) {
3139:
3140: try {
3141:
3142: // For each change to each table,
3143: for (int i = 0; i < changed_tables.length; ++i) {
3144: // Get the journal that details the change to the table.
3145: MasterTableJournal change_journal = changed_tables[i];
3146: // The table the changes were made to.
3147: int table_id = change_journal.getTableID();
3148: // Get the master table with this table id.
3149: MasterTableDataSource master = getMasterTable(table_id);
3150: // Commit the rollback on the table.
3151: master.rollbackTransactionChange(change_journal);
3152: }
3153:
3154: } finally {
3155: // Notify the conglomerate that this transaction has closed.
3156: closeTransaction(transaction);
3157: }
3158: }
3159: }
3160:
3161: // -----
3162:
3163: /**
3164: * Sets the given List of MasterTableDataSource objects to the currently
3165: * committed list of tables in this conglomerate. This will make the change
3166: * permanent by updating the state file also.
3167: * <p>
3168: * This should be called as part of a transaction commit.
3169: */
3170: private void commitToTables(IntegerVector created_tables,
3171: IntegerVector dropped_tables) {
3172:
3173: // Add created tables to the committed tables list.
3174: for (int i = 0; i < created_tables.size(); ++i) {
3175: // For all created tables, add to the visible list and remove from the
3176: // delete list in the state store.
3177: MasterTableDataSource t = getMasterTable(created_tables
3178: .intAt(i));
3179: StateResource resource = new StateResource(t.getTableID(),
3180: createEncodedTableFile(t));
3181: state_store.addVisibleResource(resource);
3182: state_store.removeDeleteResource(resource.name);
3183: }
3184:
3185: // Remove dropped tables from the committed tables list.
3186: for (int i = 0; i < dropped_tables.size(); ++i) {
3187: // For all dropped tables, add to the delete list and remove from the
3188: // visible list in the state store.
3189: MasterTableDataSource t = getMasterTable(dropped_tables
3190: .intAt(i));
3191: StateResource resource = new StateResource(t.getTableID(),
3192: createEncodedTableFile(t));
3193: state_store.addDeleteResource(resource);
3194: state_store.removeVisibleResource(resource.name);
3195: }
3196:
3197: try {
3198: state_store.commit();
3199: } catch (IOException e) {
3200: Debug().writeException(e);
3201: throw new Error("IO Error: " + e.getMessage());
3202: }
3203: }
3204:
3205: /**
3206: * Returns the MasterTableDataSource in this conglomerate with the given
3207: * table id.
3208: */
3209: MasterTableDataSource getMasterTable(int table_id) {
3210: synchronized (commit_lock) {
3211: // Find the table with this table id.
3212: for (int i = 0; i < table_list.size(); ++i) {
3213: MasterTableDataSource t = (MasterTableDataSource) table_list
3214: .get(i);
3215: if (t.getTableID() == table_id) {
3216: return t;
3217: }
3218: }
3219: throw new Error("Unable to find an open table with id: "
3220: + table_id);
3221: }
3222: }
3223:
3224: /**
3225: * Creates a table store in this conglomerate with the given name and returns
3226: * a reference to the table. Note that this table is not a commited change
3227: * to the system. It is a free standing blank table store. The table
3228: * returned here is uncommitted and will be deleted unless it is committed.
3229: * <p>
3230: * Note that two tables may exist within a conglomerate with the same name,
3231: * however each <b>committed</b> table must have a unique name.
3232: * <p>
3233: * @param table_def the table definition.
3234: * @param data_sector_size the size of the data sectors (affects performance
3235: * and size of the file).
3236: * @param index_sector_size the size of the index sectors.
3237: */
3238: MasterTableDataSource createMasterTable(DataTableDef table_def,
3239: int data_sector_size, int index_sector_size) {
3240: synchronized (commit_lock) {
3241: try {
3242:
3243: // EFFICIENCY: Currently this writes to the conglomerate state file
3244: // twice. Once in 'nextUniqueTableID' and once in
3245: // 'state_store.commit'.
3246:
3247: // The unique id that identifies this table,
3248: int table_id = nextUniqueTableID();
3249:
3250: // Create the object.
3251: V2MasterTableDataSource master_table = new V2MasterTableDataSource(
3252: getSystem(), storeSystem(), open_transactions,
3253: blob_store);
3254: master_table.create(table_id, table_def);
3255:
3256: // Add to the list of all tables.
3257: table_list.add(master_table);
3258:
3259: // Add this to the list of deleted tables,
3260: // (This should really be renamed to uncommitted tables).
3261: markAsCommittedDropped(table_id);
3262:
3263: // Commit this
3264: state_store.commit();
3265:
3266: // And return it.
3267: return master_table;
3268:
3269: } catch (IOException e) {
3270: Debug().writeException(e);
3271: throw new Error("Unable to create master table '"
3272: + table_def.getName() + "' - " + e.getMessage());
3273: }
3274: }
3275:
3276: }
3277:
3278: /**
3279: * Creates a table store in this conglomerate that is an exact copy of the
3280: * given MasterTableDataSource. Note that this table is not a commited change
3281: * to the system. It is a free standing blank table store. The table
3282: * returned here is uncommitted and will be deleted unless it is committed.
3283: * <p>
3284: * Note that two tables may exist within a conglomerate with the same name,
3285: * however each <b>committed</b> table must have a unique name.
3286: * <p>
3287: * @param src_master_table the source master table to copy.
3288: * @param index_set the view of the table index to copy.
3289: * @return the MasterTableDataSource with the copied information.
3290: */
3291: MasterTableDataSource copyMasterTable(
3292: MasterTableDataSource src_master_table, IndexSet index_set) {
3293: synchronized (commit_lock) {
3294: try {
3295:
3296: // EFFICIENCY: Currently this writes to the conglomerate state file
3297: // twice. Once in 'nextUniqueTableID' and once in
3298: // 'state_store.commit'.
3299:
3300: // The unique id that identifies this table,
3301: int table_id = nextUniqueTableID();
3302:
3303: // Create the object.
3304: V2MasterTableDataSource master_table = new V2MasterTableDataSource(
3305: getSystem(), storeSystem(), open_transactions,
3306: blob_store);
3307:
3308: master_table
3309: .copy(table_id, src_master_table, index_set);
3310:
3311: // Add to the list of all tables.
3312: table_list.add(master_table);
3313:
3314: // Add this to the list of deleted tables,
3315: // (This should really be renamed to uncommitted tables).
3316: markAsCommittedDropped(table_id);
3317:
3318: // Commit this
3319: state_store.commit();
3320:
3321: // And return it.
3322: return master_table;
3323:
3324: } catch (IOException e) {
3325: Debug().writeException(e);
3326: throw new RuntimeException(
3327: "Unable to copy master table '"
3328: + src_master_table.getDataTableDef()
3329: .getName() + "' - "
3330: + e.getMessage());
3331: }
3332: }
3333:
3334: }
3335:
3336: // ---------- Inner classes ----------
3337:
3338: /**
3339: * A journal for handling namespace clashes between transactions. For
3340: * example, we would need to generate a conflict if two concurrent
3341: * transactions were to drop the same table, or if a procedure and a
3342: * table with the same name were generated in concurrent transactions.
3343: */
3344: private static class NameSpaceJournal {
3345:
3346: /**
3347: * The commit_id of this journal entry.
3348: */
3349: long commit_id;
3350:
3351: /**
3352: * The list of names created in this journal.
3353: */
3354: ArrayList created_names;
3355:
3356: /**
3357: * The list of names dropped in this journal.
3358: */
3359: ArrayList dropped_names;
3360:
3361: /**
3362: * Constructs the journal.
3363: */
3364: NameSpaceJournal(long commit_id, ArrayList created_names,
3365: ArrayList dropped_names) {
3366: this .commit_id = commit_id;
3367: this .created_names = created_names;
3368: this .dropped_names = dropped_names;
3369: }
3370:
3371: }
3372:
3373: // // ---------- Shutdown hook ----------
3374: //
3375: // /**
3376: // * This is a thread that is started when the shutdown hook for this
3377: // * conglomerate is executed. It goes through each table in the conglomerate
3378: // * and attempts to lock the 'writeLockedObject' for each table. When all the
3379: // * objects are locked it goes into a wait state.
3380: // */
3381: // private class ConglomerateShutdownHookThread extends Thread {
3382: // private boolean complete = false;
3383: //
3384: // ConglomerateShutdownHookThread() {
3385: // setName("Mckoi - JVM Shutdown Hook");
3386: // }
3387: //
3388: // public synchronized void run() {
3389: // // Synchronize over the commit_lock object
3390: // synchronized (commit_lock) {
3391: // if (table_list != null) {
3392: //// System.out.println("Cleanup on: " + TableDataConglomerate.this);
3393: // for (int i = 0; i < table_list.size(); ++i) {
3394: // MasterTableDataSource master =
3395: // (MasterTableDataSource) table_list.get(i);
3396: //// System.out.println("CLEANUP: " + master);
3397: // master.shutdownHookCleanup();
3398: // }
3399: // }
3400: // }
3401: // complete = true;
3402: // notifyAll();
3403: // }
3404: // public synchronized void waitUntilComplete() {
3405: // try {
3406: // while (!complete) {
3407: // wait();
3408: // }
3409: // }
3410: // catch (InterruptedException e) { /* ignore */ }
3411: // }
3412: // }
3413:
// Intentionally empty finalizer.  It previously called
// 'removeShutdownHook()' (see the commented-out shutdown hook code above);
// the call is kept here in comment form for reference.  NOTE(review): an
// empty finalize() still imposes GC overhead - consider removing entirely
// if no caller relies on overriding it.
public void finalize() {
  // removeShutdownHook();
}
3417:
3418: }
|