0001: /*
0002:
0003: Derby - Class org.apache.derby.impl.sql.execute.AlterTableConstantAction
0004:
0005: Licensed to the Apache Software Foundation (ASF) under one or more
0006: contributor license agreements. See the NOTICE file distributed with
0007: this work for additional information regarding copyright ownership.
0008: The ASF licenses this file to you under the Apache License, Version 2.0
0009: (the "License"); you may not use this file except in compliance with
0010: the License. You may obtain a copy of the License at
0011:
0012: http://www.apache.org/licenses/LICENSE-2.0
0013:
0014: Unless required by applicable law or agreed to in writing, software
0015: distributed under the License is distributed on an "AS IS" BASIS,
0016: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
0017: See the License for the specific language governing permissions and
0018: limitations under the License.
0019:
0020: */
0021:
0022: package org.apache.derby.impl.sql.execute;
0023:
0024: import org.apache.derby.iapi.services.sanity.SanityManager;
0025:
0026: import org.apache.derby.iapi.services.io.StreamStorable;
0027:
0028: import org.apache.derby.iapi.error.StandardException;
0029:
0030: import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;
0031:
0032: import org.apache.derby.iapi.sql.dictionary.ColumnDescriptor;
0033: import org.apache.derby.iapi.sql.dictionary.ColumnDescriptorList;
0034: import org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor;
0035: import org.apache.derby.iapi.sql.dictionary.ConstraintDescriptorList;
0036: import org.apache.derby.iapi.sql.dictionary.ReferencedKeyConstraintDescriptor;
0037: import org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor;
0038: import org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator;
0039: import org.apache.derby.iapi.sql.dictionary.DataDictionary;
0040: import org.apache.derby.iapi.sql.dictionary.DataDictionaryContext;
0041: import org.apache.derby.iapi.sql.dictionary.DefaultDescriptor;
0042: import org.apache.derby.iapi.sql.dictionary.IndexLister;
0043: import org.apache.derby.iapi.sql.dictionary.IndexRowGenerator;
0044: import org.apache.derby.iapi.sql.dictionary.SchemaDescriptor;
0045: import org.apache.derby.iapi.sql.dictionary.TableDescriptor;
0046: import org.apache.derby.iapi.sql.dictionary.StatisticsDescriptor;
0047: import org.apache.derby.iapi.sql.dictionary.DependencyDescriptor;
0048: import org.apache.derby.iapi.sql.dictionary.CheckConstraintDescriptor;
0049: import org.apache.derby.iapi.sql.dictionary.GenericDescriptorList;
0050: import org.apache.derby.iapi.sql.dictionary.TriggerDescriptor;
0051: import org.apache.derby.impl.sql.catalog.DDColumnDependableFinder;
0052:
0053: import org.apache.derby.iapi.sql.StatementType;
0054:
0055: import org.apache.derby.iapi.types.DataValueDescriptor;
0056: import org.apache.derby.iapi.types.DataTypeDescriptor;
0057: import org.apache.derby.iapi.types.DataValueFactory;
0058:
0059: import org.apache.derby.iapi.reference.SQLState;
0060:
0061: import org.apache.derby.iapi.sql.depend.Dependency;
0062: import org.apache.derby.iapi.sql.depend.DependencyManager;
0063: import org.apache.derby.iapi.sql.depend.Provider;
0064: import org.apache.derby.iapi.sql.depend.ProviderInfo;
0065:
0066: import org.apache.derby.iapi.reference.SQLState;
0067:
0068: import org.apache.derby.iapi.sql.execute.ConstantAction;
0069: import org.apache.derby.iapi.sql.execute.ExecIndexRow;
0070: import org.apache.derby.iapi.sql.execute.ExecRow;
0071: import org.apache.derby.iapi.sql.execute.ExecutionFactory;
0072:
0073: import org.apache.derby.iapi.store.access.ColumnOrdering;
0074: import org.apache.derby.iapi.store.access.ConglomerateController;
0075: import org.apache.derby.iapi.store.access.GroupFetchScanController;
0076: import org.apache.derby.iapi.store.access.Qualifier;
0077: import org.apache.derby.iapi.store.access.RowLocationRetRowSource;
0078: import org.apache.derby.iapi.store.access.RowSource;
0079: import org.apache.derby.iapi.store.access.RowUtil;
0080: import org.apache.derby.iapi.store.access.ScanController;
0081: import org.apache.derby.iapi.store.access.SortController;
0082: import org.apache.derby.iapi.store.access.SortObserver;
0083: import org.apache.derby.iapi.store.access.TransactionController;
0084:
0085: import org.apache.derby.iapi.types.NumberDataValue;
0086:
0087: import org.apache.derby.iapi.sql.Activation;
0088: import org.apache.derby.iapi.sql.ResultSet;
0089: import org.apache.derby.iapi.sql.Statement;
0090: import org.apache.derby.iapi.sql.PreparedStatement;
0091:
0092: import org.apache.derby.iapi.types.RowLocation;
0093:
0094: import org.apache.derby.catalog.UUID;
0095: import org.apache.derby.catalog.IndexDescriptor;
0096: import org.apache.derby.catalog.DependableFinder;
0097:
0098: import org.apache.derby.catalog.types.DefaultInfoImpl;
0099: import org.apache.derby.catalog.types.StatisticsImpl;
0100: import org.apache.derby.catalog.types.ReferencedColumnsDescriptorImpl;
0101:
0102: import java.sql.SQLException;
0103: import java.util.Properties;
0104: import java.util.Enumeration;
0105:
0106: import org.apache.derby.iapi.services.io.FormatableBitSet;
0107:
0108: import java.util.List;
0109: import java.util.Iterator;
0110:
0111: /**
0112: * This class describes actions that are ALWAYS performed for an
0113: * ALTER TABLE Statement at Execution time.
0114: *
0115: * @author Jerry Brenner.
0116: */
0117:
0118: class AlterTableConstantAction extends DDLSingleTableConstantAction
0119: implements RowLocationRetRowSource {
0120:
// ///////////////////////////////////////////////////////////////////
// State captured from the compiled ALTER TABLE / TRUNCATE statement
// ///////////////////////////////////////////////////////////////////

// Schema the table lives in; may be null after deserialization and is
// then re-fetched in execGuts() via schemaId.
protected SchemaDescriptor sd;
// Unqualified name of the target table.
protected String tableName;
// UUID of the schema, used to re-resolve sd when it is null.
protected UUID schemaId;
// Table type constant (e.g. BASE table).
protected int tableType;
// Heap conglomerate number; 0 means "look it up at execution time".
protected long tableConglomerateId;
// One entry per column add/modify/drop action in the statement.
protected ColumnInfo[] columnInfo;
// Constraint create/drop actions to run as part of this ALTER.
protected ConstraintConstantAction[] constraintActions;
// 'T' (table) or 'R' (row) lock granularity, '\0' if unchanged.
protected char lockGranularity;
// True for ALTER TABLE ... COMPRESS.
private boolean compressTable;
// Drop behavior for DROP COLUMN (RESTRICT vs CASCADE).
private boolean sequential;
private int behavior;

// Alter table compress and Drop column -- scratch state used while
// scanning the heap and rebuilding the indexes.
private boolean doneScan;
private boolean[] needToDropSort;
private boolean[] validRow;
private int bulkFetchSize = 16;
private int currentCompressRow;
private int numIndexes;
private int rowCount;
private long estimatedRowCount;
private long[] indexConglomerateNumbers;
private long[] sortIds;
private FormatableBitSet indexedCols;
private ConglomerateController compressHeapCC;
private ExecIndexRow[] indexRows;
private ExecRow[] baseRow;
private ExecRow currentRow;
private GroupFetchScanController compressHeapGSC;
private IndexRowGenerator[] compressIRGs;
private DataValueDescriptor[][] baseRowArray;
private RowLocation[] compressRL;
private SortController[] sorters;
// 1-based position of the column being dropped (see dropColumnFromTable).
private int columnPosition;
private ColumnOrdering[][] ordering;

// Descriptor of the table being altered; set in executeConstantAction.
private TableDescriptor td;

//truncate table
private boolean truncateTable;

// Execution-time context fields.
// NOTE(review): the methods visible in this file declare same-named
// locals and never assign these fields -- confirm they are used
// elsewhere before relying on them.
private LanguageConnectionContext lcc;
private DataDictionary dd;
private DependencyManager dm;
private TransactionController tc;
private Activation activation;
0168:
0169: /**
0170: * Make the AlterAction for an ALTER TABLE statement.
0171: *
0172: * @param sd descriptor for the schema that table lives in.
0173: * @param tableName Name of table.
0174: * @param tableId UUID of table
0175: * @param tableConglomerateId heap conglomerate number of table
0176: * @param tableType Type of table (e.g., BASE).
0177: * @param columnInfo Information on all the columns in the table.
0178: * @param constraintActions ConstraintConstantAction[] for constraints
0179: * @param lockGranularity The lock granularity.
0180: * @param compressTable Whether or not this is a compress table
0181: * @param behavior drop behavior for dropping column
0182: * @param sequential If compress table/drop column, whether or not sequential
0183: * @param truncateTable Whether or not this is a truncate table
0184: */
0185: AlterTableConstantAction(SchemaDescriptor sd, String tableName,
0186: UUID tableId, long tableConglomerateId, int tableType,
0187: ColumnInfo[] columnInfo,
0188: ConstraintConstantAction[] constraintActions,
0189: char lockGranularity, boolean compressTable, int behavior,
0190: boolean sequential, boolean truncateTable) {
0191: super (tableId);
0192: this .sd = sd;
0193: this .tableName = tableName;
0194: this .tableConglomerateId = tableConglomerateId;
0195: this .tableType = tableType;
0196: this .columnInfo = columnInfo;
0197: this .constraintActions = constraintActions;
0198: this .lockGranularity = lockGranularity;
0199: this .compressTable = compressTable;
0200: this .behavior = behavior;
0201: this .sequential = sequential;
0202: this .truncateTable = truncateTable;
0203:
0204: if (SanityManager.DEBUG) {
0205: SanityManager.ASSERT(sd != null,
0206: "schema descriptor is null");
0207: }
0208: }
0209:
0210: // OBJECT METHODS
0211:
0212: public String toString() {
0213: // Do not put this under SanityManager.DEBUG - it is needed for
0214: // error reporting.
0215:
0216: // we don't bother trying to print out the
0217: // schema because we don't have it until execution
0218: if (truncateTable)
0219: return "TRUNCATE TABLE " + tableName;
0220: else
0221: return "ALTER TABLE " + tableName;
0222: }
0223:
0224: // INTERFACE METHODS
0225:
0226: /**
0227: * This is the guts of the Execution-time logic for ALTER TABLE.
0228: *
0229: * @see ConstantAction#executeConstantAction
0230: *
0231: * @exception StandardException Thrown on failure
0232: */
0233: public void executeConstantAction(Activation activation)
0234: throws StandardException {
0235: LanguageConnectionContext lcc = activation
0236: .getLanguageConnectionContext();
0237: DataDictionary dd = lcc.getDataDictionary();
0238: DependencyManager dm = dd.getDependencyManager();
0239: TransactionController tc = lcc.getTransactionExecute();
0240:
0241: /*
0242: ** Inform the data dictionary that we are about to write to it.
0243: ** There are several calls to data dictionary "get" methods here
0244: ** that might be done in "read" mode in the data dictionary, but
0245: ** it seemed safer to do this whole operation in "write" mode.
0246: **
0247: ** We tell the data dictionary we're done writing at the end of
0248: ** the transaction.
0249: */
0250: dd.startWriting(lcc);
0251:
0252: // now do the real work
0253:
0254: // get an exclusive lock of the heap, to avoid deadlock on rows of
0255: // SYSCOLUMNS etc datadictionary tables (track 879) and phantom table
0256: // descriptor, in which case table shape could be changed by a
0257: // concurrent thread doing add/drop column (track 3804 and 3825)
0258:
0259: // older version (or at target) has to get td first, potential deadlock
0260: if (tableConglomerateId == 0) {
0261: td = dd.getTableDescriptor(tableId);
0262: if (td == null) {
0263: throw StandardException.newException(
0264: SQLState.LANG_TABLE_NOT_FOUND_DURING_EXECUTION,
0265: tableName);
0266: }
0267: tableConglomerateId = td.getHeapConglomerateId();
0268: }
0269:
0270: lockTableForDDL(tc, tableConglomerateId, true);
0271:
0272: td = dd.getTableDescriptor(tableId);
0273: if (td == null) {
0274: throw StandardException.newException(
0275: SQLState.LANG_TABLE_NOT_FOUND_DURING_EXECUTION,
0276: tableName);
0277: }
0278:
0279: if (truncateTable)
0280: dm.invalidateFor(td, DependencyManager.TRUNCATE_TABLE, lcc);
0281: else
0282: dm.invalidateFor(td, DependencyManager.ALTER_TABLE, lcc);
0283: execGuts(activation);
0284: }
0285:
0286: /**
0287: * Wrapper for this DDL action. Factored out so that our child,
0288: * RepAlterTableConstantAction
0289: * could enjoy the benefits of the startWriting() method above.
0290: *
0291: *
0292: * @exception StandardException Thrown on failure
0293: */
0294: public void execGuts(Activation activation)
0295: throws StandardException {
0296: ColumnDescriptor columnDescriptor;
0297: int numRows = 0;
0298: boolean tableNeedsScanning = false;
0299: boolean tableScanned = false;
0300:
0301: LanguageConnectionContext lcc = activation
0302: .getLanguageConnectionContext();
0303: DataDictionary dd = lcc.getDataDictionary();
0304: DependencyManager dm = dd.getDependencyManager();
0305: TransactionController tc = lcc.getTransactionExecute();
0306:
0307: // Save the TableDescriptor off in the Activation
0308: activation.setDDLTableDescriptor(td);
0309:
0310: /*
0311: ** If the schema descriptor is null, then
0312: ** we must have just read ourselves in.
0313: ** So we will get the corresponding schema
0314: ** descriptor from the data dictionary.
0315: */
0316: if (sd == null) {
0317: sd = getAndCheckSchemaDescriptor(dd, schemaId,
0318: "ALTER TABLE");
0319: }
0320:
0321: /* Prepare all dependents to invalidate. (This is there chance
0322: * to say that they can't be invalidated. For example, an open
0323: * cursor referencing a table/view that the user is attempting to
0324: * alter.) If no one objects, then invalidate any dependent objects.
0325: */
0326: if (truncateTable)
0327: dm.invalidateFor(td, DependencyManager.TRUNCATE_TABLE, lcc);
0328: else
0329: dm.invalidateFor(td, DependencyManager.ALTER_TABLE, lcc);
0330:
0331: // Are we working on columns?
0332: if (columnInfo != null) {
0333: /* NOTE: We only allow a single column to be added within
0334: * each ALTER TABLE command at the language level. However,
0335: * this may change some day, so we will try to plan for it.
0336: */
0337: /* for each new column, see if the user is adding a non-nullable
0338: * column. This is only allowed on an empty table.
0339: */
0340: for (int ix = 0; ix < columnInfo.length; ix++) {
0341:
0342: /* Is this new column non-nullable?
0343: * If so, it can only be added to an
0344: * empty table if it does not have a default value.
0345: * We need to scan the table to find out how many rows
0346: * there are.
0347: */
0348: if ((columnInfo[ix].action == ColumnInfo.CREATE)
0349: && !(columnInfo[ix].dataType.isNullable())
0350: && (columnInfo[ix].defaultInfo == null)
0351: && (columnInfo[ix].autoincInc == 0)) {
0352: tableNeedsScanning = true;
0353: }
0354: }
0355:
0356: // Scan the table if necessary
0357: if (tableNeedsScanning) {
0358: numRows = getSemiRowCount(tc);
0359: // Don't allow user to add non-nullable column to non-empty table
0360: if (numRows > 0) {
0361: throw StandardException
0362: .newException(
0363: SQLState.LANG_ADDING_NON_NULL_COLUMN_TO_NON_EMPTY_TABLE,
0364: td.getQualifiedName());
0365: }
0366: tableScanned = true;
0367: }
0368:
0369: // for each related column, stuff system.column
0370: for (int ix = 0; ix < columnInfo.length; ix++) {
0371: ColumnDescriptorList cdl = new ColumnDescriptorList();
0372:
0373: /* If there is a default value, use it, otherwise use null */
0374:
0375: // Are we adding a new column or modifying a default?
0376: if (columnInfo[ix].action == ColumnInfo.CREATE) {
0377: addNewColumnToTable(activation, ix);
0378: } else if (columnInfo[ix].action == ColumnInfo.MODIFY_COLUMN_DEFAULT_RESTART
0379: || columnInfo[ix].action == ColumnInfo.MODIFY_COLUMN_DEFAULT_INCREMENT) {
0380: modifyColumnDefault(activation, ix);
0381: } else if (columnInfo[ix].action == ColumnInfo.MODIFY_COLUMN_TYPE) {
0382: modifyColumnType(activation, ix);
0383: } else if (columnInfo[ix].action == ColumnInfo.MODIFY_COLUMN_CONSTRAINT) {
0384: modifyColumnConstraint(activation,
0385: columnInfo[ix].name, true);
0386: } else if (columnInfo[ix].action == ColumnInfo.MODIFY_COLUMN_CONSTRAINT_NOT_NULL) {
0387: if (!tableScanned) {
0388: tableScanned = true;
0389: numRows = getSemiRowCount(tc);
0390: }
0391: // check that the data in the column is not null
0392: String colNames[] = new String[1];
0393: colNames[0] = columnInfo[ix].name;
0394: boolean nullCols[] = new boolean[1];
0395:
0396: /* note validateNotNullConstraint returns true if the
0397: * column is nullable
0398: */
0399: if (validateNotNullConstraint(colNames, nullCols,
0400: numRows, lcc,
0401: SQLState.LANG_NULL_DATA_IN_NON_NULL_COLUMN)) {
0402: /* nullable column - modify it to be not null
0403: * This is O.K. at this point since we would have
0404: * thrown an exception if any data was null
0405: */
0406: modifyColumnConstraint(activation,
0407: columnInfo[ix].name, false);
0408: }
0409: } else if (columnInfo[ix].action == ColumnInfo.DROP) {
0410: dropColumnFromTable(activation, ix);
0411: } else if (SanityManager.DEBUG) {
0412: SanityManager
0413: .THROWASSERT("Unexpected action in AlterTableConstantAction");
0414: }
0415: }
0416: }
0417:
0418: /* Create/Drop any constraints */
0419: if (constraintActions != null) {
0420: for (int conIndex = 0; conIndex < constraintActions.length; conIndex++) {
0421: ConstraintConstantAction cca = constraintActions[conIndex];
0422:
0423: if (cca instanceof CreateConstraintConstantAction) {
0424: int constraintType = cca.getConstraintType();
0425:
0426: /* Some constraint types require special checking:
0427: * Check - table must be empty, for now
0428: * Primary Key - table cannot already have a primary key
0429: */
0430: switch (constraintType) {
0431: case DataDictionary.PRIMARYKEY_CONSTRAINT:
0432: // Check to see if a constraint of the same type already exists
0433: ConstraintDescriptorList cdl = dd
0434: .getConstraintDescriptors(td);
0435: if (cdl.getPrimaryKey() != null) {
0436: throw StandardException
0437: .newException(
0438: SQLState.LANG_ADD_PRIMARY_KEY_FAILED1,
0439: td.getQualifiedName());
0440: }
0441: if (!tableScanned) {
0442: tableScanned = true;
0443: numRows = getSemiRowCount(tc);
0444: }
0445:
0446: break;
0447: case DataDictionary.CHECK_CONSTRAINT:
0448: if (!tableScanned) {
0449: tableScanned = true;
0450: numRows = getSemiRowCount(tc);
0451: }
0452: if (numRows > 0) {
0453: /*
0454: ** We are assuming that there will only be one
0455: ** check constraint that we are adding, so it
0456: ** is ok to do the check now rather than try
0457: ** to lump together several checks.
0458: */
0459: ConstraintConstantAction
0460: .validateConstraint(
0461: cca.getConstraintName(),
0462: ((CreateConstraintConstantAction) cca)
0463: .getConstraintText(),
0464: td, lcc, true);
0465: }
0466: break;
0467: }
0468: } else {
0469: if (SanityManager.DEBUG) {
0470: if (!(cca instanceof DropConstraintConstantAction)) {
0471: SanityManager
0472: .THROWASSERT("constraintActions["
0473: + conIndex
0474: + "] expected to be instanceof DropConstraintConstantAction not "
0475: + cca.getClass().getName());
0476: }
0477: }
0478: }
0479: constraintActions[conIndex]
0480: .executeConstantAction(activation);
0481: }
0482: }
0483:
0484: // Are we changing the lock granularity?
0485: if (lockGranularity != '\0') {
0486: if (SanityManager.DEBUG) {
0487: if (lockGranularity != 'T' && lockGranularity != 'R') {
0488: SanityManager
0489: .THROWASSERT("lockGranularity expected to be 'T'or 'R', not "
0490: + lockGranularity);
0491: }
0492: }
0493:
0494: // update the TableDescriptor
0495: td.setLockGranularity(lockGranularity);
0496: // update the DataDictionary
0497: dd.updateLockGranularity(td, sd, lockGranularity, tc);
0498: }
0499:
0500: // Are we doing a compress table?
0501: if (compressTable) {
0502: compressTable(activation);
0503: }
0504:
0505: // Are we doing a truncate table?
0506: if (truncateTable) {
0507: truncateTable(activation);
0508: }
0509:
0510: }
0511:
0512: /**
0513: * Workhorse for adding a new column to a table.
0514: *
0515: * @param ix the index of the column specfication in the ALTER
0516: * statement-- currently we allow only one.
0517: * @exception StandardException thrown on failure.
0518: */
0519: private void addNewColumnToTable(Activation activation, int ix)
0520: throws StandardException {
0521: LanguageConnectionContext lcc = activation
0522: .getLanguageConnectionContext();
0523: DataDictionary dd = lcc.getDataDictionary();
0524: DependencyManager dm = dd.getDependencyManager();
0525: TransactionController tc = lcc.getTransactionExecute();
0526:
0527: ColumnDescriptor columnDescriptor = td
0528: .getColumnDescriptor(columnInfo[ix].name);
0529: DataValueDescriptor storableDV;
0530: int colNumber = td.getMaxColumnID() + ix;
0531: DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
0532:
0533: /* We need to verify that the table does not have an existing
0534: * column with the same name before we try to add the new
0535: * one as addColumnDescriptor() is a void method.
0536: */
0537: if (columnDescriptor != null) {
0538: throw StandardException.newException(
0539: SQLState.LANG_OBJECT_ALREADY_EXISTS_IN_OBJECT,
0540: columnDescriptor.getDescriptorType(),
0541: columnInfo[ix].name, td.getDescriptorType(), td
0542: .getQualifiedName());
0543: }
0544:
0545: if (columnInfo[ix].defaultValue != null)
0546: storableDV = columnInfo[ix].defaultValue;
0547: else
0548: storableDV = columnInfo[ix].dataType.getNull();
0549:
0550: // Add the column to the conglomerate.(Column ids in store are 0-based)
0551: tc.addColumnToConglomerate(td.getHeapConglomerateId(),
0552: colNumber, storableDV);
0553:
0554: UUID defaultUUID = columnInfo[ix].newDefaultUUID;
0555:
0556: /* Generate a UUID for the default, if one exists
0557: * and there is no default id yet.
0558: */
0559: if (columnInfo[ix].defaultInfo != null && defaultUUID == null) {
0560: defaultUUID = dd.getUUIDFactory().createUUID();
0561: }
0562:
0563: // Add the column to syscolumns.
0564: // Column ids in system tables are 1-based
0565: columnDescriptor = new ColumnDescriptor(columnInfo[ix].name,
0566: colNumber + 1, columnInfo[ix].dataType,
0567: columnInfo[ix].defaultValue,
0568: columnInfo[ix].defaultInfo, td, defaultUUID,
0569: columnInfo[ix].autoincStart, columnInfo[ix].autoincInc);
0570:
0571: dd.addDescriptor(columnDescriptor, td,
0572: DataDictionary.SYSCOLUMNS_CATALOG_NUM, false, tc);
0573:
0574: // now add the column to the tables column descriptor list.
0575: td.getColumnDescriptorList().add(columnDescriptor);
0576:
0577: if (columnDescriptor.isAutoincrement()) {
0578: updateNewAutoincrementColumn(activation,
0579: columnInfo[ix].name, columnInfo[ix].autoincStart,
0580: columnInfo[ix].autoincInc);
0581: }
0582:
0583: // Update the new column to its default, if it has a non-null default
0584: if (columnDescriptor.hasNonNullDefault()) {
0585: updateNewColumnToDefault(activation, columnInfo[ix].name,
0586: columnInfo[ix].defaultInfo.getDefaultText(), lcc);
0587: }
0588:
0589: // Update SYSCOLPERMS table which tracks the permissions granted
0590: // at columns level. The sytem table has a bit map of all the columns
0591: // in the user table to help determine which columns have the
0592: // permission granted on them. Since we are adding a new column,
0593: // that bit map needs to be expanded and initialize the bit for it
0594: // to 0 since at the time of ADD COLUMN, no permissions have been
0595: // granted on that new column.
0596: //
0597: dd.updateSYSCOLPERMSforAddColumnToUserTable(td.getUUID(), tc);
0598: }
0599:
0600: /**
0601: * Workhorse for dropping a column from a table.
0602: *
0603: * @param ix the index of the column specfication in the ALTER
0604: * statement-- currently we allow only one.
0605: * @exception StandardException thrown on failure.
0606: */
0607: private void dropColumnFromTable(Activation activation, int ix)
0608: throws StandardException {
0609: LanguageConnectionContext lcc = activation
0610: .getLanguageConnectionContext();
0611: DataDictionary dd = lcc.getDataDictionary();
0612: DependencyManager dm = dd.getDependencyManager();
0613: TransactionController tc = lcc.getTransactionExecute();
0614:
0615: ColumnDescriptor columnDescriptor = td
0616: .getColumnDescriptor(columnInfo[ix].name);
0617:
0618: // We already verified this in bind, but do it again
0619: if (columnDescriptor == null) {
0620: throw StandardException.newException(
0621: SQLState.LANG_COLUMN_NOT_FOUND_IN_TABLE,
0622: columnInfo[ix].name, td.getQualifiedName());
0623: }
0624:
0625: DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
0626: ColumnDescriptorList tab_cdl = td.getColumnDescriptorList();
0627: int size = tab_cdl.size();
0628:
0629: // can NOT drop a column if it is the only one in the table
0630: if (size == 1) {
0631: throw StandardException.newException(
0632: SQLState.LANG_PROVIDER_HAS_DEPENDENT_OBJECT,
0633: dm.getActionString(DependencyManager.DROP_COLUMN),
0634: "THE *LAST* COLUMN " + columnInfo[ix].name,
0635: "TABLE", td.getQualifiedName());
0636: }
0637:
0638: columnPosition = columnDescriptor.getPosition();
0639: boolean cascade = (behavior == StatementType.DROP_CASCADE);
0640:
0641: FormatableBitSet toDrop = new FormatableBitSet(size + 1);
0642: toDrop.set(columnPosition);
0643: td.setReferencedColumnMap(toDrop);
0644:
0645: dm.invalidateFor(td, DependencyManager.DROP_COLUMN, lcc);
0646:
0647: // If column has a default we drop the default and any dependencies
0648: if (columnDescriptor.getDefaultInfo() != null) {
0649: DefaultDescriptor defaultDesc = columnDescriptor
0650: .getDefaultDescriptor(dd);
0651: dm.clearDependencies(lcc, defaultDesc);
0652: }
0653:
0654: // need to deal with triggers if has referencedColumns
0655: GenericDescriptorList tdl = dd.getTriggerDescriptors(td);
0656: Enumeration descs = tdl.elements();
0657: while (descs.hasMoreElements()) {
0658: TriggerDescriptor trd = (TriggerDescriptor) descs
0659: .nextElement();
0660: int[] referencedCols = trd.getReferencedCols();
0661: if (referencedCols == null)
0662: continue;
0663: int refColLen = referencedCols.length, j;
0664: boolean changed = false;
0665: for (j = 0; j < refColLen; j++) {
0666: if (referencedCols[j] > columnPosition)
0667: changed = true;
0668: else if (referencedCols[j] == columnPosition) {
0669: if (cascade) {
0670: DropTriggerConstantAction
0671: .dropTriggerDescriptor(lcc, dm, dd, tc,
0672: trd, activation);
0673: activation.addWarning(StandardException
0674: .newWarning(
0675: SQLState.LANG_TRIGGER_DROPPED,
0676: trd.getName(), td.getName()));
0677: } else { // we'd better give an error if don't drop it,
0678: // otherwsie there would be unexpected behaviors
0679: throw StandardException
0680: .newException(
0681: SQLState.LANG_PROVIDER_HAS_DEPENDENT_OBJECT,
0682: dm
0683: .getActionString(DependencyManager.DROP_COLUMN),
0684: columnInfo[ix].name, "TRIGGER",
0685: trd.getName());
0686: }
0687: break;
0688: }
0689: }
0690:
0691: // change triggers to refer to columns in new positions
0692: if (j == refColLen && changed) {
0693: dd.dropTriggerDescriptor(trd, tc);
0694: for (j = 0; j < refColLen; j++) {
0695: if (referencedCols[j] > columnPosition)
0696: referencedCols[j]--;
0697: }
0698: dd.addDescriptor(trd, sd,
0699: DataDictionary.SYSTRIGGERS_CATALOG_NUM, false,
0700: tc);
0701: }
0702: }
0703:
0704: ConstraintDescriptorList csdl = dd.getConstraintDescriptors(td);
0705: int csdl_size = csdl.size();
0706:
0707: // we want to remove referenced primary/unique keys in the second
0708: // round. This will ensure that self-referential constraints will
0709: // work OK.
0710: int tbr_size = 0;
0711: ConstraintDescriptor[] toBeRemoved = new ConstraintDescriptor[csdl_size];
0712:
0713: // let's go downwards, don't want to get messed up while removing
0714: for (int i = csdl_size - 1; i >= 0; i--) {
0715: ConstraintDescriptor cd = csdl.elementAt(i);
0716: int[] referencedColumns = cd.getReferencedColumns();
0717: int numRefCols = referencedColumns.length, j;
0718: boolean changed = false;
0719: for (j = 0; j < numRefCols; j++) {
0720: if (referencedColumns[j] > columnPosition)
0721: changed = true;
0722: if (referencedColumns[j] == columnPosition)
0723: break;
0724: }
0725: if (j == numRefCols) // column not referenced
0726: {
0727: if ((cd instanceof CheckConstraintDescriptor)
0728: && changed) {
0729: dd.dropConstraintDescriptor(td, cd, tc);
0730: for (j = 0; j < numRefCols; j++) {
0731: if (referencedColumns[j] > columnPosition)
0732: referencedColumns[j]--;
0733: }
0734: ((CheckConstraintDescriptor) cd)
0735: .setReferencedColumnsDescriptor(new ReferencedColumnsDescriptorImpl(
0736: referencedColumns));
0737: dd.addConstraintDescriptor(cd, tc);
0738: }
0739: continue;
0740: }
0741:
0742: if (!cascade) {
0743: if (numRefCols > 1
0744: || cd.getConstraintType() == DataDictionary.PRIMARYKEY_CONSTRAINT) {
0745: throw StandardException
0746: .newException(
0747: SQLState.LANG_PROVIDER_HAS_DEPENDENT_OBJECT,
0748: dm
0749: .getActionString(DependencyManager.DROP_COLUMN),
0750: columnInfo[ix].name, "CONSTRAINT",
0751: cd.getConstraintName());
0752: }
0753: }
0754:
0755: if (cd instanceof ReferencedKeyConstraintDescriptor) {
0756: // restrict will raise an error in invalidate if really referenced
0757: toBeRemoved[tbr_size++] = cd;
0758: continue;
0759: }
0760:
0761: // drop now in all other cases
0762: dm
0763: .invalidateFor(cd,
0764: DependencyManager.DROP_CONSTRAINT, lcc);
0765: DropConstraintConstantAction.dropConstraintAndIndex(dm, td,
0766: dd, cd, tc, lcc, true);
0767: activation.addWarning(StandardException.newWarning(
0768: SQLState.LANG_CONSTRAINT_DROPPED, cd
0769: .getConstraintName(), td.getName()));
0770: }
0771:
0772: for (int i = tbr_size - 1; i >= 0; i--) {
0773: ConstraintDescriptor cd = toBeRemoved[i];
0774: DropConstraintConstantAction.dropConstraintAndIndex(dm, td,
0775: dd, cd, tc, lcc, false);
0776: activation.addWarning(StandardException.newWarning(
0777: SQLState.LANG_CONSTRAINT_DROPPED, cd
0778: .getConstraintName(), td.getName()));
0779:
0780: if (cascade) {
0781: ConstraintDescriptorList fkcdl = dd.getForeignKeys(cd
0782: .getUUID());
0783: for (int j = 0; j < fkcdl.size(); j++) {
0784: ConstraintDescriptor fkcd = (ConstraintDescriptor) fkcdl
0785: .elementAt(j);
0786: dm.invalidateFor(fkcd,
0787: DependencyManager.DROP_CONSTRAINT, lcc);
0788:
0789: DropConstraintConstantAction
0790: .dropConstraintAndIndex(dm, fkcd
0791: .getTableDescriptor(), dd, fkcd,
0792: tc, lcc, true);
0793: activation.addWarning(StandardException.newWarning(
0794: SQLState.LANG_CONSTRAINT_DROPPED, fkcd
0795: .getConstraintName(), fkcd
0796: .getTableDescriptor().getName()));
0797: }
0798: }
0799:
0800: dm
0801: .invalidateFor(cd,
0802: DependencyManager.DROP_CONSTRAINT, lcc);
0803: dm.clearDependencies(lcc, cd);
0804: }
0805:
0806: compressTable(activation);
0807:
0808: // drop the column from syscolumns
0809: dd.dropColumnDescriptor(td.getUUID(), columnInfo[ix].name, tc);
0810: ColumnDescriptor[] cdlArray = new ColumnDescriptor[size
0811: - columnDescriptor.getPosition()];
0812:
0813: for (int i = columnDescriptor.getPosition(), j = 0; i < size; i++, j++) {
0814: ColumnDescriptor cd = (ColumnDescriptor) tab_cdl
0815: .elementAt(i);
0816: dd.dropColumnDescriptor(td.getUUID(), cd.getColumnName(),
0817: tc);
0818: cd.setPosition(i);
0819: cdlArray[j] = cd;
0820: }
0821: dd.addDescriptorArray(cdlArray, td,
0822: DataDictionary.SYSCOLUMNS_CATALOG_NUM, false, tc);
0823:
0824: List deps = dd.getProvidersDescriptorList(td.getObjectID()
0825: .toString());
0826: for (Iterator depsIterator = deps.listIterator(); depsIterator
0827: .hasNext();) {
0828: DependencyDescriptor depDesc = (DependencyDescriptor) depsIterator
0829: .next();
0830: DependableFinder finder = depDesc.getProviderFinder();
0831: if (finder instanceof DDColumnDependableFinder) {
0832: DDColumnDependableFinder colFinder = (DDColumnDependableFinder) finder;
0833: FormatableBitSet oldColumnBitMap = new FormatableBitSet(
0834: colFinder.getColumnBitMap());
0835: FormatableBitSet newColumnBitMap = new FormatableBitSet(
0836: oldColumnBitMap);
0837: newColumnBitMap.clear();
0838: int bitLen = oldColumnBitMap.getLength();
0839: for (int i = 0; i < bitLen; i++) {
0840: if (i < columnPosition && oldColumnBitMap.isSet(i))
0841: newColumnBitMap.set(i);
0842: if (i > columnPosition && oldColumnBitMap.isSet(i))
0843: newColumnBitMap.set(i - 1);
0844: }
0845: if (newColumnBitMap.equals(oldColumnBitMap))
0846: continue;
0847: dd.dropStoredDependency(depDesc, tc);
0848: colFinder.setColumnBitMap(newColumnBitMap
0849: .getByteArray());
0850: dd
0851: .addDescriptor(depDesc, null,
0852: DataDictionary.SYSDEPENDS_CATALOG_NUM,
0853: true, tc);
0854: }
0855: }
0856: }
0857:
0858: private void modifyColumnType(Activation activation, int ix)
0859: throws StandardException {
0860: LanguageConnectionContext lcc = activation
0861: .getLanguageConnectionContext();
0862: DataDictionary dd = lcc.getDataDictionary();
0863: TransactionController tc = lcc.getTransactionExecute();
0864:
0865: ColumnDescriptor columnDescriptor = td
0866: .getColumnDescriptor(columnInfo[ix].name), newColumnDescriptor = null;
0867:
0868: newColumnDescriptor = new ColumnDescriptor(columnInfo[ix].name,
0869: columnDescriptor.getPosition(),
0870: columnInfo[ix].dataType, columnDescriptor
0871: .getDefaultValue(), columnDescriptor
0872: .getDefaultInfo(), td, columnDescriptor
0873: .getDefaultUUID(), columnInfo[ix].autoincStart,
0874: columnInfo[ix].autoincInc);
0875:
0876: // Update the ColumnDescriptor with new default info
0877: dd.dropColumnDescriptor(td.getUUID(), columnInfo[ix].name, tc);
0878: dd.addDescriptor(newColumnDescriptor, td,
0879: DataDictionary.SYSCOLUMNS_CATALOG_NUM, false, tc);
0880: }
0881:
0882: /**
0883: * Workhorse for modifying column level constraints.
0884: * Right now it is restricted to modifying a null constraint to a not null
0885: * constraint.
0886: */
0887: private void modifyColumnConstraint(Activation activation,
0888: String colName, boolean nullability)
0889: throws StandardException {
0890: LanguageConnectionContext lcc = activation
0891: .getLanguageConnectionContext();
0892: DataDictionary dd = lcc.getDataDictionary();
0893: TransactionController tc = lcc.getTransactionExecute();
0894:
0895: ColumnDescriptor columnDescriptor = td
0896: .getColumnDescriptor(colName), newColumnDescriptor = null;
0897: DataTypeDescriptor dataType = columnDescriptor.getType();
0898:
0899: // set nullability
0900: dataType.setNullability(nullability);
0901:
0902: newColumnDescriptor = new ColumnDescriptor(colName,
0903: columnDescriptor.getPosition(), dataType,
0904: columnDescriptor.getDefaultValue(), columnDescriptor
0905: .getDefaultInfo(), td, columnDescriptor
0906: .getDefaultUUID(), columnDescriptor
0907: .getAutoincStart(), columnDescriptor
0908: .getAutoincInc());
0909:
0910: // Update the ColumnDescriptor with new default info
0911: dd.dropColumnDescriptor(td.getUUID(), colName, tc);
0912: dd.addDescriptor(newColumnDescriptor, td,
0913: DataDictionary.SYSCOLUMNS_CATALOG_NUM, false, tc);
0914:
0915: }
0916:
0917: /**
0918: * Workhorse for modifying the default value of a column.
0919: *
0920: * @param activation activation
0921: * @param ix the index of the column specfication in the ALTER
0922: * statement-- currently we allow only one.
0923: * @exception StandardException, thrown on error.
0924: */
0925: private void modifyColumnDefault(Activation activation, int ix)
0926: throws StandardException {
0927: LanguageConnectionContext lcc = activation
0928: .getLanguageConnectionContext();
0929: DataDictionary dd = lcc.getDataDictionary();
0930: DependencyManager dm = dd.getDependencyManager();
0931: TransactionController tc = lcc.getTransactionExecute();
0932:
0933: ColumnDescriptor columnDescriptor = td
0934: .getColumnDescriptor(columnInfo[ix].name);
0935: DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
0936: int columnPosition = columnDescriptor.getPosition();
0937:
0938: // Clean up after the old default, if non-null
0939: if (columnDescriptor.hasNonNullDefault()) {
0940: // Invalidate off of the old default
0941: DefaultDescriptor defaultDescriptor = new DefaultDescriptor(
0942: dd, columnInfo[ix].oldDefaultUUID, td.getUUID(),
0943: columnPosition);
0944:
0945: dm.invalidateFor(defaultDescriptor,
0946: DependencyManager.MODIFY_COLUMN_DEFAULT, lcc);
0947:
0948: // Drop any dependencies
0949: dm.clearDependencies(lcc, defaultDescriptor);
0950: }
0951:
0952: UUID defaultUUID = columnInfo[ix].newDefaultUUID;
0953:
0954: /* Generate a UUID for the default, if one exists
0955: * and there is no default id yet.
0956: */
0957: if (columnInfo[ix].defaultInfo != null && defaultUUID == null) {
0958: defaultUUID = dd.getUUIDFactory().createUUID();
0959: }
0960:
0961: /* Get a ColumnDescriptor reflecting the new default */
0962: columnDescriptor = new ColumnDescriptor(columnInfo[ix].name,
0963: columnPosition, columnInfo[ix].dataType,
0964: columnInfo[ix].defaultValue,
0965: columnInfo[ix].defaultInfo, td, defaultUUID,
0966: columnInfo[ix].autoincStart, columnInfo[ix].autoincInc,
0967: columnInfo[ix].autoinc_create_or_modify_Start_Increment);
0968:
0969: // Update the ColumnDescriptor with new default info
0970: dd.dropColumnDescriptor(td.getUUID(), columnInfo[ix].name, tc);
0971: dd.addDescriptor(columnDescriptor, td,
0972: DataDictionary.SYSCOLUMNS_CATALOG_NUM, false, tc);
0973:
0974: if (columnInfo[ix].action == ColumnInfo.MODIFY_COLUMN_DEFAULT_INCREMENT) {
0975: // adding an autoincrement default-- calculate the maximum value
0976: // of the autoincrement column.
0977: long maxValue = getColumnMax(activation, td,
0978: columnInfo[ix].name, columnInfo[ix].autoincInc,
0979: columnInfo[ix].autoincStart);
0980: dd.setAutoincrementValue(tc, td.getUUID(),
0981: columnInfo[ix].name, maxValue, true);
0982: } else if (columnInfo[ix].action == ColumnInfo.MODIFY_COLUMN_DEFAULT_RESTART) {
0983: dd.setAutoincrementValue(tc, td.getUUID(),
0984: columnInfo[ix].name, columnInfo[ix].autoincStart,
0985: false);
0986: }
0987: }
0988:
    /* NOTE: compressTable can also be called for
     * ALTER TABLE <t> DROP COLUMN <c>;
     *
     * The table is rebuilt by streaming every row from the old heap
     * conglomerate into a freshly created one (this object acts as the
     * RowSource), rebuilding all indexes from sorters populated during
     * the scan, and finally swapping the new conglomerate number into
     * SYSCONGLOMERATES and dropping the old heap.
     */
    private void compressTable(Activation activation)
            throws StandardException {
        ExecRow emptyHeapRow;
        long newHeapConglom;
        Properties properties = new Properties();
        RowLocation rl;
        this .lcc = activation.getLanguageConnectionContext();
        this .dd = lcc.getDataDictionary();
        this .dm = dd.getDependencyManager();
        this .tc = lcc.getTransactionExecute();
        this .activation = activation;

        if (SanityManager.DEBUG) {
            if (lockGranularity != '\0') {
                SanityManager
                        .THROWASSERT("lockGranularity expected to be '\0', not "
                                + lockGranularity);
            }
            SanityManager.ASSERT(!compressTable || columnInfo == null,
                    "columnInfo expected to be null");
            SanityManager.ASSERT(constraintActions == null,
                    "constraintActions expected to be null");
        }
        emptyHeapRow = td.getEmptyExecRow(lcc.getContextManager());
        // Open the old heap table-locked and serializable so no other
        // transaction can change it while we copy it.
        compressHeapCC = tc.openConglomerate(
                td.getHeapConglomerateId(), false,
                TransactionController.OPENMODE_FORUPDATE,
                TransactionController.MODE_TABLE,
                TransactionController.ISOLATION_SERIALIZABLE);

        // invalidate any prepared statements that
        // depended on this table (including this one)
        // bug 3653 has threads that start up and block on our lock, but do
        // not see they have to recompile their plan. We now invalidate earlier
        // however they still might recompile using the old conglomerate id before we
        // commit our DD changes.
        //
        dm.invalidateFor(td, DependencyManager.COMPRESS_TABLE, lcc);

        rl = compressHeapCC.newRowLocationTemplate();

        // Get the properties on the old heap
        compressHeapCC.getInternalTablePropertySet(properties);
        compressHeapCC.close();
        compressHeapCC = null;

        // Create an array to put base row template
        baseRow = new ExecRow[bulkFetchSize];
        baseRowArray = new DataValueDescriptor[bulkFetchSize][];
        validRow = new boolean[bulkFetchSize];

        /* Set up index info */
        getAffectedIndexes(activation);
        // Get an array of RowLocation template
        compressRL = new RowLocation[bulkFetchSize];
        indexRows = new ExecIndexRow[numIndexes];
        // !compressTable here means we were called for DROP COLUMN:
        // build a one-column-narrower row template, skipping the
        // column at columnPosition.
        if (!compressTable) {
            ExecRow newRow = activation.getExecutionFactory()
                    .getValueRow(emptyHeapRow.nColumns() - 1);
            for (int i = 0; i < newRow.nColumns(); i++) {
                newRow.setColumn(i + 1,
                        i < columnPosition - 1 ? emptyHeapRow
                                .getColumn(i + 1) : emptyHeapRow
                                .getColumn(i + 1 + 1));
            }
            emptyHeapRow = newRow;
        }
        setUpAllSorts(emptyHeapRow, rl);

        // Start by opening a full scan on the base table.
        openBulkFetchScan(td.getHeapConglomerateId());

        // Get the estimated row count for the sorters
        estimatedRowCount = compressHeapGSC.getEstimatedRowCount();

        // Create the array of base row template
        for (int i = 0; i < bulkFetchSize; i++) {
            // create a base row template
            baseRow[i] = td.getEmptyExecRow(lcc.getContextManager());
            baseRowArray[i] = baseRow[i].getRowArray();
            compressRL[i] = compressHeapGSC.newRowLocationTemplate();
        }

        // Create the new heap and load it by pulling rows from this
        // object's RowSource interface (getNextRowFromRowSource).
        newHeapConglom = tc.createAndLoadConglomerate("heap",
                emptyHeapRow.getRowArray(),
                null, //column sort order - not required for heap
                properties, TransactionController.IS_DEFAULT, this ,
                (long[]) null);

        closeBulkFetchScan();

        // Set the "estimated" row count
        ScanController compressHeapSC = tc.openScan(newHeapConglom,
                false, TransactionController.OPENMODE_FORUPDATE,
                TransactionController.MODE_TABLE,
                TransactionController.ISOLATION_SERIALIZABLE,
                (FormatableBitSet) null, (DataValueDescriptor[]) null,
                0, (Qualifier[][]) null, (DataValueDescriptor[]) null,
                0);

        // rowCount was accumulated while streaming rows into the new heap.
        compressHeapSC.setEstimatedRowCount(rowCount);

        compressHeapSC.close();
        compressHeapSC = null; // RESOLVE DJD CLEANUP

        /*
        ** Inform the data dictionary that we are about to write to it.
        ** There are several calls to data dictionary "get" methods here
        ** that might be done in "read" mode in the data dictionary, but
        ** it seemed safer to do this whole operation in "write" mode.
        **
        ** We tell the data dictionary we're done writing at the end of
        ** the transaction.
        */
        dd.startWriting(lcc);

        // Update all indexes
        if (compressIRGs.length > 0) {
            updateAllIndexes(newHeapConglom, dd);
        }

        /* Update the DataDictionary
         * RESOLVE - this will change in 1.4 because we will get
         * back the same conglomerate number
         */
        // Get the ConglomerateDescriptor for the heap
        long oldHeapConglom = td.getHeapConglomerateId();
        ConglomerateDescriptor cd = td
                .getConglomerateDescriptor(oldHeapConglom);

        // Update sys.sysconglomerates with new conglomerate #
        dd.updateConglomerateDescriptor(cd, newHeapConglom, tc);
        // Drop the old conglomerate
        tc.dropConglomerate(oldHeapConglom);
        cleanUp();
    }
1128:
1129: /*
1130: * TRUNCATE TABLE TABLENAME; (quickly removes all the rows from table and
1131: * it's correctponding indexes).
1132: * Truncate is implemented by dropping the existing conglomerates(heap,indexes) and recreating a
1133: * new ones with the properties of dropped conglomerates. Currently Store
1134: * does not have support to truncate existing conglomerated until store
1135: * supports it , this is the only way to do it.
1136: * Error Cases: Truncate error cases same as other DDL's statements except
1137: * 1)Truncate is not allowed when the table is references by another table.
1138: * 2)Truncate is not allowed when there are enabled delete triggers on the table.
1139: * Note: Because conglomerate number is changed during recreate process all the statements will be
1140: * marked as invalide and they will get recompiled internally on their next
1141: * execution. This is okay because truncate makes the number of rows to zero
1142: * it may be good idea to recompile them becuase plans are likely to be
1143: * incorrect. Recompile is done internally by cloudscape, user does not have
1144: * any effect.
1145: */
1146: private void truncateTable(Activation activation)
1147: throws StandardException {
1148: ExecRow emptyHeapRow;
1149: long newHeapConglom;
1150: Properties properties = new Properties();
1151: RowLocation rl;
1152: this .lcc = activation.getLanguageConnectionContext();
1153: this .dd = lcc.getDataDictionary();
1154: this .dm = dd.getDependencyManager();
1155: this .tc = lcc.getTransactionExecute();
1156: this .activation = activation;
1157:
1158: if (SanityManager.DEBUG) {
1159: if (lockGranularity != '\0') {
1160: SanityManager
1161: .THROWASSERT("lockGranularity expected to be '\0', not "
1162: + lockGranularity);
1163: }
1164: SanityManager.ASSERT(columnInfo == null,
1165: "columnInfo expected to be null");
1166: SanityManager.ASSERT(constraintActions == null,
1167: "constraintActions expected to be null");
1168: }
1169:
1170: //truncate table is not allowed if there are any tables referencing it.
1171: //except if it is self referencing.
1172: ConstraintDescriptorList cdl = dd.getConstraintDescriptors(td);
1173: for (int index = 0; index < cdl.size(); index++) {
1174: ConstraintDescriptor cd = cdl.elementAt(index);
1175: if (cd instanceof ReferencedKeyConstraintDescriptor) {
1176: ReferencedKeyConstraintDescriptor rfcd = (ReferencedKeyConstraintDescriptor) cd;
1177: if (rfcd
1178: .hasNonSelfReferencingFK(ConstraintDescriptor.ENABLED)) {
1179: throw StandardException
1180: .newException(
1181: SQLState.LANG_NO_TRUNCATE_ON_FK_REFERENCE_TABLE,
1182: td.getName());
1183: }
1184: }
1185: }
1186:
1187: //truncate is not allowed when there are enabled DELETE triggers
1188: GenericDescriptorList tdl = dd.getTriggerDescriptors(td);
1189: Enumeration descs = tdl.elements();
1190: while (descs.hasMoreElements()) {
1191: TriggerDescriptor trd = (TriggerDescriptor) descs
1192: .nextElement();
1193: if (trd
1194: .listensForEvent(TriggerDescriptor.TRIGGER_EVENT_DELETE)
1195: && trd.isEnabled()) {
1196: throw StandardException
1197: .newException(
1198: SQLState.LANG_NO_TRUNCATE_ON_ENABLED_DELETE_TRIGGERS,
1199: td.getName(), trd.getName());
1200: }
1201: }
1202:
1203: //gather information from the existing conglomerate to create new one.
1204: emptyHeapRow = td.getEmptyExecRow(lcc.getContextManager());
1205: compressHeapCC = tc.openConglomerate(
1206: td.getHeapConglomerateId(), false,
1207: TransactionController.OPENMODE_FORUPDATE,
1208: TransactionController.MODE_TABLE,
1209: TransactionController.ISOLATION_SERIALIZABLE);
1210:
1211: // invalidate any prepared statements that
1212: // depended on this table (including this one)
1213: // bug 3653 has threads that start up and block on our lock, but do
1214: // not see they have to recompile their plan. We now invalidate earlier
1215: // however they still might recompile using the old conglomerate id before we
1216: // commit our DD changes.
1217: //
1218: dm.invalidateFor(td, DependencyManager.TRUNCATE_TABLE, lcc);
1219:
1220: rl = compressHeapCC.newRowLocationTemplate();
1221: // Get the properties on the old heap
1222: compressHeapCC.getInternalTablePropertySet(properties);
1223: compressHeapCC.close();
1224: compressHeapCC = null;
1225:
1226: //create new conglomerate
1227: newHeapConglom = tc.createConglomerate("heap", emptyHeapRow
1228: .getRowArray(), null, //column sort order - not required for heap
1229: properties, TransactionController.IS_DEFAULT);
1230:
1231: /* Set up index info to perform truncate on them*/
1232: getAffectedIndexes(activation);
1233: if (numIndexes > 0) {
1234: indexRows = new ExecIndexRow[numIndexes];
1235: ordering = new ColumnOrdering[numIndexes][];
1236: for (int index = 0; index < numIndexes; index++) {
1237: // create a single index row template for each index
1238: indexRows[index] = compressIRGs[index]
1239: .getIndexRowTemplate();
1240: compressIRGs[index].getIndexRow(emptyHeapRow, rl,
1241: indexRows[index], (FormatableBitSet) null);
1242: /* For non-unique indexes, we order by all columns + the RID.
1243: * For unique indexes, we just order by the columns.
1244: * No need to try to enforce uniqueness here as
1245: * index should be valid.
1246: */
1247: int[] baseColumnPositions = compressIRGs[index]
1248: .baseColumnPositions();
1249: boolean[] isAscending = compressIRGs[index]
1250: .isAscending();
1251: int numColumnOrderings;
1252: numColumnOrderings = baseColumnPositions.length + 1;
1253: ordering[index] = new ColumnOrdering[numColumnOrderings];
1254: for (int ii = 0; ii < numColumnOrderings - 1; ii++) {
1255: ordering[index][ii] = new IndexColumnOrder(ii,
1256: isAscending[ii]);
1257: }
1258: ordering[index][numColumnOrderings - 1] = new IndexColumnOrder(
1259: numColumnOrderings - 1);
1260: }
1261: }
1262:
1263: /*
1264: ** Inform the data dictionary that we are about to write to it.
1265: ** There are several calls to data dictionary "get" methods here
1266: ** that might be done in "read" mode in the data dictionary, but
1267: ** it seemed safer to do this whole operation in "write" mode.
1268: **
1269: ** We tell the data dictionary we're done writing at the end of
1270: ** the transaction.
1271: */
1272: dd.startWriting(lcc);
1273:
1274: // truncate all indexes
1275: if (numIndexes > 0) {
1276: long[] newIndexCongloms = new long[numIndexes];
1277: for (int index = 0; index < numIndexes; index++) {
1278: updateIndex(newHeapConglom, dd, index, newIndexCongloms);
1279: }
1280: }
1281:
1282: // Update the DataDictionary
1283: // Get the ConglomerateDescriptor for the heap
1284: long oldHeapConglom = td.getHeapConglomerateId();
1285: ConglomerateDescriptor cd = td
1286: .getConglomerateDescriptor(oldHeapConglom);
1287:
1288: // Update sys.sysconglomerates with new conglomerate #
1289: dd.updateConglomerateDescriptor(cd, newHeapConglom, tc);
1290: // Drop the old conglomerate
1291: tc.dropConglomerate(oldHeapConglom);
1292: cleanUp();
1293: }
1294:
1295: /**
1296: * Update all of the indexes on a table when doing a bulk insert
1297: * on an empty table.
1298: *
1299: * @exception StandardException thrown on error
1300: */
1301: private void updateAllIndexes(long newHeapConglom, DataDictionary dd)
1302: throws StandardException {
1303: long[] newIndexCongloms = new long[numIndexes];
1304:
1305: /* Populate each index (one at a time or all at once). */
1306: if (sequential) {
1307: // First sorter populated during heap compression
1308: if (numIndexes >= 1) {
1309: updateIndex(newHeapConglom, dd, 0, newIndexCongloms);
1310: }
1311: for (int index = 1; index < numIndexes; index++) {
1312: // Scan heap and populate next sorter
1313: openBulkFetchScan(newHeapConglom);
1314: while (getNextRowFromRowSource() != null) {
1315: objectifyStreamingColumns();
1316: insertIntoSorter(index,
1317: compressRL[currentCompressRow - 1]);
1318: }
1319: updateIndex(newHeapConglom, dd, index, newIndexCongloms);
1320: closeBulkFetchScan();
1321: }
1322: } else {
1323: for (int index = 0; index < numIndexes; index++) {
1324: updateIndex(newHeapConglom, dd, index, newIndexCongloms);
1325: }
1326: }
1327: }
1328:
1329: private void updateIndex(long newHeapConglom, DataDictionary dd,
1330: int index, long[] newIndexCongloms)
1331: throws StandardException {
1332: ConglomerateController indexCC;
1333: Properties properties = new Properties();
1334: ConglomerateDescriptor cd;
1335: // Get the ConglomerateDescriptor for the index
1336: cd = td
1337: .getConglomerateDescriptor(indexConglomerateNumbers[index]);
1338:
1339: // Build the properties list for the new conglomerate
1340: indexCC = tc.openConglomerate(indexConglomerateNumbers[index],
1341: false, TransactionController.OPENMODE_FORUPDATE,
1342: TransactionController.MODE_TABLE,
1343: TransactionController.ISOLATION_SERIALIZABLE);
1344:
1345: // Get the properties on the old index
1346: indexCC.getInternalTablePropertySet(properties);
1347:
1348: /* Create the properties that language supplies when creating the
1349: * the index. (The store doesn't preserve these.)
1350: */
1351: int indexRowLength = indexRows[index].nColumns();
1352: properties.put("baseConglomerateId", Long
1353: .toString(newHeapConglom));
1354: if (cd.getIndexDescriptor().isUnique()) {
1355: properties.put("nUniqueColumns", Integer
1356: .toString(indexRowLength - 1));
1357: } else {
1358: properties.put("nUniqueColumns", Integer
1359: .toString(indexRowLength));
1360: }
1361: properties.put("rowLocationColumn", Integer
1362: .toString(indexRowLength - 1));
1363: properties.put("nKeyFields", Integer.toString(indexRowLength));
1364:
1365: indexCC.close();
1366:
1367: // We can finally drain the sorter and rebuild the index
1368: // RESOLVE - all indexes are btrees right now
1369: // Populate the index.
1370:
1371: RowLocationRetRowSource cCount = null;
1372: boolean updateStatistics = false;
1373: if (!truncateTable) {
1374: sorters[index].close();
1375: sorters[index] = null;
1376:
1377: if (td.statisticsExist(cd)) {
1378: cCount = new CardinalityCounter(tc
1379: .openSortRowSource(sortIds[index]));
1380: updateStatistics = true;
1381: } else
1382: cCount = new CardinalityCounter(tc
1383: .openSortRowSource(sortIds[index]));
1384:
1385: newIndexCongloms[index] = tc.createAndLoadConglomerate(
1386: "BTREE", indexRows[index].getRowArray(),
1387: ordering[index], properties,
1388: TransactionController.IS_DEFAULT, cCount,
1389: (long[]) null);
1390:
1391: //For an index, if the statistics already exist, then drop them.
1392: //The statistics might not exist for an index if the index was
1393: //created when the table was empty.
1394: //At ALTER TABLE COMPRESS time, for both kinds of indexes
1395: //(ie one with preexisting statistics and with no statistics),
1396: //create statistics for them if the table is not empty.
1397: //DERBY-737 "SYSCS_UTIL.SYSCS_COMPRESS_TABLE should create
1398: //statistics if they do not exist"
1399: if (updateStatistics)
1400: dd.dropStatisticsDescriptors(td.getUUID(),
1401: cd.getUUID(), tc);
1402:
1403: long numRows;
1404: if ((numRows = ((CardinalityCounter) cCount).getRowCount()) > 0) {
1405: long[] c = ((CardinalityCounter) cCount)
1406: .getCardinality();
1407: for (int i = 0; i < c.length; i++) {
1408: StatisticsDescriptor statDesc = new StatisticsDescriptor(
1409: dd, dd.getUUIDFactory().createUUID(), cd
1410: .getUUID(), td.getUUID(), "I",
1411: new StatisticsImpl(numRows, c[i]), i + 1);
1412: dd.addDescriptor(statDesc,
1413: null, // no parent descriptor
1414: DataDictionary.SYSSTATISTICS_CATALOG_NUM,
1415: true, tc); // no error on duplicate.
1416: }
1417: }
1418: } else {
1419: newIndexCongloms[index] = tc.createConglomerate("BTREE",
1420: indexRows[index].getRowArray(), ordering[index],
1421: properties, TransactionController.IS_DEFAULT);
1422:
1423: //on truncate drop the statistics because we know for sure
1424: //rowscount is zero and existing statistic will be invalid.
1425: if (td.statisticsExist(cd))
1426: dd.dropStatisticsDescriptors(td.getUUID(),
1427: cd.getUUID(), tc);
1428: }
1429:
1430: /* Update the DataDictionary
1431: * RESOLVE - this will change in 1.4 because we will get
1432: * back the same conglomerate number
1433: *
1434: * Update sys.sysconglomerates with new conglomerate #, we need to
1435: * update all (if any) duplicate index entries sharing this same
1436: * conglomerate.
1437: */
1438: dd
1439: .updateConglomerateDescriptor(
1440: td
1441: .getConglomerateDescriptors(indexConglomerateNumbers[index]),
1442: newIndexCongloms[index], tc);
1443:
1444: // Drop the old conglomerate
1445: tc.dropConglomerate(indexConglomerateNumbers[index]);
1446: }
1447:
1448: /**
1449: * Get info on the indexes on the table being compressed.
1450: *
1451: * @exception StandardException Thrown on error
1452: */
1453: private void getAffectedIndexes(Activation activation)
1454: throws StandardException {
1455: IndexLister indexLister = td.getIndexLister();
1456:
1457: /* We have to get non-distinct index row generaters and conglom numbers
1458: * here and then compress it to distinct later because drop column
1459: * will need to change the index descriptor directly on each index
1460: * entry in SYSCONGLOMERATES, on duplicate indexes too.
1461: */
1462: compressIRGs = indexLister.getIndexRowGenerators();
1463: numIndexes = compressIRGs.length;
1464: indexConglomerateNumbers = indexLister
1465: .getIndexConglomerateNumbers();
1466:
1467: if (!(compressTable || truncateTable)) // then it's drop column
1468: {
1469: for (int i = 0; i < compressIRGs.length; i++) {
1470: int[] baseColumnPositions = compressIRGs[i]
1471: .baseColumnPositions();
1472: int j;
1473: for (j = 0; j < baseColumnPositions.length; j++)
1474: if (baseColumnPositions[j] == columnPosition)
1475: break;
1476: if (j == baseColumnPositions.length) // not related
1477: continue;
1478:
1479: if (baseColumnPositions.length == 1
1480: || (behavior == StatementType.DROP_CASCADE && compressIRGs[i]
1481: .isUnique())) {
1482: numIndexes--;
1483: /* get first conglomerate with this conglom number each time
1484: * and each duplicate one will be eventually all dropped
1485: */
1486: ConglomerateDescriptor cd = td
1487: .getConglomerateDescriptor(indexConglomerateNumbers[i]);
1488: DropIndexConstantAction.dropIndex(dm, dd, tc, cd,
1489: td, activation
1490: .getLanguageConnectionContext());
1491:
1492: compressIRGs[i] = null; // mark it
1493: continue;
1494: }
1495: // give an error for unique index on multiple columns including
1496: // the column we are to drop (restrict), such index is not for
1497: // a constraint, because constraints have already been handled
1498: if (compressIRGs[i].isUnique()) {
1499: ConglomerateDescriptor cd = td
1500: .getConglomerateDescriptor(indexConglomerateNumbers[i]);
1501: throw StandardException
1502: .newException(
1503: SQLState.LANG_PROVIDER_HAS_DEPENDENT_OBJECT,
1504: dm
1505: .getActionString(DependencyManager.DROP_COLUMN),
1506: columnInfo[0].name, "UNIQUE INDEX",
1507: cd.getConglomerateName());
1508: }
1509: }
1510: IndexRowGenerator[] newIRGs = new IndexRowGenerator[numIndexes];
1511: long[] newIndexConglomNumbers = new long[numIndexes];
1512:
1513: for (int i = 0, j = 0; i < numIndexes; i++, j++) {
1514: while (compressIRGs[j] == null)
1515: j++;
1516:
1517: int[] baseColumnPositions = compressIRGs[j]
1518: .baseColumnPositions();
1519: newIRGs[i] = compressIRGs[j];
1520: newIndexConglomNumbers[i] = indexConglomerateNumbers[j];
1521:
1522: boolean[] isAscending = compressIRGs[j].isAscending();
1523: boolean reMakeArrays = false;
1524: int size = baseColumnPositions.length;
1525: for (int k = 0; k < size; k++) {
1526: if (baseColumnPositions[k] > columnPosition)
1527: baseColumnPositions[k]--;
1528: else if (baseColumnPositions[k] == columnPosition) {
1529: baseColumnPositions[k] = 0; // mark it
1530: reMakeArrays = true;
1531: }
1532: }
1533: if (reMakeArrays) {
1534: size--;
1535: int[] newBCP = new int[size];
1536: boolean[] newIsAscending = new boolean[size];
1537: for (int k = 0, step = 0; k < size; k++) {
1538: if (step == 0
1539: && baseColumnPositions[k + step] == 0)
1540: step++;
1541: newBCP[k] = baseColumnPositions[k + step];
1542: newIsAscending[k] = isAscending[k + step];
1543: }
1544: IndexDescriptor id = compressIRGs[j]
1545: .getIndexDescriptor();
1546: id.setBaseColumnPositions(newBCP);
1547: id.setIsAscending(newIsAscending);
1548: id.setNumberOfOrderedColumns(id
1549: .numberOfOrderedColumns() - 1);
1550: }
1551: }
1552: compressIRGs = newIRGs;
1553: indexConglomerateNumbers = newIndexConglomNumbers;
1554: }
1555:
1556: /* Now we are done with updating each index descriptor entry directly
1557: * in SYSCONGLOMERATES (for duplicate index as well), from now on, our
1558: * work should apply ONLY once for each real conglomerate, so we
1559: * compress any duplicate indexes now.
1560: */
1561: Object[] compressIndexResult = compressIndexArrays(
1562: indexConglomerateNumbers, compressIRGs);
1563:
1564: if (compressIndexResult != null) {
1565: indexConglomerateNumbers = (long[]) compressIndexResult[1];
1566: compressIRGs = (IndexRowGenerator[]) compressIndexResult[2];
1567: numIndexes = indexConglomerateNumbers.length;
1568: }
1569:
1570: indexedCols = new FormatableBitSet(compressTable
1571: || truncateTable ? td.getNumberOfColumns() + 1 : td
1572: .getNumberOfColumns());
1573: for (int index = 0; index < numIndexes; index++) {
1574: int[] colIds = compressIRGs[index].getIndexDescriptor()
1575: .baseColumnPositions();
1576:
1577: for (int index2 = 0; index2 < colIds.length; index2++) {
1578: indexedCols.set(colIds[index2]);
1579: }
1580: }
1581: }
1582:
1583: /**
1584: * Set up to update all of the indexes on a table when doing a bulk insert
1585: * on an empty table.
1586: *
1587: * @exception StandardException thrown on error
1588: */
1589: private void setUpAllSorts(ExecRow sourceRow, RowLocation rl)
1590: throws StandardException {
1591: ordering = new ColumnOrdering[numIndexes][];
1592:
1593: needToDropSort = new boolean[numIndexes];
1594: sortIds = new long[numIndexes];
1595:
1596: /* For each index, build a single index row and a sorter. */
1597: for (int index = 0; index < numIndexes; index++) {
1598: // create a single index row template for each index
1599: indexRows[index] = compressIRGs[index]
1600: .getIndexRowTemplate();
1601:
1602: // Get an index row based on the base row
1603: // (This call is only necessary here because we need to pass a template to the sorter.)
1604: compressIRGs[index].getIndexRow(sourceRow, rl,
1605: indexRows[index], (FormatableBitSet) null);
1606:
1607: /* For non-unique indexes, we order by all columns + the RID.
1608: * For unique indexes, we just order by the columns.
1609: * No need to try to enforce uniqueness here as
1610: * index should be valid.
1611: */
1612: int[] baseColumnPositions = compressIRGs[index]
1613: .baseColumnPositions();
1614: boolean[] isAscending = compressIRGs[index].isAscending();
1615: int numColumnOrderings;
1616: SortObserver sortObserver = null;
1617: /* We can only reuse the wrappers when doing an
1618: * external sort if there is only 1 index. Otherwise,
1619: * we could get in a situation where 1 sort reuses a
1620: * wrapper that is still in use in another sort.
1621: */
1622: boolean reuseWrappers = (numIndexes == 1);
1623: numColumnOrderings = baseColumnPositions.length + 1;
1624: sortObserver = new BasicSortObserver(false, false,
1625: indexRows[index], reuseWrappers);
1626: ordering[index] = new ColumnOrdering[numColumnOrderings];
1627: for (int ii = 0; ii < numColumnOrderings - 1; ii++) {
1628: ordering[index][ii] = new IndexColumnOrder(ii,
1629: isAscending[ii]);
1630: }
1631: ordering[index][numColumnOrderings - 1] = new IndexColumnOrder(
1632: numColumnOrderings - 1);
1633:
1634: // create the sorters
1635: sortIds[index] = tc.createSort((Properties) null,
1636: indexRows[index].getRowArrayClone(),
1637: ordering[index], sortObserver, false, // not in order
1638: estimatedRowCount, // est rows
1639: -1 // est row size, -1 means no idea
1640: );
1641: }
1642:
1643: sorters = new SortController[numIndexes];
1644: // Open the sorts
1645: for (int index = 0; index < numIndexes; index++) {
1646: sorters[index] = tc.openSort(sortIds[index]);
1647: needToDropSort[index] = true;
1648: }
1649: }
1650:
1651: // RowSource interface
1652:
1653: /**
1654: * @see RowSource#getValidColumns
1655: */
1656: public FormatableBitSet getValidColumns() {
1657: // All columns are valid
1658: return null;
1659: }
1660:
1661: /**
1662: * @see RowSource#getNextRowFromRowSource
1663: * @exception StandardException on error
1664: */
1665: public DataValueDescriptor[] getNextRowFromRowSource()
1666: throws StandardException {
1667: currentRow = null;
1668: // Time for a new bulk fetch?
1669: if ((!doneScan)
1670: && (currentCompressRow == bulkFetchSize || !validRow[currentCompressRow])) {
1671: int bulkFetched = 0;
1672:
1673: bulkFetched = compressHeapGSC.fetchNextGroup(baseRowArray,
1674: compressRL);
1675:
1676: doneScan = (bulkFetched != bulkFetchSize);
1677: currentCompressRow = 0;
1678: rowCount += bulkFetched;
1679: for (int index = 0; index < bulkFetched; index++) {
1680: validRow[index] = true;
1681: }
1682: for (int index = bulkFetched; index < bulkFetchSize; index++) {
1683: validRow[index] = false;
1684: }
1685: }
1686:
1687: if (validRow[currentCompressRow]) {
1688: if (compressTable)
1689: currentRow = baseRow[currentCompressRow];
1690: else {
1691: if (currentRow == null)
1692: currentRow = activation
1693: .getExecutionFactory()
1694: .getValueRow(
1695: baseRowArray[currentCompressRow].length - 1);
1696: for (int i = 0; i < currentRow.nColumns(); i++) {
1697: currentRow
1698: .setColumn(
1699: i + 1,
1700: i < columnPosition - 1 ? baseRow[currentCompressRow]
1701: .getColumn(i + 1)
1702: : baseRow[currentCompressRow]
1703: .getColumn(i + 1 + 1));
1704: }
1705: }
1706: currentCompressRow++;
1707: }
1708:
1709: if (currentRow != null) {
1710: /* Let the target preprocess the row. For now, this
1711: * means doing an in place clone on any indexed columns
1712: * to optimize cloning and so that we don't try to drain
1713: * a stream multiple times.
1714: */
1715: if (compressIRGs.length > 0) {
1716: /* Do in-place cloning of all of the key columns */
1717: currentRow = currentRow.getClone(indexedCols);
1718: }
1719:
1720: return currentRow.getRowArray();
1721: }
1722:
1723: return null;
1724: }
1725:
1726: /**
1727: * @see RowSource#needsToClone
1728: */
1729: public boolean needsToClone() {
1730: return (true);
1731: }
1732:
1733: /**
1734: * @see RowSource#closeRowSource
1735: */
1736: public void closeRowSource() {
1737: // Do nothing here - actual work will be done in close()
1738: }
1739:
1740: // RowLocationRetRowSource interface
1741:
1742: /**
1743: * @see RowLocationRetRowSource#needsRowLocation
1744: */
1745: public boolean needsRowLocation() {
1746: // Only true if table has indexes
1747: return (numIndexes > 0);
1748: }
1749:
1750: /**
1751: * @see RowLocationRetRowSource#rowLocation
1752: * @exception StandardException on error
1753: */
1754: public void rowLocation(RowLocation rl) throws StandardException {
1755: /* Set up sorters, etc. if 1st row and there are indexes */
1756: if (compressIRGs.length > 0) {
1757: objectifyStreamingColumns();
1758:
1759: /* Put the row into the indexes. If sequential,
1760: * then we only populate the 1st sorter when compressing
1761: * the heap.
1762: */
1763: int maxIndex = compressIRGs.length;
1764: if (maxIndex > 1 && sequential) {
1765: maxIndex = 1;
1766: }
1767: for (int index = 0; index < maxIndex; index++) {
1768: insertIntoSorter(index, rl);
1769: }
1770: }
1771: }
1772:
1773: private void objectifyStreamingColumns() throws StandardException {
1774: // Objectify any the streaming columns that are indexed.
1775: for (int i = 0; i < currentRow.getRowArray().length; i++) {
1776: /* Object array is 0-based,
1777: * indexedCols is 1-based.
1778: */
1779: if (!indexedCols.get(i + 1)) {
1780: continue;
1781: }
1782:
1783: if (currentRow.getRowArray()[i] instanceof StreamStorable) {
1784: ((DataValueDescriptor) currentRow.getRowArray()[i])
1785: .getObject();
1786: }
1787: }
1788: }
1789:
    /**
     * Build the index row for the given index from the current base row and
     * its location, then insert it into the matching sorter.
     *
     * @param index which index (position into compressIRGs/indexRows/sorters)
     * @param rl row location of the current base row; it is cloned here -
     *           presumably because the scan may reuse the RowLocation object
     *           on the next fetch (NOTE(review): confirm)
     *
     * @exception StandardException on error
     */
    private void insertIntoSorter(int index, RowLocation rl)
            throws StandardException {
        // Get a new object Array for the index
        indexRows[index].getNewObjectArray();
        // Associate the index row with the source row
        compressIRGs[index].getIndexRow(currentRow, (RowLocation) rl
                .cloneObject(), indexRows[index],
                (FormatableBitSet) null);

        // Insert the index row into the matching sorter
        sorters[index].insert(indexRows[index].getRowArray());
    }
1802:
1803: /**
1804: * @see ResultSet#cleanUp
1805: *
1806: * @exception StandardException Thrown on error
1807: */
1808: public void cleanUp() throws StandardException {
1809: if (compressHeapCC != null) {
1810: compressHeapCC.close();
1811: compressHeapCC = null;
1812: }
1813:
1814: if (compressHeapGSC != null) {
1815: closeBulkFetchScan();
1816: }
1817:
1818: // Close each sorter
1819: if (sorters != null) {
1820: for (int index = 0; index < compressIRGs.length; index++) {
1821: if (sorters[index] != null) {
1822: sorters[index].close();
1823: }
1824: sorters[index] = null;
1825: }
1826: }
1827:
1828: if (needToDropSort != null) {
1829: for (int index = 0; index < needToDropSort.length; index++) {
1830: if (needToDropSort[index]) {
1831: tc.dropSort(sortIds[index]);
1832: needToDropSort[index] = false;
1833: }
1834: }
1835: }
1836: }
1837:
1838: // class implementation
1839:
1840: /**
1841: * Return the "semi" row count of a table. We are only interested in
1842: * whether the table has 0, 1 or > 1 rows.
1843: *
1844: *
1845: * @return Number of rows (0, 1 or > 1) in table.
1846: *
1847: * @exception StandardException Thrown on failure
1848: */
1849: private int getSemiRowCount(TransactionController tc)
1850: throws StandardException {
1851: int numRows = 0;
1852:
1853: ScanController sc = tc.openScan(
1854: td.getHeapConglomerateId(),
1855: false, // hold
1856: 0, // open read only
1857: TransactionController.MODE_TABLE,
1858: TransactionController.ISOLATION_SERIALIZABLE,
1859: RowUtil.EMPTY_ROW_BITSET, // scanColumnList
1860: null, // start position
1861: ScanController.GE, // startSearchOperation
1862: null, // scanQualifier
1863: null, //stop position - through last row
1864: ScanController.GT); // stopSearchOperation
1865:
1866: while (sc.next()) {
1867: numRows++;
1868:
1869: // We're only interested in whether the table has 0, 1 or > 1 rows
1870: if (numRows == 2) {
1871: break;
1872: }
1873: }
1874: sc.close();
1875:
1876: return numRows;
1877: }
1878:
1879: /**
1880: * Update a new column with its default.
1881: * We could do the scan ourself here, but
1882: * instead we get a nested connection and
1883: * issue the appropriate update statement.
1884: *
1885: * @param columnName column name
1886: * @param defaultText default text
1887: * @param lcc the language connection context
1888: *
1889: * @exception StandardException if update to default fails
1890: */
1891: private void updateNewColumnToDefault(Activation activation,
1892: String columnName, String defaultText,
1893: LanguageConnectionContext lcc) throws StandardException {
1894: /* Need to use delimited identifiers for all object names
1895: * to ensure correctness.
1896: */
1897: String updateStmt = "UPDATE \"" + td.getSchemaName() + "\".\""
1898: + td.getName() + "\" SET \"" + columnName + "\" = "
1899: + defaultText;
1900:
1901: AlterTableConstantAction.executeUpdate(lcc, updateStmt);
1902: }
1903:
1904: private static void executeUpdate(LanguageConnectionContext lcc,
1905: String updateStmt) throws StandardException {
1906: PreparedStatement ps = lcc.prepareInternalStatement(updateStmt);
1907:
1908: // This is a substatement; for now, we do not set any timeout
1909: // for it. We might change this behaviour later, by linking
1910: // timeout to its parent statement's timeout settings.
1911: ResultSet rs = ps.execute(lcc, true, 0L);
1912: rs.close();
1913: rs.finish();
1914: }
1915:
1916: /**
1917: * computes the minimum/maximum value in a column of a table.
1918: */
1919: private long getColumnMax(Activation activation,
1920: TableDescriptor td, String columnName, long increment,
1921: long initial) throws StandardException {
1922: String maxStr = (increment > 0) ? "MAX" : "MIN";
1923: String maxStmt = "SELECT " + maxStr + "(\"" + columnName
1924: + "\")" + "FROM \"" + td.getSchemaName() + "\".\""
1925: + td.getName() + "\"";
1926:
1927: LanguageConnectionContext lcc = activation
1928: .getLanguageConnectionContext();
1929: PreparedStatement ps = lcc.prepareInternalStatement(maxStmt);
1930:
1931: // This is a substatement, for now we do not set any timeout for it
1932: // We might change this later by linking timeout to parent statement
1933: ResultSet rs = ps.execute(lcc, false, 0L);
1934: DataValueDescriptor[] rowArray = rs.getNextRow().getRowArray();
1935: rs.close();
1936: rs.finish();
1937:
1938: return rowArray[0].getLong();
1939: }
1940:
1941: private void dropAllColumnDefaults(UUID tableId, DataDictionary dd)
1942: throws StandardException {
1943: ColumnDescriptorList cdl = td.getColumnDescriptorList();
1944: int cdlSize = cdl.size();
1945:
1946: for (int index = 0; index < cdlSize; index++) {
1947: ColumnDescriptor cd = (ColumnDescriptor) cdl
1948: .elementAt(index);
1949:
1950: // If column has a default we drop the default and
1951: // any dependencies
1952: if (cd.getDefaultInfo() != null) {
1953: DefaultDescriptor defaultDesc = cd
1954: .getDefaultDescriptor(dd);
1955: dm.clearDependencies(lcc, defaultDesc);
1956: }
1957: }
1958: }
1959:
    /**
     * Open a group (bulk) fetch scan over the given heap conglomerate.
     * The scan covers the whole table (null start/stop keys), fetches all
     * fields as objects, and is opened read-only with table-level locking
     * and serializable isolation.
     *
     * @param heapConglomNumber conglomerate id of the heap to scan
     *
     * @exception StandardException on error opening the scan
     */
    private void openBulkFetchScan(long heapConglomNumber)
            throws StandardException {
        // Reset the scan-complete flag used by getNextRowFromRowSource().
        doneScan = false;
        compressHeapGSC = tc.openGroupFetchScan(
                heapConglomNumber,
                false, // hold
                0, // open base table read only
                TransactionController.MODE_TABLE,
                TransactionController.ISOLATION_SERIALIZABLE, null, // all fields as objects
                (DataValueDescriptor[]) null, // startKeyValue
                0, // not used when giving null start posn.
                null, // qualifier
                (DataValueDescriptor[]) null, // stopKeyValue
                0); // not used when giving null stop posn.
    }
1975:
1976: private void closeBulkFetchScan() throws StandardException {
1977: compressHeapGSC.close();
1978: compressHeapGSC = null;
1979: }
1980:
    /**
     * Update values in a new autoincrement column being added to a table.
     * This is similar to updateNewColumnToDefault whereby we issue an
     * update statement using a nested connection. The UPDATE statement
     * uses a static method in ConnectionInfo (which is not documented)
     * which returns the next value to be inserted into the autoincrement
     * column.
     *
     * @param activation the activation supplying the connection context
     * @param columnName autoincrement column name that is being added.
     * @param initial initial value of the autoincrement column.
     * @param increment increment value of the autoincrement column.
     *
     * @exception StandardException on error (an out-of-range overflow is
     *            rethrown as LANG_AI_OVERFLOW for a clearer message)
     *
     * @see #updateNewColumnToDefault
     */
    private void updateNewAutoincrementColumn(Activation activation,
            String columnName, long initial, long increment)
            throws StandardException {
        LanguageConnectionContext lcc = activation
                .getLanguageConnectionContext();

        // Don't throw an error in bind when we try to update the
        // autoincrement column.
        lcc.setAutoincrementUpdate(true);

        // Register an in-memory counter for this column so the UPDATE
        // below can pull successive values from it.
        lcc.autoincrementCreateCounter(td.getSchemaName(),
                td.getName(), columnName, new Long(initial), increment,
                0);
        // the sql query is.
        // UPDATE table
        // set ai_column = ConnectionInfo.nextAutoincrementValue(
        // schemaName, tableName,
        // columnName)
        String updateStmt = "UPDATE \"" + td.getSchemaName() + "\".\""
                + td.getName() + "\" SET \"" + columnName + "\" = "
                + "org.apache.derby.iapi.db.ConnectionInfo::"
                + "nextAutoincrementValue(" + "'" + td.getSchemaName()
                + "'" + "," + "'" + td.getName() + "'" + "," + "'"
                + columnName + "'" + ")";

        try {
            AlterTableConstantAction.executeUpdate(lcc, updateStmt);
        } catch (StandardException se) {
            if (se.getMessageId().equals(
                    SQLState.LANG_OUTSIDE_RANGE_FOR_DATATYPE)) {
                // If overflow, override with more meaningful message.
                throw StandardException.newException(
                        SQLState.LANG_AI_OVERFLOW, se, td.getName(),
                        columnName);
            }
            throw se;
        } finally {
            // and now update the autoincrement value, and always drop the
            // bind-time exemption set above, even on failure.
            lcc.autoincrementFlushCache(td.getUUID());
            lcc.setAutoincrementUpdate(false);
        }

    }
2038:
2039: /**
2040: * Make sure that the columns are non null
2041: * If any column is nullable, check that the data is null.
2042: *
2043: * @param columnNames names of columns to be checked
2044: * @param nullCols true if corresponding column is nullable
2045: * @param numRows number of rows in the table
2046: * @param lcc language context
2047: * @param errorMsg error message to use for exception
2048: *
2049: * @return true if any nullable columns found (nullable columns must have
2050: * all non null data or exception is thrown
2051: * @exception StandardException on error
2052: */
2053: private boolean validateNotNullConstraint(String columnNames[],
2054: boolean nullCols[], int numRows,
2055: LanguageConnectionContext lcc, String errorMsg)
2056: throws StandardException {
2057: boolean foundNullable = false;
2058: StringBuffer constraintText = new StringBuffer();
2059:
2060: /*
2061: * Check for nullable columns and create a constraint string which can
2062: * be used in validateConstraint to check whether any of the
2063: * data is null.
2064: */
2065: for (int colCtr = 0; colCtr < columnNames.length; colCtr++) {
2066: ColumnDescriptor cd = td
2067: .getColumnDescriptor(columnNames[colCtr]);
2068:
2069: if (cd == null) {
2070: throw StandardException.newException(
2071: SQLState.LANG_COLUMN_NOT_FOUND_IN_TABLE,
2072: columnNames[colCtr], td.getName());
2073: }
2074:
2075: if (cd.getType().isNullable()) {
2076: if (numRows > 0) {
2077: // already found a nullable column so add "AND"
2078: if (foundNullable)
2079: constraintText.append(" AND ");
2080: constraintText.append(columnNames[colCtr]
2081: + " IS NOT NULL ");
2082: }
2083: foundNullable = true;
2084: nullCols[colCtr] = true;
2085: }
2086: }
2087:
2088: /* if the table has nullable columns and isn't empty
2089: * we need to validate the data
2090: */
2091: if (foundNullable && numRows > 0) {
2092: if (!ConstraintConstantAction.validateConstraint(
2093: (String) null, constraintText.toString(), td, lcc,
2094: false)) {
2095: if (errorMsg
2096: .equals(SQLState.LANG_NULL_DATA_IN_PRIMARY_KEY)) { //alter table add primary key
2097: throw StandardException.newException(
2098: SQLState.LANG_NULL_DATA_IN_PRIMARY_KEY, td
2099: .getQualifiedName());
2100: } else { //alter table modify column not null
2101: throw StandardException.newException(
2102: SQLState.LANG_NULL_DATA_IN_NON_NULL_COLUMN,
2103: td.getQualifiedName(), columnNames[0]);
2104: }
2105: }
2106: }
2107: return foundNullable;
2108: }
2109:
    /**
     * Get rid of duplicates from a set of index conglomerate numbers and
     * index descriptors.
     *
     * @param indexCIDS array of index conglomerate numbers
     * @param irgs array of index row generaters
     *
     * @return value: If no duplicates, returns NULL; otherwise,
     * a size-3 array of objects, first element is an
     * array of duplicates' indexes in the input arrays;
     * second element is the compact indexCIDS; third
     * element is the compact irgs.
     */
    private Object[] compressIndexArrays(long[] indexCIDS,
            IndexRowGenerator[] irgs) {
        /* An efficient way to compress indexes. From one end of workSpace,
         * we save unique conglom IDs; and from the other end we save
         * duplicate indexes' indexes. We save unique conglom IDs so that
         * we can do less amount of comparisons. This is efficient in
         * space as well. No need to use hash table.
         */
        // j: next free slot at the front (unique conglom IDs).
        // k: next free slot at the back (positions of duplicates).
        long[] workSpace = new long[indexCIDS.length];
        int j = 0, k = indexCIDS.length - 1;
        for (int i = 0; i < indexCIDS.length; i++) {
            int m;
            for (m = 0; m < j; m++) // look up our unique set
            {
                if (indexCIDS[i] == workSpace[m]) // it's a duplicate
                {
                    workSpace[k--] = i; // save dup index's index
                    break;
                }
            }
            // Inner loop ran to completion => indexCIDS[i] is new.
            if (m == j)
                workSpace[j++] = indexCIDS[i]; // save unique conglom id
        }
        if (j < indexCIDS.length) // duplicate exists
        {
            long[] newIndexCIDS = new long[j];
            IndexRowGenerator[] newIrgs = new IndexRowGenerator[j];
            int[] duplicateIndexes = new int[indexCIDS.length - j];
            k = 0;
            // do everything in one loop
            // m walks forward over workSpace; n walks backward over the
            // duplicate positions stored at the back of workSpace.
            for (int m = 0, n = indexCIDS.length - 1; m < indexCIDS.length; m++) {
                // we already gathered our indexCIDS and duplicateIndexes
                if (m < j)
                    newIndexCIDS[m] = workSpace[m];
                else
                    // Back half of workSpace holds dup positions in reverse
                    // discovery order; this re-reverses them.
                    duplicateIndexes[indexCIDS.length - m - 1] = (int) workSpace[m];

                // stack up our irgs, indexSCOCIs, indexDCOCIs
                // Skip irgs[m] if position m was recorded as a duplicate.
                if ((n >= j) && (m == (int) workSpace[n]))
                    n--;
                else {
                    newIrgs[k] = irgs[m];
                    k++;
                }
            }

            // construct return value
            Object[] returnValue = new Object[3]; // [indexSCOCIs == null ? 3 : 5];
            returnValue[0] = duplicateIndexes;
            returnValue[1] = newIndexCIDS;
            returnValue[2] = newIrgs;
            return returnValue;
        } else
            // no duplicates
            return null;
    }
2179:
2180: }
|