/*

   Derby - Class org.apache.derby.impl.sql.execute.InsertResultSet

   Licensed to the Apache Software Foundation (ASF) under one or more
   contributor license agreements.  See the NOTICE file distributed with
   this work for additional information regarding copyright ownership.
   The ASF licenses this file to you under the Apache License, Version 2.0
   (the "License"); you may not use this file except in compliance with
   the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 */

package org.apache.derby.impl.sql.execute;

import org.apache.derby.iapi.services.loader.GeneratedMethod;

import org.apache.derby.iapi.services.context.ContextManager;

import org.apache.derby.iapi.services.monitor.Monitor;

import org.apache.derby.iapi.services.sanity.SanityManager;

import org.apache.derby.iapi.services.stream.HeaderPrintWriter;
import org.apache.derby.iapi.services.stream.InfoStreams;
import org.apache.derby.iapi.services.io.StreamStorable;

import org.apache.derby.iapi.error.StandardException;

import org.apache.derby.iapi.sql.StatementUtil;
import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;

import org.apache.derby.iapi.types.DataValueDescriptor;
import org.apache.derby.iapi.types.TypeId;
import org.apache.derby.iapi.types.RowLocation;

import org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor;
import org.apache.derby.iapi.sql.dictionary.DataDictionary;
import org.apache.derby.iapi.sql.dictionary.DataDictionaryContext;
import org.apache.derby.iapi.sql.dictionary.IndexRowGenerator;
import org.apache.derby.iapi.sql.dictionary.TableDescriptor;
import org.apache.derby.iapi.sql.dictionary.ColumnDescriptor;
import org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator;
import org.apache.derby.iapi.sql.dictionary.StatisticsDescriptor;
import org.apache.derby.iapi.sql.dictionary.TriggerDescriptor;
import org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor;
import org.apache.derby.iapi.sql.depend.DependencyManager;

import org.apache.derby.iapi.sql.ResultColumnDescriptor;

import org.apache.derby.iapi.reference.SQLState;

import org.apache.derby.iapi.sql.execute.ConstantAction;
import org.apache.derby.iapi.sql.execute.CursorResultSet;
import org.apache.derby.iapi.sql.execute.ExecIndexRow;
import org.apache.derby.iapi.sql.execute.ExecRow;
import org.apache.derby.iapi.sql.execute.RowChanger;
import org.apache.derby.iapi.sql.execute.NoPutResultSet;
import org.apache.derby.iapi.sql.execute.TargetResultSet;

import org.apache.derby.iapi.types.NumberDataValue;

import org.apache.derby.iapi.sql.Activation;
import org.apache.derby.iapi.sql.LanguageProperties;
import org.apache.derby.iapi.sql.ResultDescription;
import org.apache.derby.iapi.sql.ResultSet;

import org.apache.derby.iapi.store.access.ColumnOrdering;
import org.apache.derby.iapi.store.access.ConglomerateController;
import org.apache.derby.iapi.store.access.DynamicCompiledOpenConglomInfo;
import org.apache.derby.iapi.store.access.GroupFetchScanController;
import org.apache.derby.iapi.store.access.Qualifier;
import org.apache.derby.iapi.store.access.RowLocationRetRowSource;
import org.apache.derby.iapi.store.access.ScanController;
import org.apache.derby.iapi.store.access.SortObserver;
import org.apache.derby.iapi.store.access.SortController;
import org.apache.derby.iapi.store.access.StaticCompiledOpenConglomInfo;
import org.apache.derby.iapi.store.access.TransactionController;

import org.apache.derby.impl.sql.execute.AutoincrementCounter;
import org.apache.derby.impl.sql.execute.InternalTriggerExecutionContext;

import org.apache.derby.catalog.UUID;
import org.apache.derby.catalog.types.StatisticsImpl;
import org.apache.derby.iapi.db.TriggerExecutionContext;
import org.apache.derby.iapi.services.io.FormatableBitSet;
import org.apache.derby.iapi.util.StringUtil;

import java.util.Enumeration;
import java.util.Hashtable;
import java.util.Properties;
import java.util.Vector;

/**
 * Insert the rows from the source into the specified
 * base table. This will cause constraints to be checked
 * and triggers to be executed, based on the constraints
 * and triggers compiled into the insert plan.
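 *
 * <p>
 * A sketch of the two execution paths (the "insertMode" property is read in
 * the constructor below; the property syntax shown here is illustrative,
 * not authoritative):
 *
 * <pre>
 *   -- row-at-a-time path
 *   INSERT INTO t VALUES (1), (2);
 *
 *   -- bulk path, requested via a target-table property
 *   INSERT INTO t PROPERTIES insertMode=bulkInsert
 *   SELECT * FROM s;
 * </pre>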
 */
class InsertResultSet extends DMLWriteResultSet implements TargetResultSet {
    // RESOLVE. Embarrassingly large public state. If we could move the
    // Replication code into the same package, then these variables could
    // be protected.

    // passed in at construction time

    private NoPutResultSet sourceResultSet;
    NoPutResultSet savedSource;
    InsertConstantAction constants;
    private GeneratedMethod checkGM;
    private long heapConglom;

    // the following supports the JDBC 3.0 auto-generated keys result set
    private ResultSet autoGeneratedKeysResultSet;
    private TemporaryRowHolderImpl autoGeneratedKeysRowsHolder;

    // divined at run time

    private ResultDescription resultDescription;
    private RowChanger rowChanger;

    private TransactionController tc;
    private ExecRow row;

    boolean userSpecifiedBulkInsert;
    boolean bulkInsertPerformed;

    // bulkInsert
    protected boolean bulkInsert;
    private boolean bulkInsertReplace;
    private boolean firstRow = true;
    private boolean[] needToDropSort;

    /*
    ** This hashtable is used to convert an index conglomerate
    ** from its old conglom number to the new one. It is
    ** bulk insert specific.
    */
    private Hashtable indexConversionTable;

    // indexedCols is 1-based
    private FormatableBitSet indexedCols;
    private ConglomerateController bulkHeapCC;

    protected DataDictionary dd;
    protected TableDescriptor td;

    private ExecIndexRow[] indexRows;
    private ExecRow fullTemplate;
    private long[] sortIds;
    private RowLocationRetRowSource[] rowSources;
    private ScanController bulkHeapSC;
    private ColumnOrdering[][] ordering;
    private SortController[] sorters;
    private TemporaryRowHolderImpl rowHolder;
    private RowLocation rl;

    private boolean hasBeforeStatementTrigger;
    private boolean hasBeforeRowTrigger;
    private BulkTableScanResultSet tableScan;

    private int numOpens;
    private boolean firstExecute;

    // cached across open()s
    private FKInfo[] fkInfoArray;
    private TriggerInfo triggerInfo;
    private RISetChecker fkChecker;
    private TriggerEventActivator triggerActivator;
    /**
     * Keeps track of autoincrement values that are generated by
     * getSetAutoincrementValues.
     */
    private NumberDataValue aiCache[];

    /**
     * If set to true, implies that this (rep)insertresultset has generated
     * autoincrement values. During refresh, for example, the autoincrement
     * values are not generated but sent from the source to the target or
     * vice-versa.
     */
    protected boolean autoincrementGenerated;
    private long identityVal; // supports the IDENTITY_VAL_LOCAL function
    private boolean setIdentity;

    /**
     * Returns the description of the inserted rows.
     * REVISIT: Do we want this to return NULL instead?
     */
    public ResultDescription getResultDescription() {
        return resultDescription;
    }

    // TargetResultSet interface

    /**
     * @see TargetResultSet#changedRow
     *
     * @exception StandardException thrown if cursor finished.
     */
    public void changedRow(ExecRow execRow, RowLocation rowLocation)
            throws StandardException {
        if (SanityManager.DEBUG) {
            SanityManager.ASSERT(bulkInsert,
                    "bulkInsert expected to be true");
        }

        /* Set up sorters, etc. if 1st row and there are indexes */
        if (constants.irgs.length > 0) {
            RowLocation rlClone = (RowLocation) rowLocation.cloneObject();

            // Objectify any streaming columns that are indexed.
            for (int i = 0; i < execRow.getRowArray().length; i++) {
                if (!constants.indexedCols[i]) {
                    continue;
                }

                if (execRow.getRowArray()[i] instanceof StreamStorable)
                    ((DataValueDescriptor) execRow.getRowArray()[i]).getObject();
            }

            // Every index row will share the same row location, etc.
            if (firstRow) {
                firstRow = false;
                indexRows = new ExecIndexRow[constants.irgs.length];
                setUpAllSorts(execRow.getNewNullRow(), rlClone);
            }

            // Put the row into the indexes
            for (int index = 0; index < constants.irgs.length; index++) {
                // Get a new object array for the index
                indexRows[index].getNewObjectArray();
                // Associate the index row with the source row
                constants.irgs[index].getIndexRow(execRow, rlClone,
                        indexRows[index], (FormatableBitSet) null);

                // Insert the index row into the matching sorter
                sorters[index].insert(indexRows[index].getRowArray());
            }
        }
    }
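
    // Illustrative shape of what changedRow() feeds each sorter, assuming an
    // index on base columns (c2, c5): the index row built above is
    // (row.c2, row.c5, rowLocation), so every index row derived from a given
    // base row shares the same RowLocation clone.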

    /**
     * Preprocess the source row. Apply any check constraints here.
     * Do an in-place cloning of all key columns. For triggers, if
     * we have a before row trigger, we fire it here if we can.
     * This is useful for bulk insert, where the store stands between
     * the source and us.
     *
     * @param execRow The source row.
     *
     * @return The preprocessed source row.
     * @exception StandardException thrown on error
     */
    public ExecRow preprocessSourceRow(ExecRow execRow)
            throws StandardException {
        //System.out.println("preprocessrow is called ");
        /*
        ** We can process before row triggers now. All other
        ** triggers can only be fired after we have inserted
        ** all our rows.
        */
        if (hasBeforeRowTrigger) {
            // RESOLVE
            // Possibly dead code: if there are triggers, we don't do bulk insert.
            rowHolder.truncate();
            rowHolder.insert(execRow);
            triggerActivator.notifyEvent(TriggerEvents.BEFORE_INSERT,
                    (CursorResultSet) null, rowHolder.getResultSet());
        }

        if (checkGM != null && !hasBeforeStatementTrigger) {
            evaluateCheckConstraints();
        }
        // RESOLVE - optimize the cloning
        if (constants.irgs.length > 0) {
            /* Do in-place cloning of all of the key columns */
            return execRow.getClone(indexedCols);
        } else {
            return execRow;
        }
    }

    /**
     * Run the check constraints against the current row. Raise an error if
     * a check constraint is violated.
     *
     * @exception StandardException thrown on error
     */
    private void evaluateCheckConstraints() throws StandardException {
        if (checkGM != null) {

            // Evaluate the check constraints. The expression evaluation
            // will throw an exception if there is a violation, so there
            // is no need to check the result of the expression.
            checkGM.invoke(activation);
        }
    }
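
    // Illustrative: for a constraint such as CHECK (c1 > 0), checkGM is
    // generated code that evaluates the constraint against the current row
    // (established via sourceResultSet.setCurrentRow() on the deferred and
    // bulk paths) and throws on violation, which is why the call above
    // ignores any return value.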

    /*
     * class interface
     */
    /**
     * @exception StandardException Thrown on error
     */
    InsertResultSet(NoPutResultSet source, GeneratedMethod checkGM,
            Activation activation) throws StandardException {
        super(activation);
        sourceResultSet = source;
        constants = (InsertConstantAction) constantAction;
        this.checkGM = checkGM;
        heapConglom = constants.conglomId;

        tc = activation.getTransactionController();
        fkInfoArray = constants.getFKInfo(lcc.getExecutionContext());
        triggerInfo = constants.getTriggerInfo(lcc.getExecutionContext());

        /*
        ** If we have a before statement trigger, then
        ** we cannot check constraints inline.
        */
        hasBeforeStatementTrigger = (triggerInfo != null)
                ? triggerInfo.hasTrigger(true, false) : false;

        hasBeforeRowTrigger = (triggerInfo != null)
                ? triggerInfo.hasTrigger(true, true) : false;

        resultDescription = sourceResultSet.getResultDescription();

        // Is this a bulkInsert or regular insert?
        String insertMode = constants.getProperty("insertMode");

        RowLocation[] rla;

        if ((rla = constants.getAutoincRowLocation()) != null) {
            aiCache = new NumberDataValue[rla.length];
            for (int i = 0; i < resultDescription.getColumnCount(); i++) {
                if (rla[i] == null)
                    continue;
                ResultColumnDescriptor rcd = resultDescription
                        .getColumnDescriptor(i + 1);
                aiCache[i] = (NumberDataValue) rcd.getType().getNull();
            }
        }

        if (insertMode != null) {
            if (StringUtil.SQLEqualsIgnoreCase(insertMode, "BULKINSERT")) {
                userSpecifiedBulkInsert = true;
            } else if (StringUtil.SQLEqualsIgnoreCase(insertMode, "REPLACE")) {
                userSpecifiedBulkInsert = true;
                bulkInsertReplace = true;
                bulkInsert = true;

                /*
                ** For now, we don't allow bulk insert replace when
                ** there is a trigger.
                */
                if (triggerInfo != null) {
                    TriggerDescriptor td = triggerInfo.getTriggerArray()[0];
                    throw StandardException.newException(
                            SQLState.LANG_NO_BULK_INSERT_REPLACE_WITH_TRIGGER_DURING_EXECUTION,
                            constants.getTableName(), td.getName());
                }
            }
        }

        //System.out.println("new InsertResultSet " + sourceResultSet.getClass());
    }

    /**
     * @exception StandardException Standard Cloudscape error policy
     */
    public void open() throws StandardException {
        // Remember if this is the 1st execution
        firstExecute = (rowChanger == null);

        autoincrementGenerated = false;

        dd = lcc.getDataDictionary();

        /*
        ** Verify the auto-generated key columns list (i.e. that there are no
        ** invalid column names or positions). This is done at execution time
        ** because, for a precompiled insert statement, the user can specify
        ** different column selections for the auto-generated keys.
        */
        if (activation.getAutoGeneratedKeysResultsetMode()) {
            if (activation.getAutoGeneratedKeysColumnIndexes() != null)
                verifyAutoGeneratedColumnsIndexes(activation
                        .getAutoGeneratedKeysColumnIndexes());
            else if (activation.getAutoGeneratedKeysColumnNames() != null)
                verifyAutoGeneratedColumnsNames(activation
                        .getAutoGeneratedKeysColumnNames());
        }
        rowCount = 0;

        if (numOpens++ == 0) {
            sourceResultSet.openCore();
        } else {
            sourceResultSet.reopenCore();
        }

        /* If the user specified bulkInsert (or replace) then we need
         * to get an exclusive table lock on the table. If it is a
         * regular bulk insert then we need to check to see if the
         * table is empty. (If not empty, then we end up doing a row
         * at a time insert.)
         */
        if (userSpecifiedBulkInsert) {
            if (!bulkInsertReplace) {
                bulkInsert = verifyBulkInsert();
            } else {
                getExclusiveTableLock();
            }
        }

        if (bulkInsert) {
            // Notify the source that we are the target
            sourceResultSet.setTargetResultSet(this);
            long baseTableConglom = bulkInsertCore(lcc, heapConglom);

            if (hasBeforeStatementTrigger) {
                tableScan = getTableScanResultSet(baseTableConglom);

                // fire BEFORE trigger; do this before checking constraints
                triggerActivator.notifyEvent(TriggerEvents.BEFORE_INSERT,
                        (CursorResultSet) null, tableScan);

                // if we have a check constraint, we have
                // to do it the hard way now, before we get
                // to our AFTER triggers.
                if (checkGM != null) {
                    tableScan = getTableScanResultSet(baseTableConglom);

                    try {
                        ExecRow currRow = null;
                        while ((currRow = tableScan.getNextRowCore()) != null) {
                            // we have to set the source row so the check
                            // constraint sees the correct row.
                            sourceResultSet.setCurrentRow(currRow);
                            evaluateCheckConstraints();
                        }
                    } finally {
                        sourceResultSet.clearCurrentRow();
                    }
                }
            }

            bulkValidateForeignKeys(tc, lcc.getContextManager());

            // if we have an AFTER trigger, let 'er rip
            if ((triggerInfo != null)
                    && (triggerInfo.hasTrigger(false, true)
                            || triggerInfo.hasTrigger(false, false))) {
                triggerActivator.notifyEvent(TriggerEvents.AFTER_INSERT,
                        (CursorResultSet) null,
                        getTableScanResultSet(baseTableConglom));
            }
            bulkInsertPerformed = true;
        } else {
            row = getNextRowCore(sourceResultSet);
            normalInsertCore(lcc, firstExecute);
        }

        /* Cache query plan text for source, before it gets blown away */
        if (lcc.getRunTimeStatisticsMode()) {
            /* savedSource nulled after run time statistics generation */
            savedSource = sourceResultSet;
        }

        /* autoGeneratedResultset for JDBC 3.0. Nulled after statement execution
         * is over (i.e. after it is saved off in the LocalStatement object).
         */
        if (activation.getAutoGeneratedKeysResultsetMode())
            autoGeneratedKeysResultSet = autoGeneratedKeysRowsHolder
                    .getResultSet();
        else
            autoGeneratedKeysResultSet = null;

        cleanUp();

        if (aiCache != null) {
            Hashtable aiHashtable = new Hashtable();
            int numColumns = aiCache.length;
            // This insert updated autoincrement values; store them in a
            // persistent place so that they can be retrieved later.
            for (int i = 0; i < numColumns; i++) {
                if (aiCache[i] == null)
                    continue;
                aiHashtable.put(AutoincrementCounter.makeIdentity(
                        constants.getSchemaName(),
                        constants.getTableName(),
                        constants.getColumnName(i)),
                        new Long(aiCache[i].getLong()));
            }
            InternalTriggerExecutionContext itec =
                    (InternalTriggerExecutionContext) lcc
                            .getTriggerExecutionContext();
            if (itec == null)
                lcc.copyHashtableToAIHT(aiHashtable);
            else
                itec.copyHashtableToAIHT(aiHashtable);
        }

        endTime = getCurrentTimeMillis();
    }

    /*
     * Verify that the auto-generated columns list (by position) has valid
     * column positions for the table.
     */
    private void verifyAutoGeneratedColumnsIndexes(int[] columnIndexes)
            throws StandardException {
        int size = columnIndexes.length;
        TableDescriptor td = dd.getTableDescriptor(constants.targetUUID);

        // all 1-based column ids.
        for (int i = 0; i < size; i++) {
            if (td.getColumnDescriptor(columnIndexes[i]) == null)
                throw StandardException.newException(
                        SQLState.LANG_COLUMN_POSITION_NOT_FOUND,
                        new Integer(columnIndexes[i]));
        }
    }

    /*
     * If the user didn't provide a column list for the auto-generated columns,
     * then include only columns with auto-generated values in the result set.
     * Those are the autoincrement columns and the columns with a default
     * value defined.
     */
    private int[] generatedColumnPositionsArray()
            throws StandardException {
        TableDescriptor td = dd.getTableDescriptor(constants.targetUUID);
        ColumnDescriptor cd;
        int size = td.getMaxColumnID();

        int[] generatedColumnPositionsArray = new int[size];
        int generatedColumnNumbers = 0;
        for (int i = 0; i < size; i++) {
            generatedColumnPositionsArray[i] = -1;
        }

        for (int i = 0; i < size; i++) {
            cd = td.getColumnDescriptor(i + 1);
            if (cd.isAutoincrement()) { // the column has an auto-increment value
                generatedColumnNumbers++;
                generatedColumnPositionsArray[i] = i + 1;
            } else if (cd.getDefaultValue() != null
                    || cd.getDefaultInfo() != null) { // default value
                generatedColumnNumbers++;
                generatedColumnPositionsArray[i] = i + 1;
            }
        }
        int[] returnGeneratedColumnPositionsArray = new int[generatedColumnNumbers];

        for (int i = 0, j = 0; i < size; i++) {
            if (generatedColumnPositionsArray[i] != -1)
                returnGeneratedColumnPositionsArray[j++] = generatedColumnPositionsArray[i];
        }

        return returnGeneratedColumnPositionsArray;
    }

    /*
     * Remove duplicate columns from the array. Then use this array to generate
     * the subset of the insert result set to be returned for the JDBC 3.0
     * getGeneratedKeys() call.
     */
    private int[] uniqueColumnPositionArray(int[] columnIndexes)
            throws StandardException {
        int size = columnIndexes.length;
        TableDescriptor td = dd.getTableDescriptor(constants.targetUUID);

        // Create an array of integers (the array size = number of columns
        // in the table); valid column positions are 1...getMaxColumnID().
        int[] uniqueColumnIndexes = new int[td.getMaxColumnID()];

        int uniqueColumnNumbers = 0;

        // At the end of the following loop, the uniqueColumnIndexes elements
        // are non-zero for the user-selected auto-generated columns.
        for (int i = 0; i < size; i++) {
            if (uniqueColumnIndexes[columnIndexes[i] - 1] == 0) {
                uniqueColumnNumbers++;
                uniqueColumnIndexes[columnIndexes[i] - 1] = columnIndexes[i];
            }
        }
        int[] returnUniqueColumnIndexes = new int[uniqueColumnNumbers];

        // Return just the column positions which are not marked 0 in the
        // uniqueColumnIndexes array.
        for (int i = 0, j = 0; i < uniqueColumnIndexes.length; i++) {
            if (uniqueColumnIndexes[i] != 0)
                returnUniqueColumnIndexes[j++] = uniqueColumnIndexes[i];
        }

        return returnUniqueColumnIndexes;
    }

    /**
     * Verify that the auto-generated columns list (by name) has valid
     * column names for the table. If all the column names are valid,
     * convert the column names array to a corresponding column positions
     * array and save it in the activation. We do this to simplify the
     * rest of the logic (it only has to deal with column positions hereafter).
     *
     * @exception StandardException Thrown on error if there is an invalid
     *            column name in the list.
     */
    private void verifyAutoGeneratedColumnsNames(String[] columnNames)
            throws StandardException {
        int size = columnNames.length;
        int columnPositions[] = new int[size];

        TableDescriptor td = dd.getTableDescriptor(constants.targetUUID);
        ColumnDescriptor cd;

        for (int i = 0; i < size; i++) {
            if (columnNames[i] == null)
                throw StandardException.newException(
                        SQLState.LANG_COLUMN_NAME_NOT_FOUND,
                        columnNames[i]);
            cd = td.getColumnDescriptor(columnNames[i]);
            if (cd == null)
                throw StandardException.newException(
                        SQLState.LANG_COLUMN_NAME_NOT_FOUND,
                        columnNames[i]);
            else
                columnPositions[i] = cd.getPosition();
        }
        activation.setAutoGeneratedKeysResultsetInfo(columnPositions, null);
    }

    /**
     * @see ResultSet#getAutoGeneratedKeysResultset
     */
    public ResultSet getAutoGeneratedKeysResultset() {
        return autoGeneratedKeysResultSet;
    }
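
    // Illustrative JDBC 3.0 client usage that drives the auto-generated keys
    // path above (assumed application code, not part of this class):
    //
    //   stmt.executeUpdate("INSERT INTO t(name) VALUES ('x')",
    //                      Statement.RETURN_GENERATED_KEYS);
    //   java.sql.ResultSet keys = stmt.getGeneratedKeys();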

    /**
     * getSetAutoincrementValue will get the autoincrement value of the
     * columnPosition specified for the target table. If increment is
     * non-zero we will also update the autoincrement value.
     *
     * @param columnPosition position of the column in the table (1-based)
     * @param increment amount of the increment.
     *
     * @exception StandardException if anything goes wrong.
     */
    public NumberDataValue getSetAutoincrementValue(int columnPosition,
            long increment) throws StandardException {
        long startValue = 0;
        NumberDataValue dvd;
        int index = columnPosition - 1; // all our indices are 0-based.

        /* As in DB2, only for a single-row insert (insert into t1(c1) values (..))
         * do we return the most recently generated identity column value. For a
         * multiple-row insert, or an insert with a sub-select, the return value
         * is non-deterministic and is the previous return value of the
         * IDENTITY_VAL_LOCAL function, from before the insert statement. Also,
         * DB2 can have at most 1 identity column per table. The return value
         * won't be affected either if a Cloudscape table has more than one
         * identity column.
         */
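        /* Illustrative consequence of the rule above (assumed SQL):
         *   INSERT INTO t1(c1) VALUES (5);         -- single row: IDENTITY_VAL_LOCAL()
         *                                          -- reflects the value just generated
         *   INSERT INTO t1(c1) SELECT c1 FROM t2;  -- multi-row: IDENTITY_VAL_LOCAL()
         *                                          -- keeps its previous value
         */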
        setIdentity = (!autoincrementGenerated) && isSourceRowResultSet();
        autoincrementGenerated = true;

        if (bulkInsert) {
            ColumnDescriptor cd = td.getColumnDescriptor(columnPosition);
            long ret;

            // for bulk insert we have the table descriptor
            // System.out.println("in bulk insert");
            if (aiCache[index].isNull()) {
                if (bulkInsertReplace) {
                    startValue = cd.getAutoincStart();
                } else {
                    dvd = dd.getSetAutoincrementValue(
                            constants.autoincRowLocation[index], tc,
                            false, aiCache[index], true);
                    startValue = dvd.getLong();
                }
                lcc.autoincrementCreateCounter(td.getSchemaName(),
                        td.getName(), cd.getColumnName(),
                        new Long(startValue), increment, columnPosition);
            }
            ret = lcc.nextAutoincrementValue(td.getSchemaName(),
                    td.getName(), cd.getColumnName());
            aiCache[columnPosition - 1].setValue(ret);
        } else {
            NumberDataValue newValue;
            TransactionController nestedTC = null, tcToUse = tc;

            try {
                nestedTC = tc.startNestedUserTransaction(false);
                tcToUse = nestedTC;
            } catch (StandardException se) {
                // If we cannot start a nested user transaction, use the
                // parent transaction to do all the work.
                tcToUse = tc;
            }

            try {
                /* If tcToUse == tc, then we are using the parent transaction;
                 * this can happen if for some reason we couldn't start a
                 * nested transaction.
                 */
                newValue = dd.getSetAutoincrementValue(
                        constants.autoincRowLocation[index], tcToUse,
                        true, aiCache[index], (tcToUse == tc));
            } catch (StandardException se) {
                if (tcToUse == tc) {
                    /* We're using the parent transaction and we've timed out;
                     * just rethrow the error and exit.
                     */
                    throw se;
                }

                if (se.getMessageId().equals(SQLState.LOCK_TIMEOUT)) {
                    // If we couldn't do this with a nested transaction, retry
                    // with the parent; we need to wait this time!
                    newValue = dd.getSetAutoincrementValue(
                            constants.autoincRowLocation[index], tc,
                            true, aiCache[index], true);
                } else if (se.getMessageId().equals(
                        SQLState.LANG_OUTSIDE_RANGE_FOR_DATATYPE)) {
                    // If we got an overflow error, throw a more meaningful
                    // error message.
                    throw StandardException.newException(
                            SQLState.LANG_AI_OVERFLOW, se,
                            constants.getTableName(),
                            constants.getColumnName(index));
                } else
                    throw se;
            } finally {
                // No matter what, commit the nested transaction; if something
                // bad happened in the child transaction, let's not abort the
                // parent here.
                if (nestedTC != null) {
                    nestedTC.commit();
                    nestedTC.destroy();
                }
            }
            aiCache[index] = newValue;
            if (setIdentity)
                identityVal = newValue.getLong();
        }

        return aiCache[index];
    }

    // Is sourceResultSet a RowResultSet (values clause)?
    private boolean isSourceRowResultSet() {
        boolean isRow = false;
        if (sourceResultSet instanceof NormalizeResultSet)
            isRow = (((NormalizeResultSet) sourceResultSet).source instanceof RowResultSet);
        return isRow;
    }

    // Checks if the source result set is a RowResultSet type.
    private boolean isSingleRowResultSet() {
        boolean isRow = false;

        if (sourceResultSet instanceof RowResultSet)
            isRow = true;
        else if (sourceResultSet instanceof NormalizeResultSet)
            isRow = (((NormalizeResultSet) sourceResultSet).source instanceof RowResultSet);

        return isRow;
    }

    // Do the work for a "normal" insert
    private void normalInsertCore(LanguageConnectionContext lcc,
            boolean firstExecute) throws StandardException {
        boolean setUserIdentity = constants.hasAutoincrement()
                && isSingleRowResultSet();
        boolean firstDeferredRow = true;
        ExecRow deferredRowBuffer = null;
        long user_autoinc = 0;

        /* Get or re-use the row changer.
         * NOTE: We need to set ourself as the top result set
         * if this is not the 1st execution. (Done in the constructor
         * for the 1st execution.)
         */
        if (firstExecute) {
            rowChanger = lcc.getLanguageConnectionFactory()
                    .getExecutionFactory().getRowChanger(heapConglom,
                            constants.heapSCOCI, heapDCOCI,
                            constants.irgs, constants.indexCIDS,
                            constants.indexSCOCIs, indexDCOCIs,
                            0, // number of columns in partial row meaningless for insert
                            tc,
                            null, // changed column ids
                            constants.getStreamStorableHeapColIds(),
                            activation);
            rowChanger.setIndexNames(constants.indexNames);
        } else {
            lcc.getStatementContext().setTopResultSet(this,
                    subqueryTrackingArray);
        }

        /* Decode the lock mode for the execution isolation level */
        int lockMode = UpdateResultSet.decodeLockMode(lcc, constants.lockMode);

        rowChanger.open(lockMode);

        /* The source does not know whether or not we are doing a
         * deferred mode insert. If we are, then we must clear the
         * index scan info from the activation so that the row changer
         * does not re-use that information (which won't be valid for
         * a deferred mode insert).
         */
        if (constants.deferred) {
            activation.clearIndexScanInfo();
        }

        if (fkInfoArray != null) {
            if (fkChecker == null) {
                fkChecker = new RISetChecker(tc, fkInfoArray);
            } else {
                fkChecker.reopen();
            }
        }

        if (firstExecute && constants.deferred) {
            Properties properties = new Properties();

            // Get the properties on the old heap
            rowChanger.getHeapConglomerateController()
                    .getInternalTablePropertySet(properties);

            /*
            ** If deferred, we save a copy of the entire row.
            */
            rowHolder = new TemporaryRowHolderImpl(activation,
                    properties, resultDescription);
            rowChanger.setRowHolder(rowHolder);
        }

        int[] columnIndexes = null;
        if (firstExecute
                && activation.getAutoGeneratedKeysResultsetMode()) {
            ResultDescription rd;
            Properties properties = new Properties();
            columnIndexes = activation.getAutoGeneratedKeysColumnIndexes();

            // Get the properties on the old heap
            rowChanger.getHeapConglomerateController()
                    .getInternalTablePropertySet(properties);

            if (columnIndexes != null) {
                // use the user-provided column positions array
                columnIndexes = uniqueColumnPositionArray(columnIndexes);
            } else {
                // prepare an array of auto-generated keys for the table,
                // since the user didn't provide any
                columnIndexes = generatedColumnPositionsArray();
            }

            rd = lcc.getLanguageFactory().getResultDescription(
                    resultDescription, columnIndexes);
            autoGeneratedKeysRowsHolder = new TemporaryRowHolderImpl(
                    activation, properties, rd);
        }

        while (row != null) {
            if (activation.getAutoGeneratedKeysResultsetMode())
                autoGeneratedKeysRowsHolder.insert(getCompactRow(row,
                        columnIndexes));

            /*
            ** If we're doing a deferred insert, insert into the temporary
            ** conglomerate. Otherwise, insert directly into the permanent
            ** conglomerates using the rowChanger.
            */
            if (constants.deferred) {
                rowHolder.insert(row);
            } else {
                // Evaluate any check constraints on the row
                evaluateCheckConstraints();

                if (fkChecker != null) {
                    fkChecker.doFKCheck(row);
                }

                // Objectify any streaming columns that are indexed.
                if (constants.irgs.length > 0) {
                    DataValueDescriptor[] rowArray = row.getRowArray();
                    for (int i = 0; i < rowArray.length; i++) {
                        //System.out.println("checking " + i);
                        if (!constants.indexedCols[i]) {
                            continue;
                        }

                        if (rowArray[i] instanceof StreamStorable)
                            rowArray[i].getObject();
                    }
                }
                rowChanger.insertRow(row);
            }

            rowCount++;

            if (setUserIdentity) {
                dd = lcc.getDataDictionary();
                td = dd.getTableDescriptor(constants.targetUUID);

                int maxColumns = td.getMaxColumnID();
                int col;

                for (col = 1; col <= maxColumns; col++) {
                    ColumnDescriptor cd = td.getColumnDescriptor(col);
                    if (cd.isAutoincrement()) {
                        break;
                    }
                }

                if (col <= maxColumns) {
                    DataValueDescriptor dvd = row.cloneColumn(col);
                    user_autoinc = dvd.getLong();
                }
            }

            // No need to do a next on a single row source
            if (constants.singleRowSource) {
                row = null;
            } else {
                row = getNextRowCore(sourceResultSet);
            }
        }

        /*
        ** If it's a deferred insert, scan the temporary conglomerate and
        ** insert the rows into the permanent conglomerates using rowChanger.
        */
        if (constants.deferred) {
            if (triggerInfo != null) {
                Vector v = null;
                if (aiCache != null) {
                    v = new Vector();
                    for (int i = 0; i < aiCache.length; i++) {
                        String s, t, c;
                        if (aiCache[i] == null)
                            continue;

                        Long initialValue = lcc.lastAutoincrementValue(
                                (s = constants.getSchemaName()),
                                (t = constants.getTableName()),
                                (c = constants.getColumnName(i)));

                        AutoincrementCounter aic = new AutoincrementCounter(
                                initialValue,
                                constants.getAutoincIncrement(i),
                                aiCache[i].getLong(), s, t, c, i + 1);
                        v.addElement(aic);
                    }
                }

                if (triggerActivator == null) {
                    triggerActivator = new TriggerEventActivator(lcc,
                            tc, constants.targetUUID, triggerInfo,
                            TriggerExecutionContext.INSERT_EVENT,
                            activation, v);
                } else {
                    triggerActivator.reopen();
                }

                // fire BEFORE trigger; do this before checking constraints
                triggerActivator.notifyEvent(TriggerEvents.BEFORE_INSERT,
                        (CursorResultSet) null, rowHolder.getResultSet());
            }

            CursorResultSet rs = rowHolder.getResultSet();
            try {
                rs.open();
                while ((deferredRowBuffer = rs.getNextRow()) != null) {
                    // we have to set the source row so the check constraint
                    // sees the correct row.
                    sourceResultSet.setCurrentRow(deferredRowBuffer);
                    evaluateCheckConstraints();
                    rowChanger.insertRow(deferredRowBuffer);
                }
            } finally {
                sourceResultSet.clearCurrentRow();
                rs.close();
            }

            if (fkChecker != null) {
                /*
                ** Second scan to make sure all the foreign key
                ** constraints are ok. We have to do this after
                ** we have completed the inserts, in case of self
                ** referencing constraints.
                */
                rs = rowHolder.getResultSet();
                try {
                    rs.open();
                    while ((deferredRowBuffer = rs.getNextRow()) != null) {
                        fkChecker.doFKCheck(deferredRowBuffer);
                    }
                } finally {
                    rs.close();
                }
            }

            // fire AFTER trigger
            if (triggerActivator != null) {
                triggerActivator.notifyEvent(TriggerEvents.AFTER_INSERT,
                        (CursorResultSet) null, rowHolder.getResultSet());
            }
        }

        if (rowHolder != null) {
            rowHolder.close();
            // rowHolder kept across opens
        }
        if (fkChecker != null) {
            fkChecker.close();
            fkChecker = null;
        }
        if (setIdentity)
            lcc.setIdentityValue(identityVal);
        /*
         * Otherwise, find the value of the identity column from the
         * user-inserted value and do an lcc.setIdentityValue(<user_value>).
         */
        else if (setUserIdentity) {
            lcc.setIdentityValue(user_autoinc);
        }
    }

    /*
     * Take the input row and return a new compact ExecRow
     * using the column positions provided in columnIndexes.
     * Copies references; no cloning.
     */
    private ExecRow getCompactRow(ExecRow inputRow, int[] columnIndexes)
            throws StandardException {
        ExecRow outRow;
        int numInputCols = inputRow.nColumns();

        if (columnIndexes == null) {
            outRow = new ValueRow(numInputCols);
            Object[] src = inputRow.getRowArray();
            Object[] dst = outRow.getRowArray();
            System.arraycopy(src, 0, dst, 0, src.length);
            return outRow;
        }

        int numOutputCols = columnIndexes.length;

        outRow = new ValueRow(numOutputCols);
        for (int i = 0; i < numOutputCols; i++) {
            outRow.setColumn(i + 1, inputRow.getColumn(columnIndexes[i]));
        }

        return outRow;
    }
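
    // Illustrative: for an input row (c1, c2, c3, c4) and columnIndexes
    // {2, 4}, getCompactRow returns the two-column row (c2, c4); with a null
    // columnIndexes, the whole row is copied (references only, no cloning).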

    // Do the work for a bulk insert
    private long bulkInsertCore(LanguageConnectionContext lcc,
            long oldHeapConglom) throws StandardException {
        fullTemplate = constants.getEmptyHeapRow(lcc);
        bulkHeapCC = tc.openCompiledConglomerate(false,
                TransactionController.OPENMODE_FORUPDATE,
                TransactionController.MODE_TABLE,
                TransactionController.ISOLATION_SERIALIZABLE,
                constants.heapSCOCI, heapDCOCI);

        long newHeapConglom;

        Properties properties = new Properties();

        // Get the properties on the old heap
        bulkHeapCC.getInternalTablePropertySet(properties);

        if (triggerInfo != null) {
            triggerActivator = new TriggerEventActivator(lcc, tc,
                    constants.targetUUID, triggerInfo,
                    TriggerExecutionContext.INSERT_EVENT, activation,
                    null);
        }

        /*
        ** If we have a before row trigger, then we
        ** are going to use a row holder to pass rows
        ** to our trigger.
        */
        if (hasBeforeRowTrigger && rowHolder == null) {
            rowHolder = new TemporaryRowHolderImpl(activation,
                    properties, resultDescription);
        }

        // Add any new properties or change the values of any existing properties
        Properties targetProperties = constants.getTargetProperties();
        Enumeration key = targetProperties.keys();
        while (key.hasMoreElements()) {
            String keyValue = (String) key.nextElement();
            properties.put(keyValue, targetProperties.getProperty(keyValue));
        }

        // Are there indexes to be updated?
        if (constants.irgs.length > 0) {
            // Tell the source whether or not we need the RIDs back
            sourceResultSet.setNeedsRowLocation(true);
        }

        dd = lcc.getDataDictionary();
        td = dd.getTableDescriptor(constants.targetUUID);

        /* Do the bulk insert - only okay to reuse the
         * same conglomerate if bulkInsert.
         */
        long[] loadedRowCount = new long[1];
        if (bulkInsertReplace) {
            newHeapConglom = tc.createAndLoadConglomerate("heap",
                    fullTemplate.getRowArray(),
                    null, // column sort order - not required for heap
                    properties, TransactionController.IS_DEFAULT,
                    sourceResultSet, loadedRowCount);
        } else {
            newHeapConglom = tc.recreateAndLoadConglomerate("heap",
                    false, fullTemplate.getRowArray(),
                    null, // column sort order - not required for heap
                    properties, TransactionController.IS_DEFAULT,
                    oldHeapConglom, sourceResultSet, loadedRowCount);
        }

        /* Nothing else to do if we get back the same conglomerate number.
         * (In 2.0 this means that 0 rows were inserted.)
         */
        if (newHeapConglom == oldHeapConglom) {
            return oldHeapConglom;
        }

        // Find out how many rows were inserted
        rowCount = (int) loadedRowCount[0];

        // Set the "estimated" row count
        setEstimatedRowCount(newHeapConglom);

        /*
        ** Inform the data dictionary that we are about to write to it.
        ** There are several calls to data dictionary "get" methods here
        ** that might be done in "read" mode in the data dictionary, but
        ** it seemed safer to do this whole operation in "write" mode.
        **
        ** We tell the data dictionary we're done writing at the end of
        ** the transaction.
        */
        dd.startWriting(lcc);

        lcc.autoincrementFlushCache(constants.targetUUID);

        // Invalidate any prepared statements that
        // depended on this table (including this one)
        DependencyManager dm = dd.getDependencyManager();

        dm.invalidateFor(td, DependencyManager.BULK_INSERT, lcc);

        // Update all indexes
        if (constants.irgs.length > 0) {
            updateAllIndexes(newHeapConglom, constants, td, dd,
                    fullTemplate);
        }

        // Drop the old conglomerate
        bulkHeapCC.close();
        bulkHeapCC = null;

        /* Update the DataDictionary
         * RESOLVE - this will change in 1.4 because we will get
         * back the same conglomerate number
         */
        // Get the ConglomerateDescriptor for the heap
        ConglomerateDescriptor cd = td.getConglomerateDescriptor(oldHeapConglom);

        // Update sys.sysconglomerates with the new conglomerate #
        dd.updateConglomerateDescriptor(cd, newHeapConglom, tc);
        tc.dropConglomerate(oldHeapConglom);
        // END RESOLVE

        return newHeapConglom;
    }

    /*
    ** Bulk Referential Integrity Checker
    */
    private void bulkValidateForeignKeys(TransactionController tc,
            ContextManager cm) throws StandardException {
        FKInfo fkInfo;

        /*
        ** If there are no foreign keys, then there is nothing to worry
        ** about.
        ** With bulk insert replace, we still need to verify
        ** all non-self-referencing foreign keys when
        ** there are no rows inserted into the table.
        */
        if ((indexRows == null && !bulkInsertReplace)
                || fkInfoArray == null) {
            return;
        }

        for (int i = 0; i < fkInfoArray.length; i++) {
            fkInfo = fkInfoArray[i];

            /* With regular bulk insert, we only need to check the
             * foreign keys in the table we inserted into. We need
             * to get the new conglomerate #s for the foreign keys.
             *
             * With bulk insert replace, we need to check both the
             * foreign keys in the table as well as any foreign keys
             * on other tables referencing the table we inserted into.
             * If the foreign key is self-referencing, then both the
             * foreign key and primary key conglomerate #s have changed
             * and we need to look up the new ones; otherwise the foreign
             * key's conglomerate # is the same as the compile-time
             * conglomerate #, and we only need the new conglomerate #
             * for the primary key if the primary key is on the table
             * being inserted into.
             */
            if (bulkInsertReplace) {
                for (int index = 0; index < fkInfo.fkConglomNumbers.length; index++) {
                    /* No need to check the foreign key if it is self-referencing
                     * and there were no rows inserted on the replace, as both
                     * indexes will be empty.
                     */
                    if (fkInfo.fkIsSelfReferencing[index]
                            && indexRows == null) {
                        continue;
                    }

                    long pkConglom;
                    long fkConglom;

                    if (fkInfo.fkIsSelfReferencing[index]) {
                        /* Self-referencing foreign key. Both conglomerate
                         * #s have changed.
                         */
                        pkConglom = ((Long) indexConversionTable
                                .get(new Long(fkInfo.refConglomNumber)))
                                .longValue();
                        fkConglom = ((Long) indexConversionTable
                                .get(new Long(fkInfo.fkConglomNumbers[index])))
                                .longValue();
                    } else {
                        /* Non-self-referencing foreign key. At this point we
                         * don't know if the primary key or the foreign key is
                         * on this table. So, for each one, we look to see
                         * if the old conglomerate # is in the conversion table.
                         * If so, then we get the new conglomerate #; otherwise
                         * we use the compile-time conglomerate #. This
                         * is very simple, though not very elegant.
                         */
                        Long pkConglomLong = (Long) indexConversionTable
                                .get(new Long(fkInfo.refConglomNumber));
                        Long fkConglomLong = (Long) indexConversionTable
                                .get(new Long(fkInfo.fkConglomNumbers[index]));
                        if (pkConglomLong == null) {
                            pkConglom = fkInfo.refConglomNumber;
                        } else {
                            pkConglom = pkConglomLong.longValue();
                        }
                        if (fkConglomLong == null) {
                            fkConglom = fkInfo.fkConglomNumbers[index];
                        } else {
                            fkConglom = fkConglomLong.longValue();
                        }
                    }
                    bulkValidateForeignKeysCore(tc, cm, fkInfoArray[i],
                            fkConglom, pkConglom,
                            fkInfo.fkConstraintNames[index]);
                }
            } else {
                /*
                ** We have an FKInfo for each foreign key we are
                ** checking. Note that there are no primary key
                ** checks on insert, so we can always reference
                ** element[0] in the current FKInfo structure.
                */
                if (SanityManager.DEBUG) {
                    SanityManager.ASSERT(fkInfo.type == FKInfo.FOREIGN_KEY,
                            "error, expected to only check foreign keys on insert");
                }
                Long fkConglom = (Long) indexConversionTable
                        .get(new Long(fkInfo.fkConglomNumbers[0]));
                bulkValidateForeignKeysCore(tc, cm, fkInfoArray[i],
                        fkConglom.longValue(), fkInfo.refConglomNumber,
                        fkInfo.fkConstraintNames[0]);
            }
        }
    }

    private void bulkValidateForeignKeysCore(TransactionController tc,
            ContextManager cm, FKInfo fkInfo, long fkConglom,
            long pkConglom, String fkConstraintName)
            throws StandardException {
        ExecRow template;
        GroupFetchScanController refScan = null;
        GroupFetchScanController fkScan = null;

        try {
            template = makeIndexTemplate(fkInfo, fullTemplate, cm);

            /*
            ** The indexes have been dropped and recreated, so
            ** we need to get the new index conglomerate number.
            */
            fkScan = tc.openGroupFetchScan(fkConglom,
                    false,                        // hold
                    0,                            // read only
                    tc.MODE_TABLE,                // doesn't matter, already locked
                    tc.ISOLATION_READ_COMMITTED,  // doesn't matter, already locked
                    (FormatableBitSet) null,      // retrieve all fields
                    (DataValueDescriptor[]) null, // startKeyValue
                    ScanController.GE,            // startSearchOp
                    null,                         // qualifier
                    (DataValueDescriptor[]) null, // stopKeyValue
                    ScanController.GT             // stopSearchOp
                    );

            if (SanityManager.DEBUG) {
                /*
                ** Bulk insert replace calls this method regardless
                ** of whether or not any rows were inserted, because
                ** it has to check any referencing foreign keys
                ** after the replace. Otherwise, we
                ** make sure that we actually have a row in the fk.
                ** If not, we have an error because we thought that
                ** since indexRows != null, we must have gotten some
                ** rows.
                */
                if (!bulkInsertReplace) {
                    SanityManager.ASSERT(fkScan.next(),
                            "No rows in fk index, even though indexRows != null");

                    /*
                    ** Crank up the scan again.
                    */
                    fkScan.reopenScan(
                            (DataValueDescriptor[]) null, // startKeyValue
                            ScanController.GE,            // startSearchOp
                            null,                         // qualifier
                            (DataValueDescriptor[]) null, // stopKeyValue
                            ScanController.GT             // stopSearchOp
                            );
                }
            }

            /*
            ** Open the referenced key scan. Use row locking on
            ** the referenced table unless it is self-referencing
            ** (in which case we don't need locks).
            */
            refScan = tc.openGroupFetchScan(pkConglom,
                    false,                        // hold
                    0,                            // read only
                    (fkConglom == pkConglom) ? tc.MODE_TABLE : tc.MODE_RECORD,
                    tc.ISOLATION_READ_COMMITTED,  // read committed is good enough
                    (FormatableBitSet) null,      // retrieve all fields
                    (DataValueDescriptor[]) null, // startKeyValue
                    ScanController.GE,            // startSearchOp
                    null,                         // qualifier
                    (DataValueDescriptor[]) null, // stopKeyValue
                    ScanController.GT             // stopSearchOp
                    );

            /*
            ** Give the scans to the bulk checker to do its
            ** magic. It will do a merge on the two indexes.
            */
            ExecRow firstFailedRow = template.getClone();
            RIBulkChecker riChecker = new RIBulkChecker(refScan,
                    fkScan, template,
                    true,                         // fail on 1st failure
                    (ConglomerateController) null, firstFailedRow);

            int numFailures = riChecker.doCheck();
            if (numFailures > 0) {
                StandardException se = StandardException.newException(
                        SQLState.LANG_FK_VIOLATION, fkConstraintName,
                        fkInfo.tableName,
                        StatementUtil.typeName(fkInfo.stmtType),
                        RowUtil.toString(firstFailedRow, 0,
                                fkInfo.colArray.length - 1));
                throw se;
            }
        } finally {
            if (fkScan != null) {
                fkScan.close();
                fkScan = null;
            }
            if (refScan != null) {
                refScan.close();
                refScan = null;
            }
        }
    }

    /**
     * Make a template row with the correct columns.
     */
    private ExecRow makeIndexTemplate(FKInfo fkInfo,
            ExecRow fullTemplate, ContextManager cm)
            throws StandardException {
        ExecRow newRow = RowUtil.getEmptyIndexRow(
                fkInfo.colArray.length + 1, cm);

        DataValueDescriptor[] templateColArray = fullTemplate.getRowArray();
        DataValueDescriptor[] newRowColArray = newRow.getRowArray();

        int i;
        for (i = 0; i < fkInfo.colArray.length; i++) {
            newRowColArray[i] =
                    (templateColArray[fkInfo.colArray[i] - 1]).getClone();
        }

        newRowColArray[i] = (DataValueDescriptor) fkInfo.rowLocation
                .cloneObject();

        return newRow;
    }
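
    // Illustrative: for a foreign key on base columns (c3, c1), the template
    // built above is (clone of the c3 column, clone of the c1 column, clone
    // of the RowLocation), matching the key-plus-RID shape of the index rows
    // being scanned.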

    /**
     * Set up to update all of the indexes on a table when doing a bulk insert
     * on an empty table.
     *
     * @exception StandardException thrown on error
     */
    private void setUpAllSorts(ExecRow sourceRow, RowLocation rl)
            throws StandardException {
        int numIndexes = constants.irgs.length;
        int numColumns = td.getNumberOfColumns();

        ordering = new ColumnOrdering[numIndexes][];
        needToDropSort = new boolean[numIndexes];
        sortIds = new long[numIndexes];
        rowSources = new RowLocationRetRowSource[numIndexes];
        // indexedCols is 1-based
        indexedCols = new FormatableBitSet(numColumns + 1);

        /* For each index, build a single index row and a sorter. */
        for (int index = 0; index < numIndexes; index++) {
            // Update the bit map of indexed columns
            int[] keyColumns = constants.irgs[index].baseColumnPositions();
            for (int i2 = 0; i2 < keyColumns.length; i2++) {
                // indexedCols is 1-based
                indexedCols.set(keyColumns[i2]);
            }

            // create a single index row template for each index
            indexRows[index] = constants.irgs[index].getIndexRowTemplate();

            // Get an index row based on the base row
            // (This call is only necessary here because we need to pass a
            // template to the sorter.)
            constants.irgs[index].getIndexRow(sourceRow, rl,
                    indexRows[index], (FormatableBitSet) null);

            /* For non-unique indexes, we order by all columns + the RID.
             * For unique indexes, we just order by the columns.
             * We create a unique index observer for unique indexes
             * so that we can catch duplicate keys.
             */
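            /* Illustrative: for a non-unique index on (a, b), the sort key is
             * (a, b, RID), giving numColumnOrderings == 3 below; a unique
             * index sorts on (a, b) alone.
             */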
            ConglomerateDescriptor cd;
            // Get the ConglomerateDescriptor for the index
            cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);
            int[] baseColumnPositions = constants.irgs[index]
                    .baseColumnPositions();
            boolean[] isAscending = constants.irgs[index].isAscending();
            int numColumnOrderings;
            SortObserver sortObserver = null;
            /* We can only reuse the wrappers when doing an
             * external sort if there is only 1 index. Otherwise,
             * we could get in a situation where 1 sort reuses a
             * wrapper that is still in use in another sort.
             */
            boolean reuseWrappers = (numIndexes == 1);
            if (cd.getIndexDescriptor().isUnique()) {
                numColumnOrderings = baseColumnPositions.length;
                String[] columnNames = getColumnNames(baseColumnPositions);

                String indexOrConstraintName = cd.getConglomerateName();
                if (cd.isConstraint()) {
                    // the index is backing up a constraint
                    ConstraintDescriptor conDesc = dd
                            .getConstraintDescriptor(td, cd.getUUID());
                    indexOrConstraintName = conDesc.getConstraintName();
                }
                sortObserver = new UniqueIndexSortObserver(
                        false, // don't clone rows
                        cd.isConstraint(), indexOrConstraintName,
                        indexRows[index], reuseWrappers, td.getName());
            } else {
                numColumnOrderings = baseColumnPositions.length + 1;
                sortObserver = new BasicSortObserver(false, false,
                        indexRows[index], reuseWrappers);
            }
            ordering[index] = new ColumnOrdering[numColumnOrderings];
            for (int ii = 0; ii < isAscending.length; ii++) {
                ordering[index][ii] = new IndexColumnOrder(ii,
                        isAscending[ii]);
            }
            if (numColumnOrderings > isAscending.length)
                ordering[index][isAscending.length] = new IndexColumnOrder(
                        isAscending.length);

            // create the sorters
            sortIds[index] = tc.createSort((Properties) null,
                    indexRows[index].getRowArrayClone(),
                    ordering[index], sortObserver,
                    false, // not in order
                    (int) sourceResultSet.getEstimatedRowCount(), // est rows
                    -1     // est row size, -1 means no idea
                    );
            needToDropSort[index] = true;
        }

        sorters = new SortController[numIndexes];

        // Open the sorts
        for (int index = 0; index < numIndexes; index++) {
            sorters[index] = tc.openSort(sortIds[index]);
            needToDropSort[index] = true;
        }
    }
1577:
1578: /**
1579: * Update all of the indexes on a table when doing a bulk insert
1580: * on an empty table.
1581: *
1582: * @exception StandardException thrown on error
1583: */
1584: private void updateAllIndexes(long newHeapConglom,
1585: InsertConstantAction constants, TableDescriptor td,
1586: DataDictionary dd, ExecRow fullTemplate)
1587: throws StandardException {
1588: int numIndexes = constants.irgs.length;
1589:
1590: /*
1591: ** If we didn't actually read in any rows, then
1592: ** we don't need to do anything, unless we were
1593: ** doing a replace.
1594: */
1595: if (indexRows == null) {
1596: if (bulkInsertReplace) {
1597: emptyIndexes(newHeapConglom, constants, td, dd,
1598: fullTemplate);
1599: }
1600: return;
1601: }
1602:
1603: dd.dropStatisticsDescriptors(td.getUUID(), null, tc);
1604: long[] newIndexCongloms = new long[numIndexes];
1605:
1606: indexConversionTable = new Hashtable(numIndexes);
1607: // Populate each index
1608: for (int index = 0; index < numIndexes; index++) {
1609: ConglomerateController indexCC;
1610: Properties properties = new Properties();
1611: ConglomerateDescriptor cd;
1612: // Get the ConglomerateDescriptor for the index
1613: cd = td
1614: .getConglomerateDescriptor(constants.indexCIDS[index]);
1615:
1616: // Build the properties list for the new conglomerate
1617: indexCC = tc.openCompiledConglomerate(false,
1618: TransactionController.OPENMODE_FORUPDATE,
1619: TransactionController.MODE_TABLE,
1620: TransactionController.ISOLATION_SERIALIZABLE,
1621: constants.indexSCOCIs[index], indexDCOCIs[index]);
1622:
1623: // Get the properties on the old index
1624: indexCC.getInternalTablePropertySet(properties);
1625:
1626: /* Create the properties that language supplies when creating
1627: * the index. (The store doesn't preserve these.)
1628: */
1629: int indexRowLength = indexRows[index].nColumns();
1630: properties.put("baseConglomerateId", Long
1631: .toString(newHeapConglom));
1632: if (cd.getIndexDescriptor().isUnique()) {
1633: properties.put("nUniqueColumns", Integer
1634: .toString(indexRowLength - 1));
1635: } else {
1636: properties.put("nUniqueColumns", Integer
1637: .toString(indexRowLength));
1638: }
1639: properties.put("rowLocationColumn", Integer
1640: .toString(indexRowLength - 1));
1641: properties.put("nKeyFields", Integer
1642: .toString(indexRowLength));
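/* Illustrative example (hypothetical values): for a unique two-column
 * index whose new base heap is conglomerate 42, the index row is the
 * two key columns plus the row location, so indexRowLength = 3 and the
 * properties come out as baseConglomerateId=42, nUniqueColumns=2,
 * rowLocationColumn=2, nKeyFields=3.
 */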
1643:
1644: indexCC.close();
1645:
1646: // We can finally drain the sorter and rebuild the index
1647: // RESOLVE - all indexes are btrees right now
1648: // Populate the index.
1649: sorters[index].close();
1650: sorters[index] = null;
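/* Closing the sorter completes the sort; the sort row source then
 * streams the rows back in key order.  CardinalityCounter wraps the
 * row source to count rows and the distinct values in each
 * leading-column prefix as they flow into the new index.
 */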
1651: rowSources[index] = new CardinalityCounter(tc
1652: .openSortRowSource(sortIds[index]));
1653: newIndexCongloms[index] = tc.createAndLoadConglomerate(
1654: "BTREE", indexRows[index].getRowArray(),
1655: ordering[index], properties,
1656: TransactionController.IS_DEFAULT,
1657: rowSources[index], (long[]) null);
1658:
1659: CardinalityCounter cCount = (CardinalityCounter) rowSources[index];
1660: long numRows;
1661: if ((numRows = cCount.getRowCount()) > 0) {
1662: long[] c = cCount.getCardinality();
1663: DataDescriptorGenerator ddg = dd
1664: .getDataDescriptorGenerator();
1665:
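/* One SYSSTATISTICS row is written per leading prefix of the index
 * key: entry i+1 pairs the total row count with the cardinality of
 * the first i+1 key columns, which the optimizer can use for
 * selectivity estimates.
 */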
1666: for (int i = 0; i < c.length; i++) {
1667: StatisticsDescriptor statDesc = new StatisticsDescriptor(
1668: dd, dd.getUUIDFactory().createUUID(),
1669: cd.getUUID(), td.getUUID(), "I",
1670: new StatisticsImpl(numRows, c[i]), i + 1);
1671: dd.addDescriptor(statDesc, null,
1672: DataDictionary.SYSSTATISTICS_CATALOG_NUM,
1673: true, tc);
1674: }
1675:
1676: }
1677:
1678: /* Update the DataDictionary
1679: * RESOLVE - this will change in 1.4 because we will get
1680: * back the same conglomerate number
1681: *
1682: * Update sys.sysconglomerates with the new conglomerate #. If the
1683: * conglomerate is shared by duplicate indexes, all the descriptors
1684: * for those indexes need to be updated with the new number.
1685: */
1686: dd.updateConglomerateDescriptor(
1687: td.getConglomerateDescriptors(constants.indexCIDS[index]),
1688: newIndexCongloms[index], tc);
1691:
1692: // Drop the old conglomerate
1693: tc.dropConglomerate(constants.indexCIDS[index]);
1694:
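// Remember the old-to-new conglomerate number mapping; later lookups
// that still hold the old index conglomerate ids can be translated
// to the rebuilt ones.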
1695: indexConversionTable.put(
1696: new Long(constants.indexCIDS[index]),
1697: new Long(newIndexCongloms[index]));
1698: }
1699: }
1700:
1701: /**
1702: * @see ResultSet#cleanUp
1703: *
1704: * @exception StandardException Thrown on error
1705: */
1706: public void cleanUp() throws StandardException {
1707:
1708: if (tableScan != null) {
1709: tableScan.close();
1710: tableScan = null;
1711: }
1712:
1713: if (triggerActivator != null) {
1714: triggerActivator.cleanup();
1715: // triggerActivator is reused across executions
1716: }
1717:
1718: /* Close down the source ResultSet tree */
1719: if (sourceResultSet != null) {
1720: sourceResultSet.close();
1721: // sourceResultSet is reused across executions
1722: }
1723: numOpens = 0;
1724:
1725: if (rowChanger != null) {
1726: rowChanger.close();
1727: }
1728:
1729: if (rowHolder != null) {
1730: rowHolder.close();
1731: }
1732:
1733: if (fkChecker != null) {
1734: fkChecker.close();
1735: // fkChecker is reused across executions
1736: }
1737:
1738: if (bulkHeapCC != null) {
1739: bulkHeapCC.close();
1740: bulkHeapCC = null;
1741: }
1742:
1743: if (bulkHeapSC != null) {
1744: bulkHeapSC.close();
1745: bulkHeapSC = null;
1746: }
1747:
1748: // Close each sorter
1749: if (sorters != null) {
1750: for (int index = 0; index < constants.irgs.length; index++) {
1751: if (sorters[index] != null) {
1752: sorters[index].close();
1753: }
1754: sorters[index] = null;
1755: }
1756: }
1757:
1758: if (needToDropSort != null) {
1759: for (int index = 0; index < needToDropSort.length; index++) {
1760: if (needToDropSort[index]) {
1761: tc.dropSort(sortIds[index]);
1762: needToDropSort[index] = false;
1763: }
1764: }
1765: }
1766:
1767: if (rowSources != null) {
1768: for (int index = 0; index < rowSources.length; index++) {
1769: if (rowSources[index] != null) {
1770: rowSources[index].closeRowSource();
1771: rowSources[index] = null;
1772: }
1773: }
1774: }
1775: super.close();
1776: }
1777:
1778: // Class implementation
1779:
1780: /**
1781: * Verify that bulkInsert is allowed on this table.
1782: * The execution-time check simply verifies that this is not a
1783: * deferred-mode insert and, unless this is a replace, that the
1784: * table is empty.
1785: *
1786: * A side effect of calling this method is to get an exclusive
1787: * table lock on the table.
1788: *
1789: * @return Whether or not bulkInsert is allowed on this table.
1790: *
1791: * @exception StandardException Thrown on error
1792: */
1793: protected boolean verifyBulkInsert() throws StandardException {
1794: // bulk insert is disabled for deferred mode inserts
1795: if (constants.deferred) {
1796: /* bulk insert replace should be disallowed for
1797: * deferred mode inserts.
1798: */
1799: if (SanityManager.DEBUG) {
1800: SanityManager.ASSERT(!bulkInsertReplace,
1801: "bulkInsertReplace expected to be false for deferred mode inserts");
1803: }
1804: return false;
1805: }
1806:
1807: return getExclusiveTableLock();
1808: }
1809:
1810: /**
1811: * Get an exclusive table lock on the target table
1812: * (and check to see if the table is populated if
1813: * this is not a bulk insert replace).
1814: *
1815: * @return Whether or not bulkInsert is allowed on this table.
1816: *
1817: * @exception StandardException Thrown on error
1818: */
1819: private boolean getExclusiveTableLock() throws StandardException {
1820: boolean rowFound = false;
1821:
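/* Opening a FOR UPDATE scan in MODE_TABLE at serializable isolation
 * is what acquires the exclusive table lock this method promises;
 * the scan itself is only used to check for existing rows (or, for
 * replace, to get a row location template).
 */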
1822: bulkHeapSC = tc.openCompiledScan(false,
1823: TransactionController.OPENMODE_FORUPDATE,
1824: TransactionController.MODE_TABLE,
1825: TransactionController.ISOLATION_SERIALIZABLE,
1826: (FormatableBitSet) null, (DataValueDescriptor[]) null,
1827: 0, (Qualifier[][]) null, (DataValueDescriptor[]) null,
1828: 0, constants.heapSCOCI, heapDCOCI);
1829:
1830: /* No need to call next() for bulk insert replace,
1831: * but we do need a row location template for the
1832: * case where the replace leaves an empty table.
1833: */
1834: if (!bulkInsertReplace) {
1835: rowFound = bulkHeapSC.next();
1836: } else {
1837: rl = bulkHeapSC.newRowLocationTemplate();
1838: }
1839:
1840: bulkHeapSC.close();
1841: bulkHeapSC = null;
1842:
1843: return !rowFound;
1844: }
1845:
1846: /**
1847: * Set the estimated row count for this table.
1848: *
1849: * @param heapConglom Conglomerate number for the heap
1850: *
1851: * @exception StandardException Thrown on failure
1852: */
1853: private void setEstimatedRowCount(long heapConglom)
1854: throws StandardException {
1855: bulkHeapSC = tc.openCompiledScan(false,
1856: TransactionController.OPENMODE_FORUPDATE,
1857: TransactionController.MODE_TABLE,
1858: TransactionController.ISOLATION_SERIALIZABLE,
1859: (FormatableBitSet) null, (DataValueDescriptor[]) null,
1860: 0, (Qualifier[][]) null, (DataValueDescriptor[]) null,
1861: 0, constants.heapSCOCI, heapDCOCI);
1862:
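// Push the row count gathered during the insert into the store so
// the heap's stored row-count estimate reflects the bulk load.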
1863: bulkHeapSC.setEstimatedRowCount(rowCount);
1864:
1865: bulkHeapSC.close();
1866: bulkHeapSC = null;
1867: }
1868:
1869: /**
1870: * Empty the indexes after doing a bulk insert replace
1871: * where the table has 0 rows after the replace.
1872: * RESOLVE: This method is ugly! Prior to 2.0, we simply
1873: * scanned back across the table to build the indexes. We
1874: * changed this in 2.0 to populate the sorters via a call back
1875: * as we populated the table. Doing a 0 row replace into a
1876: * table with indexes is a degenerate case, hence we allow
1877: * ugly and unoptimized code.
1878: *
1879: * @exception StandardException Thrown on failure
1880: */
1881: private void emptyIndexes(long newHeapConglom,
1882: InsertConstantAction constants, TableDescriptor td,
1883: DataDictionary dd, ExecRow fullTemplate)
1884: throws StandardException {
1885: int numIndexes = constants.irgs.length;
1886: ExecIndexRow[] indexRows = new ExecIndexRow[numIndexes];
1887: ExecRow baseRows = null;
1888: ColumnOrdering[][] ordering = new ColumnOrdering[numIndexes][];
1889: int numColumns = td.getNumberOfColumns();
1890:
1891: // Create the BitSet for mapping the partial row to the full row
1892: FormatableBitSet bitSet = new FormatableBitSet(numColumns + 1);
1893: // Need to check each index for referenced columns
1894: int numReferencedColumns = 0;
1895: for (int index = 0; index < numIndexes; index++) {
1896: int[] baseColumnPositions = constants.irgs[index]
1897: .baseColumnPositions();
1898: for (int bcp = 0; bcp < baseColumnPositions.length; bcp++) {
1899: if (!bitSet.get(baseColumnPositions[bcp])) {
1900: bitSet.set(baseColumnPositions[bcp]);
1901: numReferencedColumns++;
1902: }
1903: }
1904: }
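/* Example: with one index on column 2 and another on columns (2, 4),
 * bits 2 and 4 end up set and numReferencedColumns is 2, so the
 * partial base row built below has exactly two columns.
 */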
1905:
1906: // We can finally create the partial base row
1907: baseRows = activation.getExecutionFactory().getValueRow(
1908: numReferencedColumns);
1909:
1910: // Fill in each base row with nulls of the correct data type
1911: int colNumber = 0;
1912: for (int index = 0; index < numColumns; index++) {
1913: if (bitSet.get(index + 1)) {
1914: colNumber++;
1915: // NOTE: 1-based column numbers
1916: baseRows.setColumn(colNumber, fullTemplate.getColumn(
1917: index + 1).getClone());
1918: }
1919: }
1920:
1921: needToDropSort = new boolean[numIndexes];
1922: sortIds = new long[numIndexes];
1923:
1924: /* Do the initial set up before scanning the heap.
1925: * For each index, build a single index row and a sorter.
1926: */
1927: for (int index = 0; index < numIndexes; index++) {
1928: // create a single index row template for each index
1929: indexRows[index] = constants.irgs[index]
1930: .getIndexRowTemplate();
1931:
1932: // Get an index row based on the base row
1933: // (This call is only necessary here because we need to pass a template to the sorter.)
1934: constants.irgs[index].getIndexRow(baseRows, rl,
1935: indexRows[index], bitSet);
1936:
1937: /* For non-unique indexes, we order by all columns + the RID.
1938: * For unique indexes, we just order by the columns.
1939: * We create a unique index observer for unique indexes
1940: * so that we can catch duplicate keys.
1941: */
1942: ConglomerateDescriptor cd;
1943: // Get the ConglomerateDescriptor for the index
1944: cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);
1946: int[] baseColumnPositions = constants.irgs[index]
1947: .baseColumnPositions();
1948: boolean[] isAscending = constants.irgs[index].isAscending();
1949: int numColumnOrderings;
1950: SortObserver sortObserver = null;
1951: if (cd.getIndexDescriptor().isUnique()) {
1952: numColumnOrderings = baseColumnPositions.length;
1953: String[] columnNames = getColumnNames(baseColumnPositions);
1954:
1955: String indexOrConstraintName = cd.getConglomerateName();
1956: if (cd.isConstraint()) // so, the index is backing up a constraint
1957: {
1958: ConstraintDescriptor conDesc = dd
1959: .getConstraintDescriptor(td, cd.getUUID());
1960: indexOrConstraintName = conDesc.getConstraintName();
1961: }
1962: sortObserver = new UniqueIndexSortObserver(
1963: false, // don't clone rows
1964: cd.isConstraint(), indexOrConstraintName,
1965: indexRows[index], true, td.getName());
1966: } else {
1967: numColumnOrderings = baseColumnPositions.length + 1;
1968: sortObserver = new BasicSortObserver(false, false,
1969: indexRows[index], true);
1970: }
1971: ordering[index] = new ColumnOrdering[numColumnOrderings];
1972: for (int ii = 0; ii < isAscending.length; ii++) {
1973: ordering[index][ii] = new IndexColumnOrder(ii,
1974: isAscending[ii]);
1975: }
1976: if (numColumnOrderings > isAscending.length) {
1977: ordering[index][isAscending.length] =
1978: new IndexColumnOrder(isAscending.length);
1979: }
1980: // create the sorters
1981: sortIds[index] = tc.createSort((Properties) null,
1982: indexRows[index].getRowArrayClone(),
1983: ordering[index], sortObserver, false, // not in order
1984: rowCount, // est rows
1985: -1 // est row size, -1 means no idea
1986: );
1987: needToDropSort[index] = true;
1988: }
1989:
1990: // Populate sorters and get the output of each sorter into a row
1991: // source. The sorters have the indexed columns only and the columns
1992: // are in the correct order.
1993: rowSources = new RowLocationRetRowSource[numIndexes];
1994: // Fill in the RowSources
1995: SortController[] sorters = new SortController[numIndexes];
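// Each sorter is opened and closed without ever receiving a row, so
// every sort row source below yields zero rows and the indexes are
// rebuilt empty - exactly what a 0-row replace requires.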
1996: for (int index = 0; index < numIndexes; index++) {
1997: sorters[index] = tc.openSort(sortIds[index]);
1998: sorters[index].close();
1999: rowSources[index] = tc.openSortRowSource(sortIds[index]);
2000: }
2001:
2002: long[] newIndexCongloms = new long[numIndexes];
2003:
2004: // Populate each index
2005: for (int index = 0; index < numIndexes; index++) {
2006: ConglomerateController indexCC;
2007: Properties properties = new Properties();
2008: ConglomerateDescriptor cd;
2009: // Get the ConglomerateDescriptor for the index
2010: cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);
2012:
2013: // Build the properties list for the new conglomerate
2014: indexCC = tc.openCompiledConglomerate(false,
2015: TransactionController.OPENMODE_FORUPDATE,
2016: TransactionController.MODE_TABLE,
2017: TransactionController.ISOLATION_SERIALIZABLE,
2018: constants.indexSCOCIs[index], indexDCOCIs[index]);
2019:
2020: // Get the properties on the old index
2021: indexCC.getInternalTablePropertySet(properties);
2022:
2023: /* Create the properties that language supplies when creating
2024: * the index. (The store doesn't preserve these.)
2025: */
2026: int indexRowLength = indexRows[index].nColumns();
2027: properties.put("baseConglomerateId", Long
2028: .toString(newHeapConglom));
2029: if (cd.getIndexDescriptor().isUnique()) {
2030: properties.put("nUniqueColumns", Integer
2031: .toString(indexRowLength - 1));
2032: } else {
2033: properties.put("nUniqueColumns", Integer
2034: .toString(indexRowLength));
2035: }
2036: properties.put("rowLocationColumn", Integer
2037: .toString(indexRowLength - 1));
2038: properties.put("nKeyFields", Integer
2039: .toString(indexRowLength));
2040:
2041: indexCC.close();
2042:
2043: // We can finally drain the sorter and rebuild the index
2044: // RESOLVE - all indexes are btrees right now
2045: // Populate the index.
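// (The row source is empty here, so no explicit column sort order is
// needed; null selects the default.)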
2046: newIndexCongloms[index] = tc.createAndLoadConglomerate(
2047: "BTREE", indexRows[index].getRowArray(),
2048: null, //default column sort order
2049: properties, TransactionController.IS_DEFAULT,
2050: rowSources[index], (long[]) null);
2051:
2052: /* Update the DataDictionary
2053: *
2054: * Update sys.sysconglomerates with the new conglomerate #. If the
2055: * conglomerate is shared by duplicate indexes, all the descriptors
2056: * for those indexes need to be updated with the new number.
2057: */
2058: dd.updateConglomerateDescriptor(
2059: td.getConglomerateDescriptors(constants.indexCIDS[index]),
2060: newIndexCongloms[index], tc);
2063:
2064: // Drop the old conglomerate
2065: tc.dropConglomerate(constants.indexCIDS[index]);
2066: }
2067: }
2068:
2069: /**
2070: * Get me a table scan result set, preferably a bulk
2071: * table scan, thank you. If we already have one, reopen it.
2072: */
2073: private BulkTableScanResultSet getTableScanResultSet(long conglomId)
2074: throws StandardException {
2075: if (tableScan == null) {
2076: tableScan = new BulkTableScanResultSet(conglomId,
2077: tc.getStaticCompiledConglomInfo(conglomId),
2078: activation,
2079: new MyRowAllocator(fullTemplate), // result row allocator
2080: 0, // result set number
2081: (GeneratedMethod) null, // start key getter
2082: 0, // start search operator
2083: (GeneratedMethod) null, // stop key getter
2084: 0, // stop search operator
2085: false,
2086: (Qualifier[][]) null, // qualifiers
2087: "tableName", (String) null,
2088: (String) null, // index name
2089: false, // is constraint
2090: false, // for update
2091: -1, // saved object for referenced bitImpl
2092: -1, TransactionController.MODE_TABLE,
2093: true, // table locked
2094: TransactionController.ISOLATION_READ_COMMITTED,
2095: LanguageProperties.BULK_FETCH_DEFAULT_INT, // rows per read
2096: false, // not a 1 row per scan
2097: 0d, // estimated rows
2098: 0d // estimated cost
2099: );
2100: tableScan.openCore();
2101: } else {
2102: tableScan.reopenCore();
2103: }
2104: return tableScan;
2105: }
2106:
2107: private String[] getColumnNames(int[] baseColumnPositions) {
2108: int length = baseColumnPositions.length;
2109: String[] columnNames = new String[length];
2110: for (int i = 0; i < length; i++) {
2111: columnNames[i] = constants.getColumnName(i);
2112: }
2113: return columnNames;
2114: }
2115:
2116: public void finish() throws StandardException {
2117: sourceResultSet.finish();
2118: super.finish();
2119: }
2120:
2121: // inner class to be our row template constructor
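// Stands in for the generated row allocator a compiled plan would
// normally supply (the "result row allocator" argument above): each
// invoke() returns a fresh clone of the template row.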
2122: class MyRowAllocator implements GeneratedMethod {
2123: private ExecRow row;
2124:
2125: MyRowAllocator(ExecRow row) {
2126: this.row = row;
2127: }
2128:
2129: public Object invoke(Object ref) {
2130: return row.getClone();
2131: }
2132: }
2133: }