0001: /*
0002:
0003: Derby - Class org.apache.derby.impl.sql.compile.FromBaseTable
0004:
0005: Licensed to the Apache Software Foundation (ASF) under one or more
0006: contributor license agreements. See the NOTICE file distributed with
0007: this work for additional information regarding copyright ownership.
0008: The ASF licenses this file to you under the Apache License, Version 2.0
0009: (the "License"); you may not use this file except in compliance with
0010: the License. You may obtain a copy of the License at
0011:
0012: http://www.apache.org/licenses/LICENSE-2.0
0013:
0014: Unless required by applicable law or agreed to in writing, software
0015: distributed under the License is distributed on an "AS IS" BASIS,
0016: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
0017: See the License for the specific language governing permissions and
0018: limitations under the License.
0019:
0020: */
0021:
0022: package org.apache.derby.impl.sql.compile;
0023:
0024: import org.apache.derby.catalog.IndexDescriptor;
0025: import org.apache.derby.iapi.util.StringUtil;
0026:
0027: import org.apache.derby.iapi.reference.ClassName;
0028: import org.apache.derby.iapi.reference.SQLState;
0029:
0030: import org.apache.derby.iapi.services.io.FormatableBitSet;
0031: import org.apache.derby.iapi.services.io.FormatableArrayHolder;
0032: import org.apache.derby.iapi.services.io.FormatableIntHolder;
0033: import org.apache.derby.iapi.util.JBitSet;
0034: import org.apache.derby.iapi.util.ReuseFactory;
0035: import org.apache.derby.iapi.services.classfile.VMOpcode;
0036:
0037: import org.apache.derby.iapi.services.compiler.MethodBuilder;
0038: import org.apache.derby.iapi.services.property.PropertyUtil;
0039: import org.apache.derby.iapi.services.sanity.SanityManager;
0040:
0041: import org.apache.derby.iapi.error.StandardException;
0042:
0043: import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;
0044:
0045: import org.apache.derby.iapi.sql.compile.CompilerContext;
0046: import org.apache.derby.iapi.sql.compile.OptimizablePredicateList;
0047: import org.apache.derby.iapi.sql.compile.Optimizer;
0048: import org.apache.derby.iapi.sql.compile.OptimizablePredicate;
0049: import org.apache.derby.iapi.sql.compile.Optimizable;
0050: import org.apache.derby.iapi.sql.compile.CostEstimate;
0051: import org.apache.derby.iapi.sql.compile.AccessPath;
0052: import org.apache.derby.iapi.sql.compile.JoinStrategy;
0053: import org.apache.derby.iapi.sql.compile.RowOrdering;
0054: import org.apache.derby.iapi.sql.compile.C_NodeTypes;
0055:
0056: import org.apache.derby.iapi.sql.dictionary.DataDictionary;
0057: import org.apache.derby.iapi.sql.dictionary.ColumnDescriptor;
0058: import org.apache.derby.iapi.sql.dictionary.ColumnDescriptorList;
0059: import org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor;
0060: import org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor;
0061: import org.apache.derby.iapi.sql.dictionary.IndexRowGenerator;
0062: import org.apache.derby.iapi.sql.dictionary.SchemaDescriptor;
0063: import org.apache.derby.iapi.sql.dictionary.TableDescriptor;
0064: import org.apache.derby.iapi.sql.dictionary.ViewDescriptor;
0065:
0066: import org.apache.derby.iapi.sql.execute.ExecRow;
0067: import org.apache.derby.iapi.sql.execute.ExecutionContext;
0068:
0069: import org.apache.derby.iapi.sql.LanguageProperties;
0070:
0071: import org.apache.derby.iapi.store.access.StaticCompiledOpenConglomInfo;
0072: import org.apache.derby.iapi.store.access.StoreCostController;
0073: import org.apache.derby.iapi.store.access.ScanController;
0074: import org.apache.derby.iapi.store.access.TransactionController;
0075:
0076: import org.apache.derby.iapi.types.DataValueDescriptor;
0077:
0078: import org.apache.derby.impl.sql.compile.ExpressionClassBuilder;
0079: import org.apache.derby.impl.sql.compile.ActivationClassBuilder;
0080:
0081: import java.util.Enumeration;
0082: import java.util.Properties;
0083: import java.util.Vector;
0084: import java.util.HashSet;
0085: import java.util.Set;
0086:
0087: /**
0088: * A FromBaseTable represents a table in the FROM list of a DML statement,
0089: * as distinguished from a FromSubquery, which represents a subquery in the
0090: * FROM list. A FromBaseTable may actually represent a view. During parsing,
0091: * we can't distinguish views from base tables. During binding, when we
0092: * find FromBaseTables that represent views, we replace them with FromSubqueries.
0093: * By the time we get to code generation, all FromSubqueries have been eliminated,
0094: * and all FromBaseTables will represent only true base tables.
0095: * <p>
0096: * <B>Positioned Update</B>: Currently, all columns of an updatable cursor
0097: * are selected to deal with a positioned update. This is because we don't
0098: * know what columns will ultimately be needed from the UpdateNode above
0099: * us. For example, consider:<pre><i>
0100: *
0101: * get c as 'select cint from t for update of ctinyint'
0102: * update t set ctinyint = csmallint
0103: *
 * </i></pre> Ideally, the cursor only selects cint. Then,
0105: * something akin to an IndexRowToBaseRow is generated to
0106: * take the CursorResultSet and get the appropriate columns
 * out of the base table from the RowLocation returned by the
0108: * cursor. Then the update node can generate the appropriate
0109: * NormalizeResultSet (or whatever else it might need) to
0110: * get things into the correct format for the UpdateResultSet.
0111: * See CurrentOfNode for more information.
0112: *
0113: * @author Jeff Lichtman
0114: */
0115:
public class FromBaseTable extends FromTable {
    /** Sentinel meaning "not yet set" (used for bulkFetch). */
    static final int UNSET = -1;

    /** Name of the table as written in the query. */
    TableName tableName;
    /** Data dictionary descriptor for this table (resolved during binding). */
    TableDescriptor tableDescriptor;

    /** Conglomerate descriptor for the table's heap. */
    ConglomerateDescriptor baseConglomerateDescriptor;
    /** All conglomerate descriptors (heap + indexes) for this table; lazily loaded. */
    ConglomerateDescriptor[] conglomDescs;

    /** UPDATE or DELETE (see constants below), or 0 when neither applies. */
    int updateOrDelete;

    /*
    ** The number of rows to bulkFetch.
    ** Initially it is unset (UNSET). If the user
    ** uses the bulkFetch table property,
    ** it is set to that. Otherwise, it
    ** may be turned on if it isn't an updatable
    ** cursor and it is the right type of
    ** result set (more than 1 row expected to
    ** be returned, and not hash, which does its
    ** own bulk fetch, and subquery).
    */
    int bulkFetch = UNSET;

    /* We may turn off bulk fetch for a variety of reasons,
     * including because of the min optimization.
     * bulkFetchTurnedOff is set to true in those cases.
     */
    boolean bulkFetchTurnedOff;

    /** Estimated row count for a single scan of this table. */
    private double singleScanRowCount;

    /** Bit set of column positions actually referenced by the query. */
    private FormatableBitSet referencedCols;
    /** Original (pre-pruning) result column list, kept as a template. */
    private ResultColumnList templateColumns;

    /* A 0-based array of column names for this table used
     * for optimizer trace.
     */
    private String[] columnNames;

    // true if we are to do a special scan to retrieve the last value
    // in the index (MIN/MAX optimization)
    private boolean specialMaxScan;

    // true if we are to do a distinct scan
    private boolean distinctScan;

    /**
     * Information for dependent table scan for Referential Actions
     */
    private boolean raDependentScan;
    private String raParentResultSetId;
    private long fkIndexConglomId;
    private int[] fkColArray;

    /**
     * Restriction as a PredicateList, partitioned by where each
     * predicate can be evaluated (base table vs. above the scan,
     * store-qualifiable vs. language-level, etc.).
     */
    PredicateList baseTableRestrictionList;
    PredicateList nonBaseTableRestrictionList;
    PredicateList restrictionList;
    PredicateList storeRestrictionList;
    PredicateList nonStoreRestrictionList;
    PredicateList requalificationRestrictionList;

    /** Values for updateOrDelete. */
    public static final int UPDATE = 1;
    public static final int DELETE = 2;

    /* Variables for EXISTS FBTs (flattened EXISTS subqueries) */
    private boolean existsBaseTable;
    private boolean isNotExists; //is a NOT EXISTS base table
    private JBitSet dependencyMap;

    /** True when the scan should acquire update locks. */
    private boolean getUpdateLocks;
0190:
0191: /**
0192: * Initializer for a table in a FROM list. Parameters are as follows:
0193: *
0194: * <ul>
0195: * <li>tableName The name of the table</li>
0196: * <li>correlationName The correlation name</li>
0197: * <li>derivedRCL The derived column list</li>
0198: * <li>tableProperties The Properties list associated with the table.</li>
0199: * </ul>
0200: *
0201: * <p>
0202: * - OR -
0203: * </p>
0204: *
0205: * <ul>
0206: * <li>tableName The name of the table</li>
0207: * <li>correlationName The correlation name</li>
0208: * <li>updateOrDelete Table is being updated/deleted from. </li>
0209: * <li>derivedRCL The derived column list</li>
0210: * </ul>
0211: */
0212: public void init(Object arg1, Object arg2, Object arg3, Object arg4) {
0213: if (arg3 instanceof Integer) {
0214: init(arg2, null);
0215: this .tableName = (TableName) arg1;
0216: this .updateOrDelete = ((Integer) arg3).intValue();
0217: resultColumns = (ResultColumnList) arg4;
0218: } else {
0219: init(arg2, arg4);
0220: this .tableName = (TableName) arg1;
0221: resultColumns = (ResultColumnList) arg3;
0222: }
0223:
0224: setOrigTableName(this .tableName);
0225: templateColumns = resultColumns;
0226: }
0227:
    /**
     * No LOJ (left outer join) reordering for a base table: it is a leaf
     * of the FROM tree, so there is nothing beneath it to reorder.
     *
     * @param numTables number of tables in the query (unused here)
     * @return always false
     * @exception StandardException Thrown on error
     */
    public boolean LOJ_reorderable(int numTables)
            throws StandardException {
        return false;
    }
0235:
0236: public JBitSet LOJgetReferencedTables(int numTables)
0237: throws StandardException {
0238: JBitSet map = new JBitSet(numTables);
0239: fillInReferencedTableMap(map);
0240: return map;
0241: }
0242:
0243: /*
0244: * Optimizable interface.
0245: */
0246:
    /**
     * Step this Optimizable to its next access path. The access-path space
     * is the cross product of conglomerates (heap plus each index) and join
     * strategies (the latter cycled by the super-class call).
     *
     * @see Optimizable#nextAccessPath
     *
     * @return true if there is another access path to consider
     * @exception StandardException Thrown on error
     */
    public boolean nextAccessPath(Optimizer optimizer,
            OptimizablePredicateList predList, RowOrdering rowOrdering)
            throws StandardException {
        String userSpecifiedIndexName = getUserSpecifiedIndexName();
        AccessPath ap = getCurrentAccessPath();
        ConglomerateDescriptor currentConglomerateDescriptor = ap
                .getConglomerateDescriptor();

        optimizer.trace(Optimizer.CALLING_NEXT_ACCESS_PATH,
                ((predList == null) ? 0 : predList.size()), 0, 0.0,
                getExposedName());

        /*
        ** Remove the ordering of the current conglomerate descriptor,
        ** if any.
        */
        rowOrdering.removeOptimizable(getTableNumber());

        // RESOLVE: This will have to be modified to step through the
        // join strategies as well as the conglomerates.

        if (userSpecifiedIndexName != null) {
            /*
            ** User specified an index name, so we should look at only one
            ** index. If there is a current conglomerate descriptor, and there
            ** are no more join strategies, we've already looked at the index,
            ** so go back to null.
            */
            if (currentConglomerateDescriptor != null) {
                if (!super.nextAccessPath(optimizer, predList,
                        rowOrdering)) {
                    currentConglomerateDescriptor = null;
                }
            } else {
                optimizer.trace(Optimizer.LOOKING_FOR_SPECIFIED_INDEX,
                        tableNumber, 0, 0.0, userSpecifiedIndexName);

                if (StringUtil.SQLToUpperCase(userSpecifiedIndexName)
                        .equals("NULL")) {
                    /* Special case - user-specified table scan:
                     * use the heap conglomerate directly. */
                    currentConglomerateDescriptor = tableDescriptor
                            .getConglomerateDescriptor(tableDescriptor
                                    .getHeapConglomerateId());
                } else {
                    /* User-specified index name: linear search by
                     * conglomerate name. */
                    getConglomDescs();

                    // NOTE(review): if no name matches, this loop exits with
                    // the LAST descriptor still assigned (non-null), so the
                    // debug assert below can never fire; presumably
                    // verifyProperties() has already validated the forced
                    // index name — confirm.
                    for (int index = 0; index < conglomDescs.length; index++) {
                        currentConglomerateDescriptor = conglomDescs[index];
                        String conglomerateName = currentConglomerateDescriptor
                                .getConglomerateName();
                        if (conglomerateName != null) {
                            /* Have we found the desired index? */
                            if (conglomerateName
                                    .equals(userSpecifiedIndexName)) {
                                break;
                            }
                        }
                    }

                    /* We should always find a match */
                    if (SanityManager.DEBUG) {
                        if (currentConglomerateDescriptor == null) {
                            SanityManager
                                    .THROWASSERT("Expected to find match for forced index "
                                            + userSpecifiedIndexName);
                        }
                    }
                }

                // First visit of the forced conglomerate must yield at
                // least one join strategy.
                if (!super.nextAccessPath(optimizer, predList,
                        rowOrdering)) {
                    if (SanityManager.DEBUG) {
                        SanityManager
                                .THROWASSERT("No join strategy found");
                    }
                }
            }
        } else {
            if (currentConglomerateDescriptor != null) {
                /*
                ** Once we have a conglomerate descriptor, cycle through
                ** the join strategies (done in parent).
                */
                if (!super.nextAccessPath(optimizer, predList,
                        rowOrdering)) {
                    /*
                    ** When we're out of join strategies, go to the next
                    ** conglomerate descriptor.
                    */
                    currentConglomerateDescriptor = getNextConglom(currentConglomerateDescriptor);

                    /*
                    ** New conglomerate, so step through join strategies
                    ** again.
                    */
                    resetJoinStrategies(optimizer);

                    if (!super.nextAccessPath(optimizer, predList,
                            rowOrdering)) {
                        if (SanityManager.DEBUG) {
                            SanityManager
                                    .THROWASSERT("No join strategy found");
                        }
                    }
                }
            } else {
                /* Get the first conglomerate descriptor */
                currentConglomerateDescriptor = getFirstConglom();

                if (!super.nextAccessPath(optimizer, predList,
                        rowOrdering)) {
                    if (SanityManager.DEBUG) {
                        SanityManager
                                .THROWASSERT("No join strategy found");
                    }
                }
            }
        }

        // Trace whether we ran out of conglomerates or are considering one.
        if (currentConglomerateDescriptor == null) {
            optimizer.trace(Optimizer.NO_MORE_CONGLOMERATES,
                    tableNumber, 0, 0.0, null);
        } else {
            currentConglomerateDescriptor.setColumnNames(columnNames);
            optimizer.trace(Optimizer.CONSIDERING_CONGLOMERATE,
                    tableNumber, 0, 0.0, currentConglomerateDescriptor);
        }

        /*
        ** Tell the rowOrdering what the ordering of this conglomerate is
        */
        if (currentConglomerateDescriptor != null) {
            if (!currentConglomerateDescriptor.isIndex()) {
                /* If we are scanning the heap, but there
                 * is a full match on a unique key, then
                 * we can say that the table IS NOT unordered.
                 * (We can't currently say what the ordering is
                 * though.)
                 */
                if (!isOneRowResultSet(predList)) {
                    optimizer.trace(
                            Optimizer.ADDING_UNORDERED_OPTIMIZABLE,
                            ((predList == null) ? 0 : predList.size()),
                            0, 0.0, null);

                    rowOrdering.addUnorderedOptimizable(this);
                } else {
                    optimizer
                            .trace(
                                    Optimizer.SCANNING_HEAP_FULL_MATCH_ON_UNIQUE_KEY,
                                    0, 0, 0.0, null);
                }
            } else {
                // Index scan: register each index column's ordering
                // (ascending/descending) with the row ordering.
                IndexRowGenerator irg = currentConglomerateDescriptor
                        .getIndexDescriptor();

                int[] baseColumnPositions = irg.baseColumnPositions();
                boolean[] isAscending = irg.isAscending();

                for (int i = 0; i < baseColumnPositions.length; i++) {
                    /*
                    ** Don't add the column to the ordering if it's already
                    ** an ordered column. This can happen in the following
                    ** case:
                    **
                    **        create index ti on t(x, y);
                    **        select * from t where x = 1 order by y;
                    **
                    ** Column x is always ordered, so we want to avoid the
                    ** sort when using index ti. This is accomplished by
                    ** making column y appear as the first ordered column
                    ** in the list.
                    */
                    if (!rowOrdering.orderedOnColumn(
                            isAscending[i] ? RowOrdering.ASCENDING
                                    : RowOrdering.DESCENDING,
                            getTableNumber(), baseColumnPositions[i])) {
                        rowOrdering
                                .nextOrderPosition(isAscending[i] ? RowOrdering.ASCENDING
                                        : RowOrdering.DESCENDING);

                        rowOrdering.addOrderedColumn(
                                isAscending[i] ? RowOrdering.ASCENDING
                                        : RowOrdering.DESCENDING,
                                getTableNumber(),
                                baseColumnPositions[i]);
                    }
                }
            }
        }

        // Record the chosen conglomerate on the current access path;
        // null means there are no more access paths to try.
        ap.setConglomerateDescriptor(currentConglomerateDescriptor);

        return currentConglomerateDescriptor != null;
    }
0448:
    /**
     * Tell super-class that this Optimizable can be ordered:
     * a base table can contribute index orderings to the row ordering.
     *
     * @return always true for a base table
     */
    protected boolean canBeOrdered() {
        return true;
    }
0453:
0454: /**
0455: * @see org.apache.derby.iapi.sql.compile.Optimizable#optimizeIt
0456: *
0457: * @exception StandardException Thrown on error
0458: */
0459: public CostEstimate optimizeIt(Optimizer optimizer,
0460: OptimizablePredicateList predList, CostEstimate outerCost,
0461: RowOrdering rowOrdering) throws StandardException {
0462: optimizer.costOptimizable(this , tableDescriptor,
0463: getCurrentAccessPath().getConglomerateDescriptor(),
0464: predList, outerCost);
0465:
0466: // The cost that we found from the above call is now stored in the
0467: // cost field of this FBT's current access path. So that's the
0468: // cost we want to return here.
0469: return getCurrentAccessPath().getCostEstimate();
0470: }
0471:
    /**
     * Return the data dictionary descriptor for this table.
     *
     * @see Optimizable#getTableDescriptor
     */
    public TableDescriptor getTableDescriptor() {
        return tableDescriptor;
    }
0476:
    /** @see Optimizable#isMaterializable
     *
     * @return always true — a base table's rows are already materialized
     *         in the store
     * @exception StandardException Thrown on error
     */
    public boolean isMaterializable() throws StandardException {
        /* base tables are always materializable */
        return true;
    }
0485:
0486: /**
0487: * @see Optimizable#pushOptPredicate
0488: *
0489: * @exception StandardException Thrown on error
0490: */
0491:
0492: public boolean pushOptPredicate(
0493: OptimizablePredicate optimizablePredicate)
0494: throws StandardException {
0495: if (SanityManager.DEBUG) {
0496: SanityManager
0497: .ASSERT(optimizablePredicate instanceof Predicate,
0498: "optimizablePredicate expected to be instanceof Predicate");
0499: }
0500:
0501: /* Add the matching predicate to the restrictionList */
0502: restrictionList.addPredicate((Predicate) optimizablePredicate);
0503:
0504: return true;
0505: }
0506:
0507: /**
0508: * @see Optimizable#pullOptPredicates
0509: *
0510: * @exception StandardException Thrown on error
0511: */
0512: public void pullOptPredicates(
0513: OptimizablePredicateList optimizablePredicates)
0514: throws StandardException {
0515: for (int i = restrictionList.size() - 1; i >= 0; i--) {
0516: optimizablePredicates.addOptPredicate(restrictionList
0517: .getOptPredicate(i));
0518: restrictionList.removeOptPredicate(i);
0519: }
0520: }
0521:
0522: /**
0523: * @see Optimizable#isCoveringIndex
0524: * @exception StandardException Thrown on error
0525: */
0526: public boolean isCoveringIndex(ConglomerateDescriptor cd)
0527: throws StandardException {
0528: boolean coveringIndex = true;
0529: IndexRowGenerator irg;
0530: int[] baseCols;
0531: int colPos;
0532:
0533: /* You can only be a covering index if you're an index */
0534: if (!cd.isIndex())
0535: return false;
0536:
0537: irg = cd.getIndexDescriptor();
0538: baseCols = irg.baseColumnPositions();
0539:
0540: /* First we check to see if this is a covering index */
0541: int rclSize = resultColumns.size();
0542: for (int index = 0; index < rclSize; index++) {
0543: ResultColumn rc = (ResultColumn) resultColumns
0544: .elementAt(index);
0545:
0546: /* Ignore unreferenced columns */
0547: if (!rc.isReferenced()) {
0548: continue;
0549: }
0550:
0551: /* Ignore constants - this can happen if all of the columns
0552: * were projected out and we ended up just generating
0553: * a "1" in RCL.doProject().
0554: */
0555: if (rc.getExpression() instanceof ConstantNode) {
0556: continue;
0557: }
0558:
0559: coveringIndex = false;
0560:
0561: colPos = rc.getColumnPosition();
0562:
0563: /* Is this column in the index? */
0564: for (int i = 0; i < baseCols.length; i++) {
0565: if (colPos == baseCols[i]) {
0566: coveringIndex = true;
0567: break;
0568: }
0569: }
0570:
0571: /* No need to continue if the column was not in the index */
0572: if (!coveringIndex) {
0573: break;
0574: }
0575: }
0576: return coveringIndex;
0577: }
0578:
0579: /** @see Optimizable#verifyProperties
0580: * @exception StandardException Thrown on error
0581: */
0582: public void verifyProperties(DataDictionary dDictionary)
0583: throws StandardException {
0584: if (tableProperties == null) {
0585: return;
0586: }
0587: /* Check here for:
0588: * invalid properties key
0589: * index and constraint properties
0590: * non-existent index
0591: * non-existent constraint
0592: * invalid joinStrategy
0593: * invalid value for hashInitialCapacity
0594: * invalid value for hashLoadFactor
0595: * invalid value for hashMaxCapacity
0596: */
0597: boolean indexSpecified = false;
0598: boolean constraintSpecified = false;
0599: ConstraintDescriptor consDesc = null;
0600: Enumeration e = tableProperties.keys();
0601:
0602: StringUtil.SQLEqualsIgnoreCase(tableDescriptor.getSchemaName(),
0603: "SYS");
0604: while (e.hasMoreElements()) {
0605: String key = (String) e.nextElement();
0606: String value = (String) tableProperties.get(key);
0607:
0608: if (key.equals("index")) {
0609: // User only allowed to specify 1 of index and constraint, not both
0610: if (constraintSpecified) {
0611: throw StandardException
0612: .newException(
0613: SQLState.LANG_BOTH_FORCE_INDEX_AND_CONSTRAINT_SPECIFIED,
0614: getBaseTableName());
0615: }
0616: indexSpecified = true;
0617:
0618: /* Validate index name - NULL means table scan */
0619: if (!StringUtil.SQLToUpperCase(value).equals("NULL")) {
0620: ConglomerateDescriptor cd = null;
0621: ConglomerateDescriptor[] cds = tableDescriptor
0622: .getConglomerateDescriptors();
0623:
0624: for (int index = 0; index < cds.length; index++) {
0625: cd = cds[index];
0626: String conglomerateName = cd
0627: .getConglomerateName();
0628: if (conglomerateName != null) {
0629: if (conglomerateName.equals(value)) {
0630: break;
0631: }
0632: }
0633: // Not a match, clear cd
0634: cd = null;
0635: }
0636:
0637: // Throw exception if user specified index not found
0638: if (cd == null) {
0639: throw StandardException.newException(
0640: SQLState.LANG_INVALID_FORCED_INDEX1,
0641: value, getBaseTableName());
0642: }
0643: /* Query is dependent on the ConglomerateDescriptor */
0644: getCompilerContext().createDependency(cd);
0645: }
0646: } else if (key.equals("constraint")) {
0647: // User only allowed to specify 1 of index and constraint, not both
0648: if (indexSpecified) {
0649: throw StandardException
0650: .newException(
0651: SQLState.LANG_BOTH_FORCE_INDEX_AND_CONSTRAINT_SPECIFIED,
0652: getBaseTableName());
0653: }
0654: constraintSpecified = true;
0655:
0656: if (!StringUtil.SQLToUpperCase(value).equals("NULL")) {
0657: consDesc = dDictionary
0658: .getConstraintDescriptorByName(
0659: tableDescriptor,
0660: (SchemaDescriptor) null, value,
0661: false);
0662:
0663: /* Throw exception if user specified constraint not found
0664: * or if it does not have a backing index.
0665: */
0666: if ((consDesc == null)
0667: || !consDesc.hasBackingIndex()) {
0668: throw StandardException.newException(
0669: SQLState.LANG_INVALID_FORCED_INDEX2,
0670: value, getBaseTableName());
0671: }
0672:
0673: /* Query is dependent on the ConstraintDescriptor */
0674: getCompilerContext().createDependency(consDesc);
0675: }
0676: } else if (key.equals("joinStrategy")) {
0677: userSpecifiedJoinStrategy = StringUtil
0678: .SQLToUpperCase(value);
0679: } else if (key.equals("hashInitialCapacity")) {
0680: initialCapacity = getIntProperty(value, key);
0681:
0682: // verify that the specified value is valid
0683: if (initialCapacity <= 0) {
0684: throw StandardException
0685: .newException(
0686: SQLState.LANG_INVALID_HASH_INITIAL_CAPACITY,
0687: String.valueOf(initialCapacity));
0688: }
0689: } else if (key.equals("hashLoadFactor")) {
0690: try {
0691: loadFactor = Float.valueOf(value).floatValue();
0692: } catch (NumberFormatException nfe) {
0693: throw StandardException
0694: .newException(
0695: SQLState.LANG_INVALID_NUMBER_FORMAT_FOR_OVERRIDE,
0696: value, key);
0697: }
0698:
0699: // verify that the specified value is valid
0700: if (loadFactor <= 0.0 || loadFactor > 1.0) {
0701: throw StandardException.newException(
0702: SQLState.LANG_INVALID_HASH_LOAD_FACTOR,
0703: value);
0704: }
0705: } else if (key.equals("hashMaxCapacity")) {
0706: maxCapacity = getIntProperty(value, key);
0707:
0708: // verify that the specified value is valid
0709: if (maxCapacity <= 0) {
0710: throw StandardException.newException(
0711: SQLState.LANG_INVALID_HASH_MAX_CAPACITY,
0712: String.valueOf(maxCapacity));
0713: }
0714: } else if (key.equals("bulkFetch")) {
0715: bulkFetch = getIntProperty(value, key);
0716:
0717: // verify that the specified value is valid
0718: if (bulkFetch <= 0) {
0719: throw StandardException.newException(
0720: SQLState.LANG_INVALID_BULK_FETCH_VALUE,
0721: String.valueOf(bulkFetch));
0722: }
0723:
0724: // no bulk fetch on updatable scans
0725: if (forUpdate()) {
0726: throw StandardException
0727: .newException(SQLState.LANG_INVALID_BULK_FETCH_UPDATEABLE);
0728: }
0729: } else {
0730: // No other "legal" values at this time
0731: throw StandardException.newException(
0732: SQLState.LANG_INVALID_FROM_TABLE_PROPERTY, key,
0733: "index, constraint, joinStrategy");
0734: }
0735: }
0736:
0737: /* If user specified a non-null constraint name(DERBY-1707), then
0738: * replace it in the properties list with the underlying index name to
0739: * simplify the code in the optimizer.
0740: * NOTE: The code to get from the constraint name, for a constraint
0741: * with a backing index, to the index name is convoluted. Given
0742: * the constraint name, we can get the conglomerate id from the
0743: * ConstraintDescriptor. We then use the conglomerate id to get
0744: * the ConglomerateDescriptor from the DataDictionary and, finally,
0745: * we get the index name (conglomerate name) from the ConglomerateDescriptor.
0746: */
0747: if (constraintSpecified && consDesc != null) {
0748: ConglomerateDescriptor cd = dDictionary
0749: .getConglomerateDescriptor(consDesc
0750: .getConglomerateId());
0751: String indexName = cd.getConglomerateName();
0752:
0753: tableProperties.remove("constraint");
0754: tableProperties.put("index", indexName);
0755: }
0756: }
0757:
    /**
     * Return the unqualified name of the underlying base table.
     *
     * @see Optimizable#getBaseTableName
     */
    public String getBaseTableName() {
        return tableName.getTableName();
    }
0762:
0763: /** @see Optimizable#startOptimizing */
0764: public void startOptimizing(Optimizer optimizer,
0765: RowOrdering rowOrdering) {
0766: AccessPath ap = getCurrentAccessPath();
0767: AccessPath bestAp = getBestAccessPath();
0768: AccessPath bestSortAp = getBestSortAvoidancePath();
0769:
0770: ap.setConglomerateDescriptor((ConglomerateDescriptor) null);
0771: bestAp.setConglomerateDescriptor((ConglomerateDescriptor) null);
0772: bestSortAp
0773: .setConglomerateDescriptor((ConglomerateDescriptor) null);
0774: ap.setCoveringIndexScan(false);
0775: bestAp.setCoveringIndexScan(false);
0776: bestSortAp.setCoveringIndexScan(false);
0777: ap.setLockMode(0);
0778: bestAp.setLockMode(0);
0779: bestSortAp.setLockMode(0);
0780:
0781: /*
0782: ** Only need to do this for current access path, because the
0783: ** costEstimate will be copied to the best access paths as
0784: ** necessary.
0785: */
0786: CostEstimate costEstimate = getCostEstimate(optimizer);
0787: ap.setCostEstimate(costEstimate);
0788:
0789: /*
0790: ** This is the initial cost of this optimizable. Initialize it
0791: ** to the maximum cost so that the optimizer will think that
0792: ** any access path is better than none.
0793: */
0794: costEstimate.setCost(Double.MAX_VALUE, Double.MAX_VALUE,
0795: Double.MAX_VALUE);
0796:
0797: super .startOptimizing(optimizer, rowOrdering);
0798: }
0799:
    /**
     * Map a column position in the full base row to its position in the
     * (possibly pruned) row this scan returns; delegates to the shared
     * mapping helper.
     *
     * @see Optimizable#convertAbsoluteToRelativeColumnPosition
     */
    public int convertAbsoluteToRelativeColumnPosition(
            int absolutePosition) {
        return mapAbsoluteToRelativeColumnPosition(absolutePosition);
    }
0805:
0806: /**
0807: * @see Optimizable#estimateCost
0808: *
0809: * @exception StandardException Thrown on error
0810: */
0811: public CostEstimate estimateCost(OptimizablePredicateList predList,
0812: ConglomerateDescriptor cd, CostEstimate outerCost,
0813: Optimizer optimizer, RowOrdering rowOrdering)
0814: throws StandardException {
0815: double cost;
0816: boolean statisticsForTable = false;
0817: boolean statisticsForConglomerate = false;
0818: /* unknownPredicateList contains all predicates whose effect on
0819: * cost/selectivity can't be calculated by the store.
0820: */
0821: PredicateList unknownPredicateList = null;
0822:
0823: if (optimizer.useStatistics() && predList != null) {
0824: /* if user has specified that we don't use statistics,
0825: pretend that statistics don't exist.
0826: */
0827: statisticsForConglomerate = tableDescriptor
0828: .statisticsExist(cd);
0829: statisticsForTable = tableDescriptor.statisticsExist(null);
0830: unknownPredicateList = new PredicateList();
0831: predList.copyPredicatesToOtherList(unknownPredicateList);
0832:
0833: }
0834:
0835: AccessPath currentAccessPath = getCurrentAccessPath();
0836: JoinStrategy currentJoinStrategy = currentAccessPath
0837: .getJoinStrategy();
0838:
0839: optimizer.trace(Optimizer.ESTIMATING_COST_OF_CONGLOMERATE,
0840: tableNumber, 0, 0.0, cd);
0841:
0842: /* Get the uniqueness factory for later use (see below) */
0843: double tableUniquenessFactor = optimizer
0844: .uniqueJoinWithOuterTable(predList);
0845:
0846: boolean oneRowResultSetForSomeConglom = isOneRowResultSet(predList);
0847:
0848: /* Get the predicates that can be used for scanning the base table */
0849: baseTableRestrictionList.removeAllElements();
0850:
0851: currentJoinStrategy.getBasePredicates(predList,
0852: baseTableRestrictionList, this );
0853:
0854: /* RESOLVE: Need to figure out how to cache the StoreCostController */
0855: StoreCostController scc = getStoreCostController(cd);
0856:
0857: CostEstimate costEstimate = getScratchCostEstimate(optimizer);
0858:
0859: /* First, get the cost for one scan */
0860:
0861: /* Does the conglomerate match at most one row? */
0862: if (isOneRowResultSet(cd, baseTableRestrictionList)) {
0863: /*
0864: ** Tell the RowOrdering that this optimizable is always ordered.
0865: ** It will figure out whether it is really always ordered in the
0866: ** context of the outer tables and their orderings.
0867: */
0868: rowOrdering.optimizableAlwaysOrdered(this );
0869:
0870: singleScanRowCount = 1.0;
0871:
0872: /* Yes, the cost is to fetch exactly one row */
0873: // RESOLVE: NEED TO FIGURE OUT HOW TO GET REFERENCED COLUMN LIST,
0874: // FIELD STATES, AND ACCESS TYPE
0875: cost = scc.getFetchFromFullKeyCost((FormatableBitSet) null,
0876: 0);
0877:
0878: optimizer.trace(Optimizer.MATCH_SINGLE_ROW_COST,
0879: tableNumber, 0, cost, null);
0880:
0881: costEstimate.setCost(cost, 1.0d, 1.0d);
0882:
0883: /*
0884: ** Let the join strategy decide whether the cost of the base
0885: ** scan is a single scan, or a scan per outer row.
0886: ** NOTE: The multiplication should only be done against the
0887: ** total row count, not the singleScanRowCount.
0888: */
0889: double newCost = costEstimate.getEstimatedCost();
0890:
0891: if (currentJoinStrategy.multiplyBaseCostByOuterRows()) {
0892: newCost *= outerCost.rowCount();
0893: }
0894:
0895: costEstimate.setCost(newCost, costEstimate.rowCount()
0896: * outerCost.rowCount(), costEstimate
0897: .singleScanRowCount());
0898:
0899: /*
0900: ** Choose the lock mode. If the start/stop conditions are
0901: ** constant, choose row locking, because we will always match
0902: ** the same row. If they are not constant (i.e. they include
0903: ** a join), we decide whether to do row locking based on
0904: ** the total number of rows for the life of the query.
0905: */
0906: boolean constantStartStop = true;
0907: for (int i = 0; i < predList.size(); i++) {
0908: OptimizablePredicate pred = predList.getOptPredicate(i);
0909:
0910: /*
0911: ** The predicates are in index order, so the start and
0912: ** stop keys should be first.
0913: */
0914: if (!(pred.isStartKey() || pred.isStopKey())) {
0915: break;
0916: }
0917:
0918: /* Stop when we've found a join */
0919: if (!pred.getReferencedMap().hasSingleBitSet()) {
0920: constantStartStop = false;
0921: break;
0922: }
0923: }
0924:
0925: if (constantStartStop) {
0926: currentAccessPath
0927: .setLockMode(TransactionController.MODE_RECORD);
0928:
0929: optimizer.trace(
0930: Optimizer.ROW_LOCK_ALL_CONSTANT_START_STOP, 0,
0931: 0, 0.0, null);
0932: } else {
0933: setLockingBasedOnThreshold(optimizer, costEstimate
0934: .rowCount());
0935: }
0936:
0937: optimizer.trace(Optimizer.COST_OF_N_SCANS, tableNumber, 0,
0938: outerCost.rowCount(), costEstimate);
0939:
0940: /* Add in cost of fetching base row for non-covering index */
0941: if (cd.isIndex() && (!isCoveringIndex(cd))) {
0942: double singleFetchCost = getBaseCostController()
0943: .getFetchFromRowLocationCost(
0944: (FormatableBitSet) null, 0);
0945: cost = singleFetchCost * costEstimate.rowCount();
0946:
0947: costEstimate.setEstimatedCost(costEstimate
0948: .getEstimatedCost()
0949: + cost);
0950:
0951: optimizer.trace(Optimizer.NON_COVERING_INDEX_COST,
0952: tableNumber, 0, cost, null);
0953: }
0954: } else {
0955: /* Conglomerate might match more than one row */
0956:
0957: /*
0958: ** Some predicates are good for start/stop, but we don't know
0959: ** the values they are being compared to at this time, so we
0960: ** estimate their selectivity in language rather than ask the
** store about them. The predicates on the first column of
0962: ** the conglomerate reduce the number of pages and rows scanned.
0963: ** The predicates on columns after the first reduce the number
0964: ** of rows scanned, but have a much smaller effect on the number
0965: ** of pages scanned, so we keep track of these selectivities in
0966: ** two separate variables: extraFirstColumnSelectivity and
0967: ** extraStartStopSelectivity. (Theoretically, we could try to
0968: ** figure out the effect of predicates after the first column
0969: ** on the number of pages scanned, but it's too hard, so we
0970: ** use these predicates only to reduce the estimated number of
0971: ** rows. For comparisons with known values, though, the store
0972: ** can figure out exactly how many rows and pages are scanned.)
0973: **
0974: ** Other predicates are not good for start/stop. We keep track
** of their selectivities separately, because these limit the
0976: ** number of rows, but not the number of pages, and so need to
0977: ** be factored into the row count but not into the cost.
0978: ** These selectivities are factored into extraQualifierSelectivity.
0979: **
0980: ** statStartStopSelectivity (using statistics) represents the
0981: ** selectivity of start/stop predicates that can be used to scan
0982: ** the index. If no statistics exist for the conglomerate then
0983: ** the value of this variable remains at 1.0
0984: **
0985: ** statCompositeSelectivity (using statistics) represents the
0986: ** selectivity of all the predicates (including NonBaseTable
0987: ** predicates). This represents the most educated guess [among
0988: ** all the wild surmises in this routine] as to the number
0989: ** of rows that will be returned from this joinNode.
0990: ** If no statistics exist on the table or no statistics at all
** can be found to satisfy the predicates at this join operator,
0992: ** then statCompositeSelectivity is left initialized at 1.0
0993: */
0994: double extraFirstColumnSelectivity = 1.0d;
0995: double extraStartStopSelectivity = 1.0d;
0996: double extraQualifierSelectivity = 1.0d;
0997: double extraNonQualifierSelectivity = 1.0d;
0998: double statStartStopSelectivity = 1.0d;
0999: double statCompositeSelectivity = 1.0d;
1000:
1001: int numExtraFirstColumnPreds = 0;
1002: int numExtraStartStopPreds = 0;
1003: int numExtraQualifiers = 0;
1004: int numExtraNonQualifiers = 0;
1005:
1006: /*
1007: ** It is possible for something to be a start or stop predicate
1008: ** without it being possible to use it as a key for cost estimation.
1009: ** For example, with an index on (c1, c2), and the predicate
1010: ** c1 = othertable.c3 and c2 = 1, the comparison on c1 is with
1011: ** an unknown value, so we can't pass it to the store. This means
1012: ** we can't pass the comparison on c2 to the store, either.
1013: **
1014: ** The following booleans keep track of whether we have seen
1015: ** gaps in the keys we can pass to the store.
1016: */
1017: boolean startGap = false;
1018: boolean stopGap = false;
1019: boolean seenFirstColumn = false;
1020:
1021: /*
1022: ** We need to figure out the number of rows touched to decide
1023: ** whether to use row locking or table locking. If the start/stop
1024: ** conditions are constant (i.e. no joins), the number of rows
1025: ** touched is the number of rows per scan. But if the start/stop
1026: ** conditions contain a join, the number of rows touched must
1027: ** take the number of outer rows into account.
1028: */
1029: boolean constantStartStop = true;
1030: boolean startStopFound = false;
1031:
1032: /* Count the number of start and stop keys */
1033: int startKeyNum = 0;
1034: int stopKeyNum = 0;
1035: OptimizablePredicate pred;
1036: int predListSize;
1037:
1038: if (predList != null)
1039: predListSize = baseTableRestrictionList.size();
1040: else
1041: predListSize = 0;
1042:
1043: int startStopPredCount = 0;
1044: ColumnReference firstColumn = null;
1045: for (int i = 0; i < predListSize; i++) {
1046: pred = baseTableRestrictionList.getOptPredicate(i);
1047: boolean startKey = pred.isStartKey();
1048: boolean stopKey = pred.isStopKey();
1049: if (startKey || stopKey) {
1050: startStopFound = true;
1051:
1052: if (!pred.getReferencedMap().hasSingleBitSet()) {
1053: constantStartStop = false;
1054: }
1055:
1056: boolean knownConstant = pred
1057: .compareWithKnownConstant(this , true);
1058: if (startKey) {
1059: if (knownConstant && (!startGap)) {
1060: startKeyNum++;
1061: if (unknownPredicateList != null)
1062: unknownPredicateList
1063: .removeOptPredicate(pred);
1064: } else {
1065: startGap = true;
1066: }
1067: }
1068:
1069: if (stopKey) {
1070: if (knownConstant && (!stopGap)) {
1071: stopKeyNum++;
1072: if (unknownPredicateList != null)
1073: unknownPredicateList
1074: .removeOptPredicate(pred);
1075: } else {
1076: stopGap = true;
1077: }
1078: }
1079:
1080: /* If either we are seeing startGap or stopGap because start/stop key is
1081: * comparison with non-constant, we should multiply the selectivity to
1082: * extraFirstColumnSelectivity. Beetle 4787.
1083: */
1084: if (startGap || stopGap) {
1085: // Don't include redundant join predicates in selectivity calculations
1086: if (baseTableRestrictionList
1087: .isRedundantPredicate(i))
1088: continue;
1089:
1090: if (startKey && stopKey)
1091: startStopPredCount++;
1092:
1093: if (pred.getIndexPosition() == 0) {
1094: extraFirstColumnSelectivity *= pred
1095: .selectivity(this );
1096: if (!seenFirstColumn) {
1097: ValueNode relNode = ((Predicate) pred)
1098: .getAndNode().getLeftOperand();
1099: if (relNode instanceof BinaryRelationalOperatorNode)
1100: firstColumn = ((BinaryRelationalOperatorNode) relNode)
1101: .getColumnOperand(this );
1102: seenFirstColumn = true;
1103: }
1104: } else {
1105: extraStartStopSelectivity *= pred
1106: .selectivity(this );
1107: numExtraStartStopPreds++;
1108: }
1109: }
1110: } else {
1111: // Don't include redundant join predicates in selectivity calculations
1112: if (baseTableRestrictionList
1113: .isRedundantPredicate(i)) {
1114: continue;
1115: }
1116:
1117: /* If we have "like" predicate on the first index column, it is more likely
1118: * to have a smaller range than "between", so we apply extra selectivity 0.2
1119: * here. beetle 4387, 4787.
1120: */
1121: if (pred instanceof Predicate) {
1122: ValueNode leftOpnd = ((Predicate) pred)
1123: .getAndNode().getLeftOperand();
1124: if (firstColumn != null
1125: && leftOpnd instanceof LikeEscapeOperatorNode) {
1126: LikeEscapeOperatorNode likeNode = (LikeEscapeOperatorNode) leftOpnd;
1127: if (likeNode.getLeftOperand()
1128: .requiresTypeFromContext()) {
1129: ValueNode receiver = ((TernaryOperatorNode) likeNode)
1130: .getReceiver();
1131: if (receiver instanceof ColumnReference) {
1132: ColumnReference cr = (ColumnReference) receiver;
1133: if (cr.getTableNumber() == firstColumn
1134: .getTableNumber()
1135: && cr.getColumnNumber() == firstColumn
1136: .getColumnNumber())
1137: extraFirstColumnSelectivity *= 0.2;
1138: }
1139: }
1140: }
1141: }
1142:
1143: if (pred.isQualifier()) {
1144: extraQualifierSelectivity *= pred
1145: .selectivity(this );
1146: numExtraQualifiers++;
1147: } else {
1148: extraNonQualifierSelectivity *= pred
1149: .selectivity(this );
1150: numExtraNonQualifiers++;
1151: }
1152:
1153: /*
1154: ** Strictly speaking, it shouldn't be necessary to
1155: ** indicate a gap here, since there should be no more
1156: ** start/stop predicates, but let's do it, anyway.
1157: */
1158: startGap = true;
1159: stopGap = true;
1160: }
1161: }
1162:
1163: if (unknownPredicateList != null) {
1164: statCompositeSelectivity = unknownPredicateList
1165: .selectivity(this );
1166: if (statCompositeSelectivity == -1.0d)
1167: statCompositeSelectivity = 1.0d;
1168: }
1169:
1170: if (seenFirstColumn && statisticsForConglomerate
1171: && (startStopPredCount > 0)) {
1172: statStartStopSelectivity = tableDescriptor
1173: .selectivityForConglomerate(cd,
1174: startStopPredCount);
1175: }
1176:
1177: /*
1178: ** Factor the non-base-table predicates into the extra
1179: ** non-qualifier selectivity, since these will restrict the
1180: ** number of rows, but not the cost.
1181: */
1182: extraNonQualifierSelectivity *= currentJoinStrategy
1183: .nonBasePredicateSelectivity(this , predList);
1184:
1185: /* Create the start and stop key arrays, and fill them in */
1186: DataValueDescriptor[] startKeys;
1187: DataValueDescriptor[] stopKeys;
1188:
1189: if (startKeyNum > 0)
1190: startKeys = new DataValueDescriptor[startKeyNum];
1191: else
1192: startKeys = null;
1193:
1194: if (stopKeyNum > 0)
1195: stopKeys = new DataValueDescriptor[stopKeyNum];
1196: else
1197: stopKeys = null;
1198:
1199: startKeyNum = 0;
1200: stopKeyNum = 0;
1201: startGap = false;
1202: stopGap = false;
1203:
1204: for (int i = 0; i < predListSize; i++) {
1205: pred = baseTableRestrictionList.getOptPredicate(i);
1206: boolean startKey = pred.isStartKey();
1207: boolean stopKey = pred.isStopKey();
1208:
1209: if (startKey || stopKey) {
1210: boolean knownConstant = pred
1211: .compareWithKnownConstant(this , true);
1212:
1213: if (startKey) {
1214: if (knownConstant && (!startGap)) {
1215: startKeys[startKeyNum] = pred
1216: .getCompareValue(this );
1217: startKeyNum++;
1218: } else {
1219: startGap = true;
1220: }
1221: }
1222:
1223: if (stopKey) {
1224: if (knownConstant && (!stopGap)) {
1225: stopKeys[stopKeyNum] = pred
1226: .getCompareValue(this );
1227: stopKeyNum++;
1228: } else {
1229: stopGap = true;
1230: }
1231: }
1232: } else {
1233: startGap = true;
1234: stopGap = true;
1235: }
1236: }
1237:
1238: int startOperator;
1239: int stopOperator;
1240:
1241: if (baseTableRestrictionList != null) {
1242: startOperator = baseTableRestrictionList
1243: .startOperator(this );
1244: stopOperator = baseTableRestrictionList
1245: .stopOperator(this );
1246: } else {
1247: /*
1248: ** If we're doing a full scan, it doesn't matter what the
1249: ** start and stop operators are.
1250: */
1251: startOperator = ScanController.NA;
1252: stopOperator = ScanController.NA;
1253: }
1254:
1255: /*
1256: ** Get a row template for this conglomerate. For now, just tell
1257: ** it we are using all the columns in the row.
1258: */
1259: DataValueDescriptor[] rowTemplate = getRowTemplate(cd,
1260: getBaseCostController());
1261:
1262: /* we prefer index than table scan for concurrency reason, by a small
1263: * adjustment on estimated row count. This affects optimizer's decision
1264: * especially when few rows are in table. beetle 5006. This makes sense
1265: * since the plan may stay long before we actually check and invalidate it.
1266: * And new rows may be inserted before we check and invalidate the plan.
1267: * Here we only prefer index that has start/stop key from predicates. Non-
1268: * constant start/stop key case is taken care of by selectivity later.
1269: */
1270: long baseRC = (startKeys != null || stopKeys != null) ? baseRowCount()
1271: : baseRowCount() + 5;
1272:
1273: scc.getScanCost(currentJoinStrategy.scanCostType(), baseRC,
1274: 1, forUpdate(), (FormatableBitSet) null,
1275: rowTemplate, startKeys, startOperator, stopKeys,
1276: stopOperator, false, 0, costEstimate);
1277:
1278: /* initialPositionCost is the first part of the index scan cost we get above.
1279: * It's the cost of initial positioning/fetch of key. So it's unrelated to
1280: * row count of how many rows we fetch from index. We extract it here so that
1281: * we only multiply selectivity to the other part of index scan cost, which is
1282: * nearly linear, to make cost calculation more accurate and fair, especially
1283: * compared to the plan of "one row result set" (unique index). beetle 4787.
1284: */
1285: double initialPositionCost = 0.0;
1286: if (cd.isIndex()) {
1287: initialPositionCost = scc.getFetchFromFullKeyCost(
1288: (FormatableBitSet) null, 0);
1289: /* oneRowResultSetForSomeConglom means there's a unique index, but certainly
1290: * not this one since we are here. If store knows this non-unique index
1291: * won't return any row or just returns one row (eg., the predicate is a
1292: * comparison with constant or almost empty table), we do minor adjustment
1293: * on cost (affecting decision for covering index) and rc (decision for
1294: * non-covering). The purpose is favoring unique index. beetle 5006.
1295: */
1296: if (oneRowResultSetForSomeConglom
1297: && costEstimate.rowCount() <= 1) {
1298: costEstimate.setCost(costEstimate
1299: .getEstimatedCost() * 2, costEstimate
1300: .rowCount() + 2, costEstimate
1301: .singleScanRowCount() + 2);
1302: }
1303: }
1304:
1305: optimizer.trace(Optimizer.COST_OF_CONGLOMERATE_SCAN1,
1306: tableNumber, 0, 0.0, cd);
1307: optimizer.trace(Optimizer.COST_OF_CONGLOMERATE_SCAN2,
1308: tableNumber, 0, 0.0, costEstimate);
1309: optimizer.trace(Optimizer.COST_OF_CONGLOMERATE_SCAN3,
1310: numExtraFirstColumnPreds, 0,
1311: extraFirstColumnSelectivity, null);
1312: optimizer.trace(Optimizer.COST_OF_CONGLOMERATE_SCAN4,
1313: numExtraStartStopPreds, 0,
1314: extraStartStopSelectivity, null);
1315: optimizer.trace(Optimizer.COST_OF_CONGLOMERATE_SCAN7,
1316: startStopPredCount, 0, statStartStopSelectivity,
1317: null);
1318: optimizer.trace(Optimizer.COST_OF_CONGLOMERATE_SCAN5,
1319: numExtraQualifiers, 0, extraQualifierSelectivity,
1320: null);
1321: optimizer.trace(Optimizer.COST_OF_CONGLOMERATE_SCAN6,
1322: numExtraNonQualifiers, 0,
1323: extraNonQualifierSelectivity, null);
1324:
1325: /* initial row count is the row count without applying
1326: any predicates-- we use this at the end of the routine
1327: when we use statistics to recompute the row count.
1328: */
1329: double initialRowCount = costEstimate.rowCount();
1330:
1331: if (statStartStopSelectivity != 1.0d) {
1332: /*
1333: ** If statistics exist use the selectivity computed
1334: ** from the statistics to calculate the cost.
1335: ** NOTE: we apply this selectivity to the cost as well
1336: ** as both the row counts. In the absence of statistics
1337: ** we only applied the FirstColumnSelectivity to the
1338: ** cost.
1339: */
1340: costEstimate.setCost(scanCostAfterSelectivity(
1341: costEstimate.getEstimatedCost(),
1342: initialPositionCost, statStartStopSelectivity,
1343: oneRowResultSetForSomeConglom), costEstimate
1344: .rowCount()
1345: * statStartStopSelectivity, costEstimate
1346: .singleScanRowCount()
1347: * statStartStopSelectivity);
1348: optimizer.trace(
1349: Optimizer.COST_INCLUDING_STATS_FOR_INDEX,
1350: tableNumber, 0, 0.0, costEstimate);
1351:
1352: } else {
1353: /*
1354: ** Factor in the extra selectivity on the first column
1355: ** of the conglomerate (see comment above).
1356: ** NOTE: In this case we want to apply the selectivity to both
1357: ** the total row count and singleScanRowCount.
1358: */
1359: if (extraFirstColumnSelectivity != 1.0d) {
1360: costEstimate.setCost(scanCostAfterSelectivity(
1361: costEstimate.getEstimatedCost(),
1362: initialPositionCost,
1363: extraFirstColumnSelectivity,
1364: oneRowResultSetForSomeConglom),
1365: costEstimate.rowCount()
1366: * extraFirstColumnSelectivity,
1367: costEstimate.singleScanRowCount()
1368: * extraFirstColumnSelectivity);
1369:
1370: optimizer
1371: .trace(
1372: Optimizer.COST_INCLUDING_EXTRA_1ST_COL_SELECTIVITY,
1373: tableNumber, 0, 0.0, costEstimate);
1374: }
1375:
1376: /* Factor in the extra start/stop selectivity (see comment above).
1377: * NOTE: In this case we want to apply the selectivity to both
1378: * the row count and singleScanRowCount.
1379: */
1380: if (extraStartStopSelectivity != 1.0d) {
1381: costEstimate.setCost(costEstimate
1382: .getEstimatedCost(), costEstimate
1383: .rowCount()
1384: * extraStartStopSelectivity, costEstimate
1385: .singleScanRowCount()
1386: * extraStartStopSelectivity);
1387:
1388: optimizer.trace(
1389: Optimizer.COST_INCLUDING_EXTRA_START_STOP,
1390: tableNumber, 0, 0.0, costEstimate);
1391: }
1392: }
1393:
1394: /*
1395: ** Figure out whether to do row locking or table locking.
1396: **
1397: ** If there are no start/stop predicates, we're doing full
1398: ** conglomerate scans, so do table locking.
1399: */
1400: if (!startStopFound) {
1401: currentAccessPath
1402: .setLockMode(TransactionController.MODE_TABLE);
1403:
1404: optimizer.trace(Optimizer.TABLE_LOCK_NO_START_STOP, 0,
1405: 0, 0.0, null);
1406: } else {
1407: /*
1408: ** Figure out the number of rows touched. If all the
1409: ** start/stop predicates are constant, the number of
1410: ** rows touched is the number of rows per scan.
1411: ** This is also true for join strategies that scan the
1412: ** inner table only once (like hash join) - we can
1413: ** tell if we have one of those, because
1414: ** multiplyBaseCostByOuterRows() will return false.
1415: */
1416: double rowsTouched = costEstimate.rowCount();
1417:
1418: if ((!constantStartStop)
1419: && currentJoinStrategy
1420: .multiplyBaseCostByOuterRows()) {
1421: /*
1422: ** This is a join where the inner table is scanned
1423: ** more than once, so we have to take the number
1424: ** of outer rows into account. The formula for this
1425: ** works out as follows:
1426: **
1427: ** total rows in table = r
1428: ** number of rows touched per scan = s
1429: ** number of outer rows = o
1430: ** proportion of rows touched per scan = s / r
1431: ** proportion of rows not touched per scan =
1432: ** 1 - (s / r)
1433: ** proportion of rows not touched for all scans =
1434: ** (1 - (s / r)) ** o
1435: ** proportion of rows touched for all scans =
1436: ** 1 - ((1 - (s / r)) ** o)
1437: ** total rows touched for all scans =
1438: ** r * (1 - ((1 - (s / r)) ** o))
1439: **
1440: ** In doing these calculations, we must be careful not
1441: ** to divide by zero. This could happen if there are
1442: ** no rows in the table. In this case, let's do table
1443: ** locking.
1444: */
1445: double r = baseRowCount();
1446: if (r > 0.0) {
1447: double s = costEstimate.rowCount();
1448: double o = outerCost.rowCount();
1449: double pRowsNotTouchedPerScan = 1.0 - (s / r);
1450: double pRowsNotTouchedAllScans = Math.pow(
1451: pRowsNotTouchedPerScan, o);
1452: double pRowsTouchedAllScans = 1.0 - pRowsNotTouchedAllScans;
1453: double rowsTouchedAllScans = r
1454: * pRowsTouchedAllScans;
1455:
1456: rowsTouched = rowsTouchedAllScans;
1457: } else {
1458: /* See comments in setLockingBasedOnThreshold */
1459: rowsTouched = optimizer.tableLockThreshold() + 1;
1460: }
1461: }
1462:
1463: setLockingBasedOnThreshold(optimizer, rowsTouched);
1464: }
1465:
1466: /*
1467: ** If the index isn't covering, add the cost of getting the
1468: ** base row. Only apply extraFirstColumnSelectivity and extraStartStopSelectivity
1469: ** before we do this, don't apply extraQualifierSelectivity etc. The
1470: ** reason is that the row count here should be the number of index rows
1471: ** (and hence heap rows) we get, and we need to fetch all those rows, even
1472: ** though later on some of them may be filtered out by other predicates.
1473: ** beetle 4787.
1474: */
1475: if (cd.isIndex() && (!isCoveringIndex(cd))) {
1476: double singleFetchCost = getBaseCostController()
1477: .getFetchFromRowLocationCost(
1478: (FormatableBitSet) null, 0);
1479:
1480: cost = singleFetchCost * costEstimate.rowCount();
1481:
1482: costEstimate.setEstimatedCost(costEstimate
1483: .getEstimatedCost()
1484: + cost);
1485:
1486: optimizer.trace(Optimizer.COST_OF_NONCOVERING_INDEX,
1487: tableNumber, 0, 0.0, costEstimate);
1488: }
1489:
1490: /* Factor in the extra qualifier selectivity (see comment above).
1491: * NOTE: In this case we want to apply the selectivity to both
1492: * the row count and singleScanRowCount.
1493: */
1494: if (extraQualifierSelectivity != 1.0d) {
1495: costEstimate.setCost(costEstimate.getEstimatedCost(),
1496: costEstimate.rowCount()
1497: * extraQualifierSelectivity,
1498: costEstimate.singleScanRowCount()
1499: * extraQualifierSelectivity);
1500:
1501: optimizer
1502: .trace(
1503: Optimizer.COST_INCLUDING_EXTRA_QUALIFIER_SELECTIVITY,
1504: tableNumber, 0, 0.0, costEstimate);
1505: }
1506:
1507: singleScanRowCount = costEstimate.singleScanRowCount();
1508:
1509: /*
1510: ** Let the join strategy decide whether the cost of the base
1511: ** scan is a single scan, or a scan per outer row.
1512: ** NOTE: In this case we only want to multiply against the
1513: ** total row count, not the singleScanRowCount.
1514: ** NOTE: Do not multiply row count if we determined that
1515: ** conglomerate is a 1 row result set when costing nested
1516: ** loop. (eg, we will find at most 1 match when probing
1517: ** the hash table.)
1518: */
1519: double newCost = costEstimate.getEstimatedCost();
1520: double rowCount = costEstimate.rowCount();
1521:
1522: /*
1523: ** RESOLVE - If there is a unique index on the joining
1524: ** columns, the number of matching rows will equal the
1525: ** number of outer rows, even if we're not considering the
1526: ** unique index for this access path. To figure that out,
1527: ** however, would require an analysis phase at the beginning
1528: ** of optimization. So, we'll always multiply the number
1529: ** of outer rows by the number of rows per scan. This will
1530: ** give us a higher than actual row count when there is
1531: ** such a unique index, which will bias the optimizer toward
1532: ** using the unique index. This is probably OK most of the
1533: ** time, since the optimizer would probably choose the
1534: ** unique index, anyway. But it would be better if the
1535: ** optimizer set the row count properly in this case.
1536: */
1537: if (currentJoinStrategy.multiplyBaseCostByOuterRows()) {
1538: newCost *= outerCost.rowCount();
1539: }
1540:
1541: rowCount *= outerCost.rowCount();
1542: initialRowCount *= outerCost.rowCount();
1543:
1544: /*
1545: ** If this table can generate at most one row per scan,
1546: ** the maximum row count is the number of outer rows.
1547: ** NOTE: This does not completely take care of the RESOLVE
1548: ** in the above comment, since it will only notice
1549: ** one-row result sets for the current join order.
1550: */
1551: if (oneRowResultSetForSomeConglom) {
1552: if (outerCost.rowCount() < rowCount) {
1553: rowCount = outerCost.rowCount();
1554: }
1555: }
1556:
1557: /*
1558: ** The estimated cost may be too high for indexes, if the
1559: ** estimated row count exceeds the maximum. Only do this
1560: ** if we're not doing a full scan, and the start/stop position
1561: ** is not constant (i.e. we're doing a join on the first column
1562: ** of the index) - the reason being that this is when the
1563: ** cost may be inaccurate.
1564: */
1565: if (cd.isIndex() && startStopFound && (!constantStartStop)) {
1566: /*
1567: ** Does any table outer to this one have a unique key on
1568: ** a subset of the joining columns? If so, the maximum number
1569: ** of rows that this table can return is the number of rows
1570: ** in this table times the number of times the maximum number
1571: ** of times each key can be repeated.
1572: */
1573: double scanUniquenessFactor = optimizer
1574: .uniqueJoinWithOuterTable(baseTableRestrictionList);
1575: if (scanUniquenessFactor > 0.0) {
1576: /*
1577: ** A positive uniqueness factor means there is a unique
1578: ** outer join key. The value is the reciprocal of the
1579: ** maximum number of duplicates for each unique key
1580: ** (the duplicates can be caused by other joining tables).
1581: */
1582: double maxRows = ((double) baseRowCount())
1583: / scanUniquenessFactor;
1584: if (rowCount > maxRows) {
1585: /*
1586: ** The estimated row count is too high. Adjust the
1587: ** estimated cost downwards proportionately to
1588: ** match the maximum number of rows.
1589: */
1590: newCost *= (maxRows / rowCount);
1591: }
1592: }
1593: }
1594:
1595: /* The estimated total row count may be too high */
1596: if (tableUniquenessFactor > 0.0) {
1597: /*
1598: ** A positive uniqueness factor means there is a unique outer
1599: ** join key. The value is the reciprocal of the maximum number
1600: ** of duplicates for each unique key (the duplicates can be
1601: ** caused by other joining tables).
1602: */
1603: double maxRows = ((double) baseRowCount())
1604: / tableUniquenessFactor;
1605: if (rowCount > maxRows) {
1606: /*
1607: ** The estimated row count is too high. Set it to the
1608: ** maximum row count.
1609: */
1610: rowCount = maxRows;
1611: }
1612: }
1613:
1614: costEstimate.setCost(newCost, rowCount, costEstimate
1615: .singleScanRowCount());
1616:
1617: optimizer.trace(Optimizer.COST_OF_N_SCANS, tableNumber, 0,
1618: outerCost.rowCount(), costEstimate);
1619:
1620: /*
1621: ** Now figure in the cost of the non-qualifier predicates.
1622: ** existsBaseTables have a row count of 1
1623: */
1624: double rc = -1, src = -1;
1625: if (existsBaseTable)
1626: rc = src = 1;
1627: // don't factor in extraNonQualifierSelectivity in case of oneRowResultSetForSomeConglom
1628: // because "1" is the final result and the effect of other predicates already considered
1629: // beetle 4787
1630: else if (extraNonQualifierSelectivity != 1.0d) {
1631: rc = oneRowResultSetForSomeConglom ? costEstimate
1632: .rowCount() : costEstimate.rowCount()
1633: * extraNonQualifierSelectivity;
1634: src = costEstimate.singleScanRowCount()
1635: * extraNonQualifierSelectivity;
1636: }
1637: if (rc != -1) // changed
1638: {
1639: costEstimate.setCost(costEstimate.getEstimatedCost(),
1640: rc, src);
1641: optimizer
1642: .trace(
1643: Optimizer.COST_INCLUDING_EXTRA_NONQUALIFIER_SELECTIVITY,
1644: tableNumber, 0, 0.0, costEstimate);
1645: }
1646:
1647: recomputeRowCount: if (statisticsForTable
1648: && !oneRowResultSetForSomeConglom
1649: && (statCompositeSelectivity != 1.0d)) {
1650: /* if we have statistics we should use statistics to calculate
1651: row count-- if it has been determined that this table
1652: returns one row for some conglomerate then there is no need
1653: to do this recalculation
1654: */
1655:
1656: double compositeStatRC = initialRowCount
1657: * statCompositeSelectivity;
1658: optimizer.trace(Optimizer.COMPOSITE_SEL_FROM_STATS, 0,
1659: 0, statCompositeSelectivity, null);
1660:
1661: if (tableUniquenessFactor > 0.0) {
1662: /* If the row count from the composite statistics
1663: comes up more than what the table uniqueness
1664: factor indicates then lets stick with the current
1665: row count.
1666: */
1667: if (compositeStatRC > (baseRowCount() * tableUniquenessFactor))
1668:
1669: {
1670:
1671: break recomputeRowCount;
1672: }
1673: }
1674:
1675: /* set the row count and the single scan row count
1676: to the initialRowCount. initialRowCount is the product
1677: of the RC from store * RC of the outerCost.
1678: Thus RC = initialRowCount * the selectivity from stats.
1679: SingleRC = RC / outerCost.rowCount().
1680: */
1681: costEstimate.setCost(costEstimate.getEstimatedCost(),
1682: compositeStatRC, (existsBaseTable) ? 1
1683: : compositeStatRC
1684: / outerCost.rowCount());
1685:
1686: optimizer
1687: .trace(
1688: Optimizer.COST_INCLUDING_COMPOSITE_SEL_FROM_STATS,
1689: tableNumber, 0, 0.0, costEstimate);
1690: }
1691: }
1692:
1693: /* Put the base predicates back in the predicate list */
1694: currentJoinStrategy.putBasePredicates(predList,
1695: baseTableRestrictionList);
1696: return costEstimate;
1697: }
1698:
1699: private double scanCostAfterSelectivity(double originalScanCost,
1700: double initialPositionCost, double selectivity,
1701: boolean anotherIndexUnique) throws StandardException {
1702: /* If there's another paln using unique index, its selectivity is 1/r
1703: * because we use row count 1. This plan is not unique index, so we make
1704: * selectivity at least 2/r, which is more fair, because for unique index
1705: * we don't use our selectivity estimates. Unique index also more likely
1706: * locks less rows, hence better concurrency. beetle 4787.
1707: */
1708: if (anotherIndexUnique) {
1709: double r = baseRowCount();
1710: if (r > 0.0) {
1711: double minSelectivity = 2.0 / r;
1712: if (minSelectivity > selectivity)
1713: selectivity = minSelectivity;
1714: }
1715: }
1716:
1717: /* initialPositionCost is the first part of the index scan cost we get above.
1718: * It's the cost of initial positioning/fetch of key. So it's unrelated to
1719: * row count of how many rows we fetch from index. We extract it here so that
1720: * we only multiply selectivity to the other part of index scan cost, which is
1721: * nearly linear, to make cost calculation more accurate and fair, especially
1722: * compared to the plan of "one row result set" (unique index). beetle 4787.
1723: */
1724: double afterInitialCost = (originalScanCost - initialPositionCost)
1725: * selectivity;
1726: if (afterInitialCost < 0)
1727: afterInitialCost = 0;
1728: return initialPositionCost + afterInitialCost;
1729: }
1730:
/**
 * Choose the lock mode for the current access path based on the
 * estimated number of rows touched.
 * <p>
 * The current implementation unconditionally selects row-level
 * locking and relies on the store to escalate to a table lock when
 * appropriate; both parameters are presently unused but retained so
 * callers can continue to supply the threshold inputs.
 *
 * @param optimizer   the optimizer driving this costing pass (unused)
 * @param rowsTouched estimated rows touched over the life of the
 *                    query (unused)
 */
private void setLockingBasedOnThreshold(Optimizer optimizer,
        double rowsTouched) {
    /* In optimizer we always set it to row lock (unless there's no
     * start/stop key found to utilize an index, in which case we do table
     * lock), it's up to store to upgrade it to table lock. This makes
     * sense for the default read committed isolation level and update
     * lock. For more detail, see Beetle 4133.
     */
    getCurrentAccessPath().setLockMode(
            TransactionController.MODE_RECORD);
}
1742:
/**
 * @see Optimizable#isBaseTable
 *
 * @return true always — this node represents a physical base table.
 */
public boolean isBaseTable() {
    return true;
}
1747:
1748: /** @see Optimizable#forUpdate */
1749: public boolean forUpdate() {
1750: /* This table is updatable if it is the
1751: * target table of an update or delete,
1752: * or it is (or was) the target table of an
1753: * updatable cursor.
1754: */
1755: return (updateOrDelete != 0) || cursorTargetTable
1756: || getUpdateLocks;
1757: }
1758:
/**
 * @see Optimizable#initialCapacity
 *
 * @return the configured initial capacity for this optimizable.
 */
public int initialCapacity() {
    return initialCapacity;
}
1763:
/**
 * @see Optimizable#loadFactor
 *
 * @return the configured load factor for this optimizable.
 */
public float loadFactor() {
    return loadFactor;
}
1768:
/**
 * @see Optimizable#memoryUsageOK
 *
 * NOTE(review): the rowCount argument is deliberately ignored here and
 * the superclass check is performed against this table's cached
 * singleScanRowCount instead — confirm this substitution is intended
 * by callers that pass a different row count.
 *
 * @param rowCount          caller-supplied row count (ignored; see note)
 * @param maxMemoryPerTable maximum memory budget per table
 * @return whether the estimated memory usage is acceptable
 * @exception StandardException thrown on error
 */
public boolean memoryUsageOK(double rowCount, int maxMemoryPerTable)
    throws StandardException {
    return super .memoryUsageOK(singleScanRowCount,
            maxMemoryPerTable);
}
1777:
/**
 * @see Optimizable#isTargetTable
 *
 * @return true if this base table is the target of an UPDATE or
 *         DELETE (i.e. updateOrDelete is non-zero).
 */
public boolean isTargetTable() {
    return (updateOrDelete != 0);
}
1784:
1785: /**
1786: * @see Optimizable#uniqueJoin
1787: */
1788: public double uniqueJoin(OptimizablePredicateList predList)
1789: throws StandardException {
1790: double retval = -1.0;
1791: PredicateList pl = (PredicateList) predList;
1792: int numColumns = getTableDescriptor().getNumberOfColumns();
1793: int tableNumber = getTableNumber();
1794:
1795: // This is supposed to be an array of table numbers for the current
1796: // query block. It is used to determine whether a join is with a
1797: // correlation column, to fill in eqOuterCols properly. We don't care
1798: // about eqOuterCols, so just create a zero-length array, pretending
1799: // that all columns are correlation columns.
1800: int[] tableNumbers = new int[0];
1801: JBitSet[] tableColMap = new JBitSet[1];
1802: tableColMap[0] = new JBitSet(numColumns + 1);
1803:
1804: pl.checkTopPredicatesForEqualsConditions(tableNumber, null,
1805: tableNumbers, tableColMap, false);
1806:
1807: if (super setOfUniqueIndex(tableColMap)) {
1808: retval = getBestAccessPath().getCostEstimate()
1809: .singleScanRowCount();
1810: }
1811:
1812: return retval;
1813: }
1814:
1815: /**
1816: * @see Optimizable#isOneRowScan
1817: *
1818: * @exception StandardException Thrown on error
1819: */
1820: public boolean isOneRowScan() throws StandardException {
1821: /* EXISTS FBT will never be a 1 row scan.
1822: * Otherwise call method in super class.
1823: */
1824: if (existsBaseTable) {
1825: return false;
1826: }
1827:
1828: return super .isOneRowScan();
1829: }
1830:
1831: /**
1832: * @see Optimizable#legalJoinOrder
1833: */
1834: public boolean legalJoinOrder(JBitSet assignedTableMap) {
1835: // Only an issue for EXISTS FBTs
1836: if (existsBaseTable) {
1837: /* Have all of our dependencies been satisfied? */
1838: return assignedTableMap.contains(dependencyMap);
1839: }
1840: return true;
1841: }
1842:
1843: /**
1844: * Convert this object to a String. See comments in QueryTreeNode.java
1845: * for how this should be done for tree printing.
1846: *
1847: * @return This object as a String
1848: */
1849:
1850: public String toString() {
1851: if (SanityManager.DEBUG) {
1852: return "tableName: "
1853: + (tableName != null ? tableName.toString()
1854: : "null")
1855: + "\n"
1856: + "tableDescriptor: "
1857: + tableDescriptor
1858: + "\n"
1859: + "updateOrDelete: "
1860: + updateOrDelete
1861: + "\n"
1862: + (tableProperties != null ? tableProperties
1863: .toString() : "null")
1864: + "\n"
1865: + "existsBaseTable: "
1866: + existsBaseTable
1867: + "\n"
1868: + "dependencyMap: "
1869: + (dependencyMap != null ? dependencyMap.toString()
1870: : "null") + "\n" + super .toString();
1871: } else {
1872: return "";
1873: }
1874: }
1875:
1876: /**
1877: * Does this FBT represent an EXISTS FBT.
1878: *
1879: * @return Whether or not this FBT represents
1880: * an EXISTS FBT.
1881: */
1882: boolean getExistsBaseTable() {
1883: return existsBaseTable;
1884: }
1885:
1886: /**
1887: * Set whether or not this FBT represents an
1888: * EXISTS FBT.
1889: *
1890: * @param existsBaseTable Whether or not an EXISTS FBT.
1891: * @param dependencyMap The dependency map for the EXISTS FBT.
1892: * @param isNotExists Whether or not for NOT EXISTS, more specifically.
1893: */
1894: void setExistsBaseTable(boolean existsBaseTable,
1895: JBitSet dependencyMap, boolean isNotExists) {
1896: this .existsBaseTable = existsBaseTable;
1897: this .isNotExists = isNotExists;
1898:
1899: /* Set/clear the dependency map as needed */
1900: if (existsBaseTable) {
1901: this .dependencyMap = dependencyMap;
1902: } else {
1903: this .dependencyMap = null;
1904: }
1905: }
1906:
1907: /**
1908: * Clear the bits from the dependency map when join nodes are flattened
1909: *
1910: * @param locations vector of bit numbers to be cleared
1911: */
1912: void clearDependency(Vector locations) {
1913: if (this .dependencyMap != null) {
1914: for (int i = 0; i < locations.size(); i++)
1915: this .dependencyMap.clear(((Integer) locations
1916: .elementAt(i)).intValue());
1917: }
1918: }
1919:
1920: /**
1921: * Set the table properties for this table.
1922: *
1923: * @param tableProperties The new table properties.
1924: */
1925: public void setTableProperties(Properties tableProperties) {
1926: this .tableProperties = tableProperties;
1927: }
1928:
1929: /**
1930: * Bind the table in this FromBaseTable.
1931: * This is where view resolution occurs
1932: *
1933: * @param dataDictionary The DataDictionary to use for binding
1934: * @param fromListParam FromList to use/append to.
1935: *
1936: * @return ResultSetNode The FromTable for the table or resolved view.
1937: *
1938: * @exception StandardException Thrown on error
1939: */
1940:
1941: public ResultSetNode bindNonVTITables(
1942: DataDictionary dataDictionary, FromList fromListParam)
1943: throws StandardException {
1944: TableDescriptor tableDescriptor = bindTableDescriptor();
1945:
1946: if (tableDescriptor.getTableType() == TableDescriptor.VTI_TYPE) {
1947: ResultSetNode vtiNode = getNodeFactory().mapTableAsVTI(
1948: tableDescriptor,
1949: dataDictionary.getVTIClass(tableDescriptor),
1950: getCorrelationName(), resultColumns,
1951: getProperties(), getContextManager());
1952: return vtiNode.bindNonVTITables(dataDictionary,
1953: fromListParam);
1954: }
1955:
1956: ResultColumnList derivedRCL = resultColumns;
1957:
1958: // make sure there's a restriction list
1959: restrictionList = (PredicateList) getNodeFactory().getNode(
1960: C_NodeTypes.PREDICATE_LIST, getContextManager());
1961: baseTableRestrictionList = (PredicateList) getNodeFactory()
1962: .getNode(C_NodeTypes.PREDICATE_LIST,
1963: getContextManager());
1964:
1965: CompilerContext compilerContext = getCompilerContext();
1966:
1967: /* Generate the ResultColumnList */
1968: resultColumns = genResultColList();
1969: templateColumns = resultColumns;
1970:
1971: /* Resolve the view, if this is a view */
1972: if (tableDescriptor.getTableType() == TableDescriptor.VIEW_TYPE) {
1973: FromTable fsq;
1974: ResultSetNode rsn;
1975: ViewDescriptor vd;
1976: CreateViewNode cvn;
1977: SchemaDescriptor compSchema;
1978: SchemaDescriptor prevCompSchema;
1979:
1980: /* Get the associated ViewDescriptor so that we can get
1981: * the view definition text.
1982: */
1983: vd = dataDictionary.getViewDescriptor(tableDescriptor);
1984:
1985: /*
1986: ** Set the default compilation schema to be whatever
1987: ** this schema this view was originally compiled against.
1988: ** That way we pick up the same tables no matter what
1989: ** schema we are running against.
1990: */
1991: compSchema = dataDictionary.getSchemaDescriptor(vd
1992: .getCompSchemaId(), null);
1993:
1994: prevCompSchema = compilerContext
1995: .setCompilationSchema(compSchema);
1996:
1997: try {
1998:
1999: /* This represents a view - query is dependent on the ViewDescriptor */
2000: compilerContext.createDependency(vd);
2001:
2002: if (SanityManager.DEBUG) {
2003: SanityManager.ASSERT(vd != null,
2004: "vd not expected to be null for "
2005: + tableName);
2006: }
2007:
2008: /*
2009: ** Push a compiler context to parse the query text so that
2010: ** it won't clobber the current context.
2011: */
2012: LanguageConnectionContext lcc = getLanguageConnectionContext();
2013: CompilerContext newCC = lcc.pushCompilerContext();
2014: cvn = (CreateViewNode) QueryTreeNode.parseQueryText(
2015: newCC, vd.getViewText(),
2016: (DataValueDescriptor[]) null, // default params
2017: lcc);
2018:
2019: lcc.popCompilerContext(newCC);
2020:
2021: rsn = cvn.getParsedQueryExpression();
2022:
2023: /* If the view contains a '*' then we mark the views derived column list
2024: * so that the view will still work, and return the expected results,
2025: * if any of the tables referenced in the view have columns added to
2026: * them via ALTER TABLE. The expected results means that the view
2027: * will always return the same # of columns.
2028: */
2029: if (rsn.getResultColumns().containsAllResultColumn()) {
2030: resultColumns.setCountMismatchAllowed(true);
2031: }
2032: //Views execute with definer's privileges and if any one of
2033: //those privileges' are revoked from the definer, the view gets
2034: //dropped. So, a view can exist in Derby only if it's owner has
2035: //all the privileges needed to create one. In order to do a
2036: //select from a view, a user only needs select privilege on the
2037: //view and doesn't need any privilege for objects accessed by
2038: //the view. Hence, when collecting privilege requirement for a
2039: //sql accessing a view, we only need to look for select privilege
2040: //on the actual view and that is what the following code is
2041: //checking.
2042: for (int i = 0; i < resultColumns.size(); i++) {
2043: ResultColumn rc = (ResultColumn) resultColumns
2044: .elementAt(i);
2045: if (rc.isPrivilegeCollectionRequired())
2046: compilerContext.addRequiredColumnPriv(rc
2047: .getTableColumnDescriptor());
2048: }
2049:
2050: fsq = (FromTable) getNodeFactory().getNode(
2051: C_NodeTypes.FROM_SUBQUERY,
2052: rsn,
2053: (correlationName != null) ? correlationName
2054: : getOrigTableName().getTableName(),
2055: resultColumns, tableProperties,
2056: getContextManager());
2057: // Transfer the nesting level to the new FromSubquery
2058: fsq.setLevel(level);
2059: //We are getting ready to bind the query underneath the view. Since
2060: //that query is going to run with definer's privileges, we do not
2061: //need to collect any privilege requirement for that query.
2062: //Following call is marking the query to run with definer
2063: //privileges. This marking will make sure that we do not collect
2064: //any privilege requirement for it.
2065: fsq.disablePrivilegeCollection();
2066: fsq.setOrigTableName(this .getOrigTableName());
2067: return fsq.bindNonVTITables(dataDictionary,
2068: fromListParam);
2069: } finally {
2070: compilerContext.setCompilationSchema(prevCompSchema);
2071: }
2072: } else {
2073: /* This represents a table - query is dependent on the TableDescriptor */
2074: compilerContext.createDependency(tableDescriptor);
2075:
2076: /* Get the base conglomerate descriptor */
2077: baseConglomerateDescriptor = tableDescriptor
2078: .getConglomerateDescriptor(tableDescriptor
2079: .getHeapConglomerateId());
2080:
2081: /* Build the 0-based array of base column names. */
2082: columnNames = resultColumns.getColumnNames();
2083:
2084: /* Do error checking on derived column list and update "exposed"
2085: * column names if valid.
2086: */
2087: if (derivedRCL != null) {
2088: resultColumns.propagateDCLInfo(derivedRCL,
2089: origTableName.getFullTableName());
2090: }
2091:
2092: /* Assign the tableNumber */
2093: if (tableNumber == -1) // allow re-bind, in which case use old number
2094: tableNumber = compilerContext.getNextTableNumber();
2095: }
2096:
2097: return this ;
2098: }
2099:
2100: /**
2101: * Determine whether or not the specified name is an exposed name in
2102: * the current query block.
2103: *
2104: * @param name The specified name to search for as an exposed name.
2105: * @param schemaName Schema name, if non-null.
2106: * @param exactMatch Whether or not we need an exact match on specified schema and table
2107: * names or match on table id.
2108: *
2109: * @return The FromTable, if any, with the exposed name.
2110: *
2111: * @exception StandardException Thrown on error
2112: */
2113: protected FromTable getFromTableByName(String name,
2114: String schemaName, boolean exactMatch)
2115: throws StandardException {
2116: // ourSchemaName can be null if correlation name is specified.
2117: String ourSchemaName = getOrigTableName().getSchemaName();
2118: String fullName = (schemaName != null) ? (schemaName + '.' + name)
2119: : name;
2120:
2121: /* If an exact string match is required then:
2122: * o If schema name specified on 1 but not both then no match.
2123: * o If schema name not specified on either, compare exposed names.
2124: * o If schema name specified on both, compare schema and exposed names.
2125: */
2126: if (exactMatch) {
2127:
2128: if ((schemaName != null && ourSchemaName == null)
2129: || (schemaName == null && ourSchemaName != null)) {
2130: return null;
2131: }
2132:
2133: if (getExposedName().equals(fullName)) {
2134: return this ;
2135: }
2136:
2137: return null;
2138: }
2139:
2140: /* If an exact string match is not required then:
2141: * o If schema name specified on both, compare schema and exposed names.
2142: * o If schema name not specified on either, compare exposed names.
2143: * o If schema name specified on column but not table, then compare
2144: * the column's schema name against the schema name from the TableDescriptor.
2145: * If they agree, then the column's table name must match the exposed name
2146: * from the table, which must also be the base table name, since a correlation
2147: * name does not belong to a schema.
2148: * o If schema name not specified on column then just match the exposed names.
2149: */
2150: // Both or neither schema name specified
2151: if (getExposedName().equals(fullName)) {
2152: return this ;
2153: } else if ((schemaName != null && ourSchemaName != null)
2154: || (schemaName == null && ourSchemaName == null)) {
2155: return null;
2156: }
2157:
2158: // Schema name only on column
2159: // e.g.: select w1.i from t1 w1 order by test2.w1.i; (incorrect)
2160: if (schemaName != null && ourSchemaName == null) {
2161: // Compare column's schema name with table descriptor's if it is
2162: // not a synonym since a synonym can be declared in a different
2163: // schema.
2164: if (tableName.equals(origTableName)
2165: && !schemaName.equals(tableDescriptor
2166: .getSchemaDescriptor().getSchemaName())) {
2167: return null;
2168: }
2169:
2170: // Compare exposed name with column's table name
2171: if (!getExposedName().equals(name)) {
2172: return null;
2173: }
2174:
2175: // Make sure exposed name is not a correlation name
2176: if (!getExposedName().equals(
2177: getOrigTableName().getTableName())) {
2178: return null;
2179: }
2180:
2181: return this ;
2182: }
2183:
2184: /* Schema name only specified on table. Compare full exposed name
2185: * against table's schema name || "." || column's table name.
2186: */
2187: if (!getExposedName().equals(
2188: getOrigTableName().getSchemaName() + "." + name)) {
2189: return null;
2190: }
2191:
2192: return this ;
2193: }
2194:
2195: /**
2196: * Bind the table descriptor for this table.
2197: *
2198: * If the tableName is a synonym, it will be resolved here.
2199: * The original table name is retained in origTableName.
2200: *
2201: * @exception StandardException Thrown on error
2202: */
2203: private TableDescriptor bindTableDescriptor()
2204: throws StandardException {
2205: String schemaName = tableName.getSchemaName();
2206: SchemaDescriptor sd = getSchemaDescriptor(schemaName);
2207:
2208: tableDescriptor = getTableDescriptor(tableName.getTableName(),
2209: sd);
2210: if (tableDescriptor == null) {
2211: // Check if the reference is for a synonym.
2212: TableName synonymTab = resolveTableToSynonym(tableName);
2213: if (synonymTab == null)
2214: throw StandardException.newException(
2215: SQLState.LANG_TABLE_NOT_FOUND, tableName);
2216:
2217: tableName = synonymTab;
2218: sd = getSchemaDescriptor(tableName.getSchemaName());
2219:
2220: tableDescriptor = getTableDescriptor(synonymTab
2221: .getTableName(), sd);
2222: if (tableDescriptor == null)
2223: throw StandardException.newException(
2224: SQLState.LANG_TABLE_NOT_FOUND, tableName);
2225: }
2226:
2227: return tableDescriptor;
2228: }
2229:
2230: /**
2231: * Bind the expressions in this FromBaseTable. This means binding the
2232: * sub-expressions, as well as figuring out what the return type is for
2233: * each expression.
2234: *
2235: * @param fromListParam FromList to use/append to.
2236: *
2237: * @exception StandardException Thrown on error
2238: */
2239: public void bindExpressions(FromList fromListParam)
2240: throws StandardException {
2241: /* No expressions to bind for a FromBaseTable.
2242: * NOTE - too involved to optimize so that this method
2243: * doesn't get called, so just do nothing.
2244: */
2245: }
2246:
2247: /**
2248: * Bind the result columns of this ResultSetNode when there is no
2249: * base table to bind them to. This is useful for SELECT statements,
2250: * where the result columns get their types from the expressions that
2251: * live under them.
2252: *
2253: * @param fromListParam FromList to use/append to.
2254: *
2255: * @exception StandardException Thrown on error
2256: */
2257:
2258: public void bindResultColumns(FromList fromListParam)
2259: throws StandardException {
2260: /* Nothing to do, since RCL bound in bindNonVTITables() */
2261: }
2262:
2263: /**
2264: * Try to find a ResultColumn in the table represented by this FromBaseTable
2265: * that matches the name in the given ColumnReference.
2266: *
2267: * @param columnReference The columnReference whose name we're looking
2268: * for in the given table.
2269: *
2270: * @return A ResultColumn whose expression is the ColumnNode
2271: * that matches the ColumnReference.
2272: * Returns null if there is no match.
2273: *
2274: * @exception StandardException Thrown on error
2275: */
2276:
2277: public ResultColumn getMatchingColumn(
2278: ColumnReference columnReference) throws StandardException {
2279: ResultColumn resultColumn = null;
2280: TableName columnsTableName;
2281: TableName exposedTableName;
2282:
2283: columnsTableName = columnReference.getTableNameNode();
2284:
2285: if (columnsTableName != null) {
2286: if (columnsTableName.getSchemaName() == null
2287: && correlationName == null)
2288: columnsTableName.bind(this .getDataDictionary());
2289: }
2290: /*
2291: ** If there is a correlation name, use that instead of the
2292: ** table name.
2293: */
2294: exposedTableName = getExposedTableName();
2295:
2296: if (exposedTableName.getSchemaName() == null
2297: && correlationName == null)
2298: exposedTableName.bind(this .getDataDictionary());
2299: /*
2300: ** If the column did not specify a name, or the specified name
2301: ** matches the table we're looking at, see whether the column
2302: ** is in this table.
2303: */
2304: if (columnsTableName == null
2305: || columnsTableName.equals(exposedTableName)) {
2306: resultColumn = resultColumns
2307: .getResultColumn(columnReference.getColumnName());
2308: /* Did we find a match? */
2309: if (resultColumn != null) {
2310: columnReference.setTableNumber(tableNumber);
2311: if (tableDescriptor != null) {
2312: FormatableBitSet referencedColumnMap = tableDescriptor
2313: .getReferencedColumnMap();
2314: if (referencedColumnMap == null)
2315: referencedColumnMap = new FormatableBitSet(
2316: tableDescriptor.getNumberOfColumns() + 1);
2317: referencedColumnMap.set(resultColumn
2318: .getColumnPosition());
2319: tableDescriptor
2320: .setReferencedColumnMap(referencedColumnMap);
2321: }
2322: }
2323: }
2324:
2325: return resultColumn;
2326: }
2327:
2328: /**
2329: * Preprocess a ResultSetNode - this currently means:
2330: * o Generating a referenced table map for each ResultSetNode.
2331: * o Putting the WHERE and HAVING clauses in conjunctive normal form (CNF).
2332: * o Converting the WHERE and HAVING clauses into PredicateLists and
2333: * classifying them.
2334: * o Ensuring that a ProjectRestrictNode is generated on top of every
2335: * FromBaseTable and generated in place of every FromSubquery.
2336: * o Pushing single table predicates down to the new ProjectRestrictNodes.
2337: *
2338: * @param numTables The number of tables in the DML Statement
2339: * @param gbl The group by list, if any
2340: * @param fromList The from list, if any
2341: *
2342: * @return ResultSetNode at top of preprocessed tree.
2343: *
2344: * @exception StandardException Thrown on error
2345: */
2346:
2347: public ResultSetNode preprocess(int numTables, GroupByList gbl,
2348: FromList fromList) throws StandardException {
2349: /* Generate the referenced table map */
2350: referencedTableMap = new JBitSet(numTables);
2351: referencedTableMap.set(tableNumber);
2352:
2353: return genProjectRestrict(numTables);
2354: }
2355:
2356: /**
2357: * Put a ProjectRestrictNode on top of each FromTable in the FromList.
2358: * ColumnReferences must continue to point to the same ResultColumn, so
2359: * that ResultColumn must percolate up to the new PRN. However,
2360: * that ResultColumn will point to a new expression, a VirtualColumnNode,
2361: * which points to the FromTable and the ResultColumn that is the source for
2362: * the ColumnReference.
2363: * (The new PRN will have the original of the ResultColumnList and
2364: * the ResultColumns from that list. The FromTable will get shallow copies
2365: * of the ResultColumnList and its ResultColumns. ResultColumn.expression
2366: * will remain at the FromTable, with the PRN getting a new
2367: * VirtualColumnNode for each ResultColumn.expression.)
2368: * We then project out the non-referenced columns. If there are no referenced
2369: * columns, then the PRN's ResultColumnList will consist of a single ResultColumn
2370: * whose expression is 1.
2371: *
2372: * @param numTables Number of tables in the DML Statement
2373: *
2374: * @return The generated ProjectRestrictNode atop the original FromTable.
2375: *
2376: * @exception StandardException Thrown on error
2377: */
2378:
2379: protected ResultSetNode genProjectRestrict(int numTables)
2380: throws StandardException {
2381: /* We get a shallow copy of the ResultColumnList and its
2382: * ResultColumns. (Copy maintains ResultColumn.expression for now.)
2383: */
2384: ResultColumnList prRCList = resultColumns;
2385: resultColumns = resultColumns.copyListAndObjects();
2386:
2387: /* Replace ResultColumn.expression with new VirtualColumnNodes
2388: * in the ProjectRestrictNode's ResultColumnList. (VirtualColumnNodes include
2389: * pointers to source ResultSetNode, this, and source ResultColumn.)
2390: * NOTE: We don't want to mark the underlying RCs as referenced, otherwise
2391: * we won't be able to project out any of them.
2392: */
2393: prRCList.genVirtualColumnNodes(this , resultColumns, false);
2394:
2395: /* Project out any unreferenced columns. If there are no referenced
2396: * columns, generate and bind a single ResultColumn whose expression is 1.
2397: */
2398: prRCList.doProjection();
2399:
2400: /* Finally, we create the new ProjectRestrictNode */
2401: return (ResultSetNode) getNodeFactory().getNode(
2402: C_NodeTypes.PROJECT_RESTRICT_NODE, this , prRCList,
2403: null, /* Restriction */
2404: null, /* Restriction as PredicateList */
2405: null, /* Project subquery list */
2406: null, /* Restrict subquery list */
2407: null, getContextManager());
2408: }
2409:
2410: /**
2411: * @see ResultSetNode#changeAccessPath
2412: *
2413: * @exception StandardException Thrown on error
2414: */
2415: public ResultSetNode changeAccessPath() throws StandardException {
2416: ResultSetNode retval;
2417: AccessPath ap = getTrulyTheBestAccessPath();
2418: ConglomerateDescriptor trulyTheBestConglomerateDescriptor = ap
2419: .getConglomerateDescriptor();
2420: JoinStrategy trulyTheBestJoinStrategy = ap.getJoinStrategy();
2421: Optimizer optimizer = ap.getOptimizer();
2422:
2423: optimizer.trace(Optimizer.CHANGING_ACCESS_PATH_FOR_TABLE,
2424: tableNumber, 0, 0.0, null);
2425:
2426: if (SanityManager.DEBUG) {
2427: SanityManager
2428: .ASSERT(trulyTheBestConglomerateDescriptor != null,
2429: "Should only modify access path after conglomerate has been chosen.");
2430: }
2431:
2432: /*
2433: ** Make sure user-specified bulk fetch is OK with the chosen join
2434: ** strategy.
2435: */
2436: if (bulkFetch != UNSET) {
2437: if (!trulyTheBestJoinStrategy.bulkFetchOK()) {
2438: throw StandardException
2439: .newException(
2440: SQLState.LANG_INVALID_BULK_FETCH_WITH_JOIN_TYPE,
2441: trulyTheBestJoinStrategy.getName());
2442: }
2443: // bulkFetch has no meaning for hash join, just ignore it
2444: else if (trulyTheBestJoinStrategy.ignoreBulkFetch()) {
2445: disableBulkFetch();
2446: }
2447: // bug 4431 - ignore bulkfetch property if it's 1 row resultset
2448: else if (isOneRowResultSet()) {
2449: disableBulkFetch();
2450: }
2451: }
2452:
2453: // bulkFetch = 1 is the same as no bulk fetch
2454: if (bulkFetch == 1) {
2455: disableBulkFetch();
2456: }
2457:
2458: /* Remove any redundant join clauses. A redundant join clause is one
2459: * where there are other join clauses in the same equivalence class
2460: * after it in the PredicateList.
2461: */
2462: restrictionList.removeRedundantPredicates();
2463:
2464: /*
2465: ** Divide up the predicates for different processing phases of the
2466: ** best join strategy.
2467: */
2468: storeRestrictionList = (PredicateList) getNodeFactory()
2469: .getNode(C_NodeTypes.PREDICATE_LIST,
2470: getContextManager());
2471: nonStoreRestrictionList = (PredicateList) getNodeFactory()
2472: .getNode(C_NodeTypes.PREDICATE_LIST,
2473: getContextManager());
2474: requalificationRestrictionList = (PredicateList) getNodeFactory()
2475: .getNode(C_NodeTypes.PREDICATE_LIST,
2476: getContextManager());
2477: trulyTheBestJoinStrategy.divideUpPredicateLists(this ,
2478: restrictionList, storeRestrictionList,
2479: nonStoreRestrictionList,
2480: requalificationRestrictionList, getDataDictionary());
2481:
2482: /*
2483: ** Consider turning on bulkFetch if it is turned
2484: ** off. Only turn it on if it is a not an updatable
2485: ** scan and if it isn't a oneRowResultSet, and
2486: ** not a subquery, and it is OK to use bulk fetch
2487: ** with the chosen join strategy. NOTE: the subquery logic
2488: ** could be more sophisticated -- we are taking
2489: ** the safe route in avoiding reading extra
2490: ** data for something like:
2491: **
2492: ** select x from t where x in (select y from t)
2493: **
2494: ** In this case we want to stop the subquery
2495: ** evaluation as soon as something matches.
2496: */
2497: if (trulyTheBestJoinStrategy.bulkFetchOK()
2498: && !(trulyTheBestJoinStrategy.ignoreBulkFetch())
2499: && !bulkFetchTurnedOff && (bulkFetch == UNSET)
2500: && !forUpdate() && !isOneRowResultSet()
2501: && getLevel() == 0) {
2502: bulkFetch = getDefaultBulkFetch();
2503: }
2504:
2505: /* Statement is dependent on the chosen conglomerate. */
2506: getCompilerContext().createDependency(
2507: trulyTheBestConglomerateDescriptor);
2508:
2509: /* No need to modify access path if conglomerate is the heap */
2510: if (!trulyTheBestConglomerateDescriptor.isIndex()) {
2511: /*
2512: ** We need a little special logic for SYSSTATEMENTS
2513: ** here. SYSSTATEMENTS has a hidden column at the
2514: ** end. When someone does a select * we don't want
2515: ** to get that column from the store. So we'll always
2516: ** generate a partial read bitSet if we are scanning
2517: ** SYSSTATEMENTS to ensure we don't get the hidden
2518: ** column.
2519: */
2520: boolean isSysstatements = tableName.equals("SYS",
2521: "SYSSTATEMENTS");
2522: /* Template must reflect full row.
2523: * Compact RCL down to partial row.
2524: */
2525: templateColumns = resultColumns;
2526: referencedCols = resultColumns
2527: .getReferencedFormatableBitSet(cursorTargetTable,
2528: isSysstatements, false);
2529: resultColumns = resultColumns.compactColumns(
2530: cursorTargetTable, isSysstatements);
2531: return this ;
2532: }
2533:
2534: /* No need to go to the data page if this is a covering index */
2535: /* Derby-1087: use data page when returning an updatable resultset */
2536: if (ap.getCoveringIndexScan() && (!cursorTargetTable())) {
2537: /* Massage resultColumns so that it matches the index. */
2538: resultColumns = newResultColumns(resultColumns,
2539: trulyTheBestConglomerateDescriptor,
2540: baseConglomerateDescriptor, false);
2541:
2542: /* We are going against the index. The template row must be the full index row.
2543: * The template row will have the RID but the result row will not
2544: * since there is no need to go to the data page.
2545: */
2546: templateColumns = newResultColumns(resultColumns,
2547: trulyTheBestConglomerateDescriptor,
2548: baseConglomerateDescriptor, false);
2549: templateColumns.addRCForRID();
2550:
2551: // If this is for update then we need to get the RID in the result row
2552: if (forUpdate()) {
2553: resultColumns.addRCForRID();
2554: }
2555:
2556: /* Compact RCL down to the partial row. We always want a new
2557: * RCL and FormatableBitSet because this is a covering index. (This is
2558: * because we don't want the RID in the partial row returned
2559: * by the store.)
2560: */
2561: referencedCols = resultColumns
2562: .getReferencedFormatableBitSet(cursorTargetTable,
2563: true, false);
2564: resultColumns = resultColumns.compactColumns(
2565: cursorTargetTable, true);
2566:
2567: resultColumns.setIndexRow(baseConglomerateDescriptor
2568: .getConglomerateNumber(), forUpdate());
2569:
2570: return this ;
2571: }
2572:
2573: /* Statement is dependent on the base conglomerate if this is
2574: * a non-covering index.
2575: */
2576: getCompilerContext().createDependency(
2577: baseConglomerateDescriptor);
2578:
2579: /*
2580: ** On bulkFetch, we need to add the restrictions from
2581: ** the TableScan and reapply them here.
2582: */
2583: if (bulkFetch != UNSET) {
2584: restrictionList
2585: .copyPredicatesToOtherList(requalificationRestrictionList);
2586: }
2587:
2588: /*
2589: ** We know the chosen conglomerate is an index. We need to allocate
2590: ** an IndexToBaseRowNode above us, and to change the result column
2591: ** list for this FromBaseTable to reflect the columns in the index.
2592: ** We also need to shift "cursor target table" status from this
2593: ** FromBaseTable to the new IndexToBaseRowNow (because that's where
2594: ** a cursor can fetch the current row).
2595: */
2596: ResultColumnList newResultColumns = newResultColumns(
2597: resultColumns, trulyTheBestConglomerateDescriptor,
2598: baseConglomerateDescriptor, true);
2599:
2600: /* Compact the RCL for the IndexToBaseRowNode down to
2601: * the partial row for the heap. The referenced BitSet
2602: * will reflect only those columns coming from the heap.
2603: * (ie, it won't reflect columns coming from the index.)
2604: * NOTE: We need to re-get all of the columns from the heap
2605: * when doing a bulk fetch because we will be requalifying
2606: * the row in the IndexRowToBaseRow.
2607: */
2608: // Get the BitSet for all of the referenced columns
2609: FormatableBitSet indexReferencedCols = null;
2610: FormatableBitSet heapReferencedCols = null;
2611: if ((bulkFetch == UNSET)
2612: && (requalificationRestrictionList == null || requalificationRestrictionList
2613: .size() == 0)) {
2614: /* No BULK FETCH or requalification, XOR off the columns coming from the heap
2615: * to get the columns coming from the index.
2616: */
2617: indexReferencedCols = resultColumns
2618: .getReferencedFormatableBitSet(cursorTargetTable,
2619: true, false);
2620: heapReferencedCols = resultColumns
2621: .getReferencedFormatableBitSet(cursorTargetTable,
2622: true, true);
2623: if (heapReferencedCols != null) {
2624: indexReferencedCols.xor(heapReferencedCols);
2625: }
2626: } else {
2627: // BULK FETCH or requalification - re-get all referenced columns from the heap
2628: heapReferencedCols = resultColumns
2629: .getReferencedFormatableBitSet(cursorTargetTable,
2630: true, false);
2631: }
2632: ResultColumnList heapRCL = resultColumns.compactColumns(
2633: cursorTargetTable, false);
2634: retval = (ResultSetNode) getNodeFactory().getNode(
2635: C_NodeTypes.INDEX_TO_BASE_ROW_NODE, this ,
2636: baseConglomerateDescriptor, heapRCL,
2637: new Boolean(cursorTargetTable), heapReferencedCols,
2638: indexReferencedCols, requalificationRestrictionList,
2639: new Boolean(forUpdate()), tableProperties,
2640: getContextManager());
2641:
2642: /*
2643: ** The template row is all the columns. The
2644: ** result set is the compacted column list.
2645: */
2646: resultColumns = newResultColumns;
2647:
2648: templateColumns = newResultColumns(resultColumns,
2649: trulyTheBestConglomerateDescriptor,
2650: baseConglomerateDescriptor, false);
2651: /* Since we are doing a non-covered index scan, if bulkFetch is on, then
2652: * the only columns that we need to get are those columns referenced in the start and stop positions
2653: * and the qualifiers (and the RID) because we will need to re-get all of the other
2654: * columns from the heap anyway.
2655: * At this point in time, columns referenced anywhere in the column tree are
2656: * marked as being referenced. So, we clear all of the references, walk the
2657: * predicate list and remark the columns referenced from there and then add
2658: * the RID before compacting the columns.
2659: */
2660: if (bulkFetch != UNSET) {
2661: resultColumns.markAllUnreferenced();
2662: storeRestrictionList.markReferencedColumns();
2663: if (nonStoreRestrictionList != null) {
2664: nonStoreRestrictionList.markReferencedColumns();
2665: }
2666: }
2667: resultColumns.addRCForRID();
2668: templateColumns.addRCForRID();
2669:
2670: // Compact the RCL for the index scan down to the partial row.
2671: referencedCols = resultColumns.getReferencedFormatableBitSet(
2672: cursorTargetTable, false, false);
2673: resultColumns = resultColumns.compactColumns(cursorTargetTable,
2674: false);
2675: resultColumns.setIndexRow(baseConglomerateDescriptor
2676: .getConglomerateNumber(), forUpdate());
2677:
2678: /* We must remember if this was the cursorTargetTable
2679: * in order to get the right locking on the scan.
2680: */
2681: getUpdateLocks = cursorTargetTable;
2682: cursorTargetTable = false;
2683:
2684: return retval;
2685: }
2686:
2687: /**
2688: * Create a new ResultColumnList to reflect the columns in the
2689: * index described by the given ConglomerateDescriptor. The columns
2690: * in the new ResultColumnList are based on the columns in the given
2691: * ResultColumnList, which reflects the columns in the base table.
2692: *
2693: * @param oldColumns The original list of columns, which reflects
2694: * the columns in the base table.
2695: * @param idxCD The ConglomerateDescriptor, which describes
2696: * the index that the new ResultColumnList will
2697: * reflect.
2698: * @param heapCD The ConglomerateDescriptor for the base heap
2699: * @param cloneRCs Whether or not to clone the RCs
2700: *
2701: * @return A new ResultColumnList that reflects the columns in the index.
2702: *
2703: * @exception StandardException Thrown on error
2704: */
2705: private ResultColumnList newResultColumns(
2706: ResultColumnList oldColumns, ConglomerateDescriptor idxCD,
2707: ConglomerateDescriptor heapCD, boolean cloneRCs)
2708: throws StandardException {
2709: IndexRowGenerator irg = idxCD.getIndexDescriptor();
2710: int[] baseCols = irg.baseColumnPositions();
2711: ResultColumnList newCols = (ResultColumnList) getNodeFactory()
2712: .getNode(C_NodeTypes.RESULT_COLUMN_LIST,
2713: getContextManager());
2714:
2715: for (int i = 0; i < baseCols.length; i++) {
2716: int basePosition = baseCols[i];
2717: ResultColumn oldCol = oldColumns
2718: .getResultColumn(basePosition);
2719: ResultColumn newCol;
2720:
2721: if (SanityManager.DEBUG) {
2722: SanityManager.ASSERT(oldCol != null,
2723: "Couldn't find base column " + basePosition
2724: + "\n. RCL is\n" + oldColumns);
2725: }
2726:
2727: /* If we're cloning the RCs its because we are
2728: * building an RCL for the index when doing
2729: * a non-covering index scan. Set the expression
2730: * for the old RC to be a VCN pointing to the
2731: * new RC.
2732: */
2733: if (cloneRCs) {
2734: newCol = oldCol.cloneMe();
2735: oldCol.setExpression((ValueNode) getNodeFactory()
2736: .getNode(
2737: C_NodeTypes.VIRTUAL_COLUMN_NODE,
2738: this ,
2739: newCol,
2740: ReuseFactory.getInteger(oldCol
2741: .getVirtualColumnId()),
2742: getContextManager()));
2743: } else {
2744: newCol = oldCol;
2745: }
2746:
2747: newCols.addResultColumn(newCol);
2748: }
2749:
2750: /*
2751: ** The conglomerate is an index, so we need to generate a RowLocation
2752: ** as the last column of the result set. Notify the ResultColumnList
2753: ** that it needs to do this. Also tell the RCL whether this is
2754: ** the target of an update, so it can tell the conglomerate controller
2755: ** when it is getting the RowLocation template.
2756: */
2757: newCols
2758: .setIndexRow(heapCD.getConglomerateNumber(),
2759: forUpdate());
2760:
2761: return newCols;
2762: }
2763:
2764: /**
2765: * Generation on a FromBaseTable creates a scan on the
2766: * optimizer-selected conglomerate.
2767: *
2768: * @param acb The ActivationClassBuilder for the class being built
2769: * @param mb the execute() method to be built
2770: *
2771: * @exception StandardException Thrown on error
2772: */
2773: public void generate(ActivationClassBuilder acb, MethodBuilder mb)
2774: throws StandardException {
2775: generateResultSet(acb, mb);
2776:
2777: /*
2778: ** Remember if this base table is the cursor target table, so we can
2779: ** know which table to use when doing positioned update and delete
2780: */
2781: if (cursorTargetTable) {
2782: acb.rememberCursorTarget(mb);
2783: }
2784: }
2785:
2786: /**
2787: * Generation on a FromBaseTable for a SELECT. This logic was separated
2788: * out so that it could be shared with PREPARE SELECT FILTER.
2789: *
2790: * @param acb The ExpressionClassBuilder for the class being built
2791: * @param mb The execute() method to be built
2792: *
2793: * @exception StandardException Thrown on error
2794: */
2795: public void generateResultSet(ExpressionClassBuilder acb,
2796: MethodBuilder mb) throws StandardException {
2797: /* We must have been a best conglomerate descriptor here */
2798: if (SanityManager.DEBUG)
2799: SanityManager.ASSERT(getTrulyTheBestAccessPath()
2800: .getConglomerateDescriptor() != null);
2801:
2802: /* Get the next ResultSet #, so that we can number this ResultSetNode, its
2803: * ResultColumnList and ResultSet.
2804: */
2805: assignResultSetNumber();
2806:
2807: /*
2808: ** If we are doing a special scan to get the last row
2809: ** of an index, generate it separately.
2810: */
2811: if (specialMaxScan) {
2812: generateMaxSpecialResultSet(acb, mb);
2813: return;
2814: }
2815:
2816: /*
2817: ** If we are doing a special distinct scan, generate
2818: ** it separately.
2819: */
2820: if (distinctScan) {
2821: generateDistinctScan(acb, mb);
2822: return;
2823: }
2824:
2825: /*
2826: * Referential action dependent table scan, generate it
2827: * seperately.
2828: */
2829:
2830: if (raDependentScan) {
2831: generateRefActionDependentTableScan(acb, mb);
2832: return;
2833:
2834: }
2835:
2836: JoinStrategy trulyTheBestJoinStrategy = getTrulyTheBestAccessPath()
2837: .getJoinStrategy();
2838:
2839: // the table scan generator is what we return
2840: acb.pushGetResultSetFactoryExpression(mb);
2841:
2842: int nargs = getScanArguments(acb, mb);
2843:
2844: mb.callMethod(VMOpcode.INVOKEINTERFACE, (String) null,
2845: trulyTheBestJoinStrategy
2846: .resultSetMethodName(bulkFetch != UNSET),
2847: ClassName.NoPutResultSet, nargs);
2848:
2849: /* If this table is the target of an update or a delete, then we must
2850: * wrap the Expression up in an assignment expression before
2851: * returning.
2852: * NOTE - scanExpress is a ResultSet. We will need to cast it to the
2853: * appropriate subclass.
2854: * For example, for a DELETE, instead of returning a call to the
2855: * ResultSetFactory, we will generate and return:
2856: * this.SCANRESULTSET = (cast to appropriate ResultSet type)
2857: * The outer cast back to ResultSet is needed so that
2858: * we invoke the appropriate method.
2859: * (call to the ResultSetFactory)
2860: */
2861: if ((updateOrDelete == UPDATE) || (updateOrDelete == DELETE)) {
2862: mb.cast(ClassName.CursorResultSet);
2863: mb.putField(acb.getRowLocationScanResultSetName(),
2864: ClassName.CursorResultSet);
2865: mb.cast(ClassName.NoPutResultSet);
2866: }
2867: }
2868:
2869: /**
2870: * Get the final CostEstimate for this ResultSetNode.
2871: *
2872: * @return The final CostEstimate for this ResultSetNode.
2873: */
2874: public CostEstimate getFinalCostEstimate() {
2875: return getTrulyTheBestAccessPath().getCostEstimate();
2876: }
2877:
2878: /* helper method used by generateMaxSpecialResultSet and
2879: * generateDistinctScan to return the name of the index if the
2880: * conglomerate is an index.
2881: * @param cd Conglomerate for which we need to push the index name
2882: * @param mb Associated MethodBuilder
2883: * @throws StandardException
2884: */
2885: private void pushIndexName(ConglomerateDescriptor cd,
2886: MethodBuilder mb) throws StandardException {
2887: if (cd.isConstraint()) {
2888: DataDictionary dd = getDataDictionary();
2889: ConstraintDescriptor constraintDesc = dd
2890: .getConstraintDescriptor(tableDescriptor, cd
2891: .getUUID());
2892: mb.push(constraintDesc.getConstraintName());
2893: } else if (cd.isIndex()) {
2894: mb.push(cd.getConglomerateName());
2895: } else {
2896: // If the conglomerate is the base table itself, make sure we push null.
2897: // Before the fix for DERBY-578, we would push the base table name
2898: // and this was just plain wrong and would cause statistics information to be incorrect.
2899: mb.pushNull("java.lang.String");
2900: }
2901: }
2902:
/**
 * Generate a "last index key" result set — the special scan that fetches
 * only the last row of an index (used when specialMaxScan was chosen).
 * Pushes the 13 arguments listed below, then invokes
 * getLastIndexKeyResultSet on the result set factory.
 *
 * @param acb The ExpressionClassBuilder for the class being built
 * @param mb  The execute() method to be built
 *
 * @throws StandardException Thrown on error
 */
private void generateMaxSpecialResultSet(
		ExpressionClassBuilder acb, MethodBuilder mb)
		throws StandardException {
	ConglomerateDescriptor cd = getTrulyTheBestAccessPath()
			.getConglomerateDescriptor();
	CostEstimate costEstimate = getFinalCostEstimate();
	// -1 means the whole row is fetched (no referenced-column subset).
	int colRefItem = (referencedCols == null) ? -1 : acb
			.addItem(referencedCols);
	boolean tableLockGranularity = tableDescriptor
			.getLockGranularity() == TableDescriptor.TABLE_LOCK_GRANULARITY;

	/*
	** getLastIndexKeyResultSet
	** (
	** activation,
	** resultSetNumber,
	** resultRowAllocator,
	** conglomereNumber,
	** tableName,
	** optimizeroverride
	** indexName,
	** colRefItem,
	** lockMode,
	** tableLocked,
	** isolationLevel,
	** optimizerEstimatedRowCount,
	** optimizerEstimatedRowCost,
	** );
	*/

	acb.pushGetResultSetFactoryExpression(mb);

	// Push the arguments in exactly the order documented above.
	acb.pushThisAsActivation(mb);
	mb.push(getResultSetNumber());
	resultColumns.generateHolder(acb, mb, referencedCols,
			(FormatableBitSet) null);
	mb.push(cd.getConglomerateNumber());
	mb.push(tableDescriptor.getName());
	//User may have supplied optimizer overrides in the sql
	//Pass them onto execute phase so it can be shown in
	//run time statistics.
	if (tableProperties != null)
		mb.push(org.apache.derby.iapi.util.PropertyUtil
				.sortProperties(tableProperties));
	else
		mb.pushNull("java.lang.String");
	pushIndexName(cd, mb);
	mb.push(colRefItem);
	mb.push(getTrulyTheBestAccessPath().getLockMode());
	mb.push(tableLockGranularity);
	mb.push(getCompilerContext().getScanIsolationLevel());
	mb.push(costEstimate.singleScanRowCount());
	mb.push(costEstimate.getEstimatedCost());

	// 13 must match the number of arguments pushed above.
	mb.callMethod(VMOpcode.INVOKEINTERFACE, (String) null,
			"getLastIndexKeyResultSet", ClassName.NoPutResultSet,
			13);

}
2962:
/**
 * Generate a distinct scan — a hash-based scan chosen by the optimizer
 * (distinctScan) that returns distinct values of the referenced columns.
 * Builds the hash key column array, saves it plus the static conglomerate
 * info on the saved-objects chain, pushes the 16 arguments listed below,
 * and invokes getDistinctScanResultSet.
 *
 * @param acb The ExpressionClassBuilder for the class being built
 * @param mb  The execute() method to be built
 *
 * @throws StandardException Thrown on error
 */
private void generateDistinctScan(ExpressionClassBuilder acb,
		MethodBuilder mb) throws StandardException {
	ConglomerateDescriptor cd = getTrulyTheBestAccessPath()
			.getConglomerateDescriptor();
	CostEstimate costEstimate = getFinalCostEstimate();
	// -1 means the whole row is fetched (no referenced-column subset).
	int colRefItem = (referencedCols == null) ? -1 : acb
			.addItem(referencedCols);
	boolean tableLockGranularity = tableDescriptor
			.getLockGranularity() == TableDescriptor.TABLE_LOCK_GRANULARITY;

	/*
	** getDistinctScanResultSet
	** (
	** activation,
	** resultSetNumber,
	** resultRowAllocator,
	** conglomereNumber,
	** tableName,
	** optimizeroverride
	** indexName,
	** colRefItem,
	** lockMode,
	** tableLocked,
	** isolationLevel,
	** optimizerEstimatedRowCount,
	** optimizerEstimatedRowCost,
	** closeCleanupMethod
	** );
	*/

	/* Get the hash key columns and wrap them in a formattable */
	int[] hashKeyColumns;

	// NOTE(review): sized to resultColumns.size(); in the partial-row
	// branch only as many slots as set bits are filled — presumably the
	// RCL has already been compacted so the counts match; confirm.
	hashKeyColumns = new int[resultColumns.size()];
	if (referencedCols == null) {
		// Full row: hash on every column position, in order.
		for (int index = 0; index < hashKeyColumns.length; index++) {
			hashKeyColumns[index] = index;
		}
	} else {
		// Partial row: hash key positions are the set bits of the
		// referenced-column bit map.
		int index = 0;
		for (int colNum = referencedCols.anySetBit(); colNum != -1; colNum = referencedCols
				.anySetBit(colNum)) {
			hashKeyColumns[index++] = colNum;
		}
	}

	// Wrap the hash key positions so they can be saved as a single item.
	FormatableIntHolder[] fihArray = FormatableIntHolder
			.getFormatableIntHolders(hashKeyColumns);
	FormatableArrayHolder hashKeyHolder = new FormatableArrayHolder(
			fihArray);
	int hashKeyItem = acb.addItem(hashKeyHolder);
	long conglomNumber = cd.getConglomerateNumber();
	// Pre-compiled conglomerate info lets execution skip a dictionary
	// lookup at open time.
	StaticCompiledOpenConglomInfo scoci = getLanguageConnectionContext()
			.getTransactionCompile().getStaticCompiledConglomInfo(
					conglomNumber);

	acb.pushGetResultSetFactoryExpression(mb);

	// Push the arguments in the order getDistinctScanResultSet expects.
	acb.pushThisAsActivation(mb);
	mb.push(conglomNumber);
	mb.push(acb.addItem(scoci));
	resultColumns.generateHolder(acb, mb, referencedCols,
			(FormatableBitSet) null);
	mb.push(getResultSetNumber());
	mb.push(hashKeyItem);
	mb.push(tableDescriptor.getName());
	//User may have supplied optimizer overrides in the sql
	//Pass them onto execute phase so it can be shown in
	//run time statistics.
	if (tableProperties != null)
		mb.push(org.apache.derby.iapi.util.PropertyUtil
				.sortProperties(tableProperties));
	else
		mb.pushNull("java.lang.String");
	pushIndexName(cd, mb);
	mb.push(cd.isConstraint());
	mb.push(colRefItem);
	mb.push(getTrulyTheBestAccessPath().getLockMode());
	mb.push(tableLockGranularity);
	mb.push(getCompilerContext().getScanIsolationLevel());
	mb.push(costEstimate.singleScanRowCount());
	mb.push(costEstimate.getEstimatedCost());

	// 16 must match the number of arguments pushed above.
	mb.callMethod(VMOpcode.INVOKEINTERFACE, (String) null,
			"getDistinctScanResultSet", ClassName.NoPutResultSet,
			16);
}
3050:
3051: /**
3052: * Generation on a FromBaseTable for a referential action dependent table.
3053: *
3054: * @param acb The ExpressionClassBuilder for the class being built
3055: * @param mb The execute() method to be built
3056: *
3057: * @exception StandardException Thrown on error
3058: */
3059:
3060: private void generateRefActionDependentTableScan(
3061: ExpressionClassBuilder acb, MethodBuilder mb)
3062: throws StandardException {
3063:
3064: acb.pushGetResultSetFactoryExpression(mb);
3065:
3066: //get the parameters required to do a table scan
3067: int nargs = getScanArguments(acb, mb);
3068:
3069: //extra parameters required to create an dependent table result set.
3070: mb.push(raParentResultSetId); //id for the parent result set.
3071: mb.push(fkIndexConglomId);
3072: mb.push(acb.addItem(fkColArray));
3073: mb.push(acb.addItem(getDataDictionary().getRowLocationTemplate(
3074: getLanguageConnectionContext(), tableDescriptor)));
3075:
3076: int argCount = nargs + 4;
3077: mb.callMethod(VMOpcode.INVOKEINTERFACE, (String) null,
3078: "getRaDependentTableScanResultSet",
3079: ClassName.NoPutResultSet, argCount);
3080:
3081: if ((updateOrDelete == UPDATE) || (updateOrDelete == DELETE)) {
3082: mb.cast(ClassName.CursorResultSet);
3083: mb.putField(acb.getRowLocationScanResultSetName(),
3084: ClassName.CursorResultSet);
3085: mb.cast(ClassName.NoPutResultSet);
3086: }
3087:
3088: }
3089:
/**
 * Push the arguments for this table's scan result set and return how
 * many were pushed.  The argument list itself is delegated to the
 * chosen join strategy's getScanArgs().
 *
 * @param acb The ExpressionClassBuilder for the class being built
 * @param mb  The method being built
 *
 * @return the number of arguments pushed
 *
 * @throws StandardException Thrown on error
 */
private int getScanArguments(ExpressionClassBuilder acb,
		MethodBuilder mb) throws StandardException {
	// get a function to allocate scan rows of the right shape and size
	MethodBuilder resultRowAllocator = resultColumns
			.generateHolderMethod(acb, referencedCols,
					(FormatableBitSet) null);

	// pass in the referenced columns on the saved objects
	// chain; -1 means the whole row is fetched
	int colRefItem = -1;
	if (referencedCols != null) {
		colRefItem = acb.addItem(referencedCols);
	}

	// beetle entry 3865: updateable cursor using index
	// For an updatable cursor (or update locks) over an index scan, save
	// the index key's base column positions, negated for descending keys.
	int indexColItem = -1;
	if (cursorTargetTable || getUpdateLocks) {
		ConglomerateDescriptor cd = getTrulyTheBestAccessPath()
				.getConglomerateDescriptor();
		if (cd.isIndex()) {
			int[] baseColPos = cd.getIndexDescriptor()
					.baseColumnPositions();
			boolean[] isAscending = cd.getIndexDescriptor()
					.isAscending();
			int[] indexCols = new int[baseColPos.length];
			for (int i = 0; i < indexCols.length; i++)
				indexCols[i] = isAscending[i] ? baseColPos[i]
						: -baseColPos[i];
			indexColItem = acb.addItem(indexCols);
		}
	}

	AccessPath ap = getTrulyTheBestAccessPath();
	JoinStrategy trulyTheBestJoinStrategy = ap.getJoinStrategy();

	/*
	** We can only do bulkFetch on NESTEDLOOP
	*/
	if (SanityManager.DEBUG) {
		if ((!trulyTheBestJoinStrategy.bulkFetchOK())
				&& (bulkFetch != UNSET)) {
			SanityManager
					.THROWASSERT("bulkFetch should not be set "
							+ "for the join strategy "
							+ trulyTheBestJoinStrategy.getName());
		}
	}

	// The join strategy pushes the full argument list and reports how
	// many arguments it emitted.
	int nargs = trulyTheBestJoinStrategy
			.getScanArgs(
					getLanguageConnectionContext()
							.getTransactionCompile(),
					mb,
					this ,
					storeRestrictionList,
					nonStoreRestrictionList,
					acb,
					bulkFetch,
					resultRowAllocator,
					colRefItem,
					indexColItem,
					getTrulyTheBestAccessPath().getLockMode(),
					(tableDescriptor.getLockGranularity() == TableDescriptor.TABLE_LOCK_GRANULARITY),
					getCompilerContext().getScanIsolationLevel(),
					ap.getOptimizer().getMaxMemoryPerTable());

	return nargs;
}
3158:
3159: /**
3160: * Convert an absolute to a relative 0-based column position.
3161: *
3162: * @param absolutePosition The absolute 0-based column position.
3163: *
3164: * @return The relative 0-based column position.
3165: */
3166: private int mapAbsoluteToRelativeColumnPosition(int absolutePosition) {
3167: if (referencedCols == null) {
3168: return absolutePosition;
3169: }
3170:
3171: /* setBitCtr counts the # of columns in the row,
3172: * from the leftmost to the absolutePosition, that will be
3173: * in the partial row returned by the store. This becomes
3174: * the new value for column position.
3175: */
3176: int setBitCtr = 0;
3177: int bitCtr = 0;
3178: for (; bitCtr < referencedCols.size()
3179: && bitCtr < absolutePosition; bitCtr++) {
3180: if (referencedCols.get(bitCtr)) {
3181: setBitCtr++;
3182: }
3183: }
3184: return setBitCtr;
3185: }
3186:
3187: /**
3188: * Get the exposed name for this table, which is the name that can
3189: * be used to refer to it in the rest of the query.
3190: *
3191: * @return The exposed name of this table.
3192: *
3193: */
3194: public String getExposedName() {
3195: if (correlationName != null)
3196: return correlationName;
3197: else
3198: return getOrigTableName().getFullTableName();
3199: }
3200:
3201: /**
3202: * Get the exposed table name for this table, which is the name that can
3203: * be used to refer to it in the rest of the query.
3204: *
3205: * @return TableName The exposed name of this table.
3206: *
3207: * @exception StandardException Thrown on error
3208: */
3209: private TableName getExposedTableName() throws StandardException {
3210: if (correlationName != null)
3211: return makeTableName(null, correlationName);
3212: else
3213: return getOrigTableName();
3214: }
3215:
3216: /**
3217: * Return the table name for this table.
3218: *
3219: * @return The table name for this table.
3220: */
3221:
3222: public TableName getTableNameField() {
3223: return tableName;
3224: }
3225:
3226: /**
3227: * Return a ResultColumnList with all of the columns in this table.
3228: * (Used in expanding '*'s.)
3229: * NOTE: Since this method is for expanding a "*" in the SELECT list,
3230: * ResultColumn.expression will be a ColumnReference.
3231: *
3232: * @param allTableName The qualifier on the "*"
3233: *
3234: * @return ResultColumnList List of result columns from this table.
3235: *
3236: * @exception StandardException Thrown on error
3237: */
3238: public ResultColumnList getAllResultColumns(TableName allTableName)
3239: throws StandardException {
3240: return getResultColumnsForList(allTableName, resultColumns,
3241: getOrigTableName());
3242: }
3243:
3244: /**
3245: * Build a ResultColumnList based on all of the columns in this FromBaseTable.
3246: * NOTE - Since the ResultColumnList generated is for the FromBaseTable,
3247: * ResultColumn.expression will be a BaseColumnNode.
3248: *
3249: * @return ResultColumnList representing all referenced columns
3250: *
3251: * @exception StandardException Thrown on error
3252: */
3253: public ResultColumnList genResultColList() throws StandardException {
3254: ResultColumnList rcList = null;
3255: ResultColumn resultColumn;
3256: ValueNode valueNode;
3257: ColumnDescriptor colDesc = null;
3258: TableName exposedName;
3259:
3260: /* Cache exposed name for this table.
3261: * The exposed name becomes the qualifier for each column
3262: * in the expanded list.
3263: */
3264: exposedName = getExposedTableName();
3265:
3266: /* Add all of the columns in the table */
3267: rcList = (ResultColumnList) getNodeFactory().getNode(
3268: C_NodeTypes.RESULT_COLUMN_LIST, getContextManager());
3269: ColumnDescriptorList cdl = tableDescriptor
3270: .getColumnDescriptorList();
3271: int cdlSize = cdl.size();
3272:
3273: for (int index = 0; index < cdlSize; index++) {
3274: /* Build a ResultColumn/BaseColumnNode pair for the column */
3275: colDesc = (ColumnDescriptor) cdl.elementAt(index);
3276: //A ColumnDescriptor instantiated through SYSCOLUMNSRowFactory only has
3277: //the uuid set on it and no table descriptor set on it. Since we know here
3278: //that this columnDescriptor is tied to tableDescriptor, set it so using
3279: //setTableDescriptor method. ColumnDescriptor's table descriptor is used
3280: //to get ResultSetMetaData.getTableName & ResultSetMetaData.getSchemaName
3281: colDesc.setTableDescriptor(tableDescriptor);
3282:
3283: valueNode = (ValueNode) getNodeFactory().getNode(
3284: C_NodeTypes.BASE_COLUMN_NODE,
3285: colDesc.getColumnName(), exposedName,
3286: colDesc.getType(), getContextManager());
3287: resultColumn = (ResultColumn) getNodeFactory().getNode(
3288: C_NodeTypes.RESULT_COLUMN, colDesc, valueNode,
3289: getContextManager());
3290:
3291: /* Build the ResultColumnList to return */
3292: rcList.addResultColumn(resultColumn);
3293: }
3294:
3295: return rcList;
3296: }
3297:
3298: /**
3299: * Augment the RCL to include the columns in the FormatableBitSet.
3300: * If the column is already there, don't add it twice.
3301: * Column is added as a ResultColumn pointing to a
3302: * ColumnReference.
3303: *
3304: * @param inputRcl The original list
3305: * @param colsWeWant bit set of cols we want
3306: *
3307: * @return ResultColumnList the rcl
3308: *
3309: * @exception StandardException Thrown on error
3310: */
3311: public ResultColumnList addColsToList(ResultColumnList inputRcl,
3312: FormatableBitSet colsWeWant) throws StandardException {
3313: ResultColumnList rcList = null;
3314: ResultColumn resultColumn;
3315: ValueNode valueNode;
3316: ColumnDescriptor cd = null;
3317: TableName exposedName;
3318:
3319: /* Cache exposed name for this table.
3320: * The exposed name becomes the qualifier for each column
3321: * in the expanded list.
3322: */
3323: exposedName = getExposedTableName();
3324:
3325: /* Add all of the columns in the table */
3326: ResultColumnList newRcl = (ResultColumnList) getNodeFactory()
3327: .getNode(C_NodeTypes.RESULT_COLUMN_LIST,
3328: getContextManager());
3329: ColumnDescriptorList cdl = tableDescriptor
3330: .getColumnDescriptorList();
3331: int cdlSize = cdl.size();
3332:
3333: for (int index = 0; index < cdlSize; index++) {
3334: /* Build a ResultColumn/BaseColumnNode pair for the column */
3335: cd = (ColumnDescriptor) cdl.elementAt(index);
3336: int position = cd.getPosition();
3337:
3338: if (!colsWeWant.get(position)) {
3339: continue;
3340: }
3341:
3342: if ((resultColumn = inputRcl.getResultColumn(position)) == null) {
3343: valueNode = (ValueNode) getNodeFactory().getNode(
3344: C_NodeTypes.COLUMN_REFERENCE,
3345: cd.getColumnName(), exposedName,
3346: getContextManager());
3347: resultColumn = (ResultColumn) getNodeFactory().getNode(
3348: C_NodeTypes.RESULT_COLUMN, cd, valueNode,
3349: getContextManager());
3350: }
3351:
3352: /* Build the ResultColumnList to return */
3353: newRcl.addResultColumn(resultColumn);
3354: }
3355:
3356: return newRcl;
3357: }
3358:
3359: /**
3360: * Return a TableName node representing this FromTable.
3361: * @return a TableName node representing this FromTable.
3362: * @exception StandardException Thrown on error
3363: */
3364: public TableName getTableName() throws StandardException {
3365: TableName tn;
3366:
3367: tn = super .getTableName();
3368:
3369: if (tn != null) {
3370: if (tn.getSchemaName() == null && correlationName == null)
3371: tn.bind(this .getDataDictionary());
3372: }
3373:
3374: return (tn != null ? tn : tableName);
3375: }
3376:
3377: /**
3378: Mark this ResultSetNode as the target table of an updatable
3379: cursor.
3380: */
3381: public boolean markAsCursorTargetTable() {
3382: cursorTargetTable = true;
3383: return true;
3384: }
3385:
3386: /**
3387: * Is this a table that has a FOR UPDATE
3388: * clause?
3389: *
3390: * @return true/false
3391: */
3392: protected boolean cursorTargetTable() {
3393: return cursorTargetTable;
3394: }
3395:
3396: /**
3397: * Mark as updatable all the columns in the result column list of this
3398: * FromBaseTable that match the columns in the given update column list.
3399: *
3400: * @param updateColumns A ResultColumnList representing the columns
3401: * to be updated.
3402: */
3403: void markUpdated(ResultColumnList updateColumns) {
3404: resultColumns.markUpdated(updateColumns);
3405: }
3406:
3407: /**
3408: * Search to see if a query references the specifed table name.
3409: *
3410: * @param name Table name (String) to search for.
3411: * @param baseTable Whether or not name is for a base table
3412: *
3413: * @return true if found, else false
3414: *
3415: * @exception StandardException Thrown on error
3416: */
3417: public boolean referencesTarget(String name, boolean baseTable)
3418: throws StandardException {
3419: return baseTable && name.equals(getBaseTableName());
3420: }
3421:
3422: /**
3423: * Return true if the node references SESSION schema tables (temporary or permanent)
3424: *
3425: * @return true if references SESSION schema tables, else false
3426: *
3427: * @exception StandardException Thrown on error
3428: */
3429: public boolean referencesSessionSchema() throws StandardException {
3430: //If base table is a SESSION schema table, then return true.
3431: return isSessionSchema(tableDescriptor.getSchemaDescriptor());
3432: }
3433:
3434: /**
3435: * Return whether or not the underlying ResultSet tree will return
3436: * a single row, at most. This method is intended to be used during
3437: * generation, after the "truly" best conglomerate has been chosen.
3438: * This is important for join nodes where we can save the extra next
3439: * on the right side if we know that it will return at most 1 row.
3440: *
3441: * @return Whether or not the underlying ResultSet tree will return a single row.
3442: * @exception StandardException Thrown on error
3443: */
3444: public boolean isOneRowResultSet() throws StandardException {
3445: // EXISTS FBT will only return a single row
3446: if (existsBaseTable) {
3447: return true;
3448: }
3449:
3450: /* For hash join, we need to consider both the qualification
3451: * and hash join predicates and we consider them against all
3452: * conglomerates since we are looking for any uniqueness
3453: * condition that holds on the columns in the hash table,
3454: * otherwise we just consider the predicates in the
3455: * restriction list and the conglomerate being scanned.
3456:
3457: */
3458: AccessPath ap = getTrulyTheBestAccessPath();
3459: JoinStrategy trulyTheBestJoinStrategy = ap.getJoinStrategy();
3460: PredicateList pl;
3461:
3462: if (trulyTheBestJoinStrategy.isHashJoin()) {
3463: pl = (PredicateList) getNodeFactory().getNode(
3464: C_NodeTypes.PREDICATE_LIST, getContextManager());
3465: if (storeRestrictionList != null) {
3466: pl.nondestructiveAppend(storeRestrictionList);
3467: }
3468: if (nonStoreRestrictionList != null) {
3469: pl.nondestructiveAppend(nonStoreRestrictionList);
3470: }
3471: return isOneRowResultSet(pl);
3472: } else {
3473: return isOneRowResultSet(getTrulyTheBestAccessPath()
3474: .getConglomerateDescriptor(), restrictionList);
3475: }
3476: }
3477:
3478: /**
3479: * Return whether or not this is actually a EBT for NOT EXISTS.
3480: */
3481: public boolean isNotExists() {
3482: return isNotExists;
3483: }
3484:
3485: public boolean isOneRowResultSet(OptimizablePredicateList predList)
3486: throws StandardException {
3487: ConglomerateDescriptor[] cds = tableDescriptor
3488: .getConglomerateDescriptors();
3489:
3490: for (int index = 0; index < cds.length; index++) {
3491: if (isOneRowResultSet(cds[index], predList)) {
3492: return true;
3493: }
3494: }
3495:
3496: return false;
3497: }
3498:
3499: /**
3500: * Determine whether or not the columns marked as true in
3501: * the passed in array are a superset of any unique index
3502: * on this table.
3503: * This is useful for subquery flattening and distinct elimination
3504: * based on a uniqueness condition.
3505: *
3506: * @param eqCols The columns to consider
3507: *
3508: * @return Whether or not the columns marked as true are a superset
3509: */
3510: protected boolean super setOfUniqueIndex(boolean[] eqCols)
3511: throws StandardException {
3512: ConglomerateDescriptor[] cds = tableDescriptor
3513: .getConglomerateDescriptors();
3514:
3515: /* Cycle through the ConglomerateDescriptors */
3516: for (int index = 0; index < cds.length; index++) {
3517: ConglomerateDescriptor cd = cds[index];
3518:
3519: if (!cd.isIndex()) {
3520: continue;
3521: }
3522: IndexDescriptor id = cd.getIndexDescriptor();
3523:
3524: if (!id.isUnique()) {
3525: continue;
3526: }
3527:
3528: int[] keyColumns = id.baseColumnPositions();
3529:
3530: int inner = 0;
3531: for (; inner < keyColumns.length; inner++) {
3532: if (!eqCols[keyColumns[inner]]) {
3533: break;
3534: }
3535: }
3536:
3537: /* Did we get a full match? */
3538: if (inner == keyColumns.length) {
3539: return true;
3540: }
3541: }
3542:
3543: return false;
3544: }
3545:
3546: /**
3547: * Determine whether or not the columns marked as true in
3548: * the passed in join table matrix are a superset of any single column unique index
3549: * on this table.
3550: * This is useful for distinct elimination
3551: * based on a uniqueness condition.
3552: *
3553: * @param tableColMap The columns to consider
3554: *
3555: * @return Whether or not the columns marked as true for one at least
3556: * one table are a superset
3557: */
3558: protected boolean super setOfUniqueIndex(JBitSet[] tableColMap)
3559: throws StandardException {
3560: ConglomerateDescriptor[] cds = tableDescriptor
3561: .getConglomerateDescriptors();
3562:
3563: /* Cycle through the ConglomerateDescriptors */
3564: for (int index = 0; index < cds.length; index++) {
3565: ConglomerateDescriptor cd = cds[index];
3566:
3567: if (!cd.isIndex()) {
3568: continue;
3569: }
3570: IndexDescriptor id = cd.getIndexDescriptor();
3571:
3572: if (!id.isUnique()) {
3573: continue;
3574: }
3575:
3576: int[] keyColumns = id.baseColumnPositions();
3577: int numBits = tableColMap[0].size();
3578: JBitSet keyMap = new JBitSet(numBits);
3579: JBitSet resMap = new JBitSet(numBits);
3580:
3581: int inner = 0;
3582: for (; inner < keyColumns.length; inner++) {
3583: keyMap.set(keyColumns[inner]);
3584: }
3585: int table = 0;
3586: for (; table < tableColMap.length; table++) {
3587: resMap.setTo(tableColMap[table]);
3588: resMap.and(keyMap);
3589: if (keyMap.equals(resMap)) {
3590: tableColMap[table].set(0);
3591: return true;
3592: }
3593: }
3594:
3595: }
3596:
3597: return false;
3598: }
3599:
3600: /**
3601: * Get the lock mode for the target table heap of an update or delete
3602: * statement. It is not always MODE_RECORD. We want the lock on the
3603: * heap to be consistent with optimizer and eventually system's decision.
3604: * This is to avoid deadlock (beetle 4318). During update/delete's
3605: * execution, it will first use this lock mode we return to lock heap to
3606: * open a RowChanger, then use the lock mode that is the optimizer and
3607: * system's combined decision to open the actual source conglomerate.
3608: * We've got to make sure they are consistent. This is the lock chart (for
3609: * detail reason, see comments below):
3610: * BEST ACCESS PATH LOCK MODE ON HEAP
3611: * ---------------------- -----------------------------------------
3612: * index row lock
3613: *
3614: * heap row lock if READ_COMMITTED,
 * REPEATABLE_READ, or READ_UNCOMMITTED &&
3616: * not specified table lock otherwise,
 * use optimizer decided best access
3618: * path's lock mode
3619: *
3620: * @return The lock mode
3621: */
3622: public int updateTargetLockMode() {
3623: /* if best access path is index scan, we always use row lock on heap,
3624: * consistent with IndexRowToBaseRowResultSet's openCore(). We don't
3625: * need to worry about the correctness of serializable isolation level
3626: * because index will have previous key locking if it uses row locking
3627: * as well.
3628: */
3629: if (getTrulyTheBestAccessPath().getConglomerateDescriptor()
3630: .isIndex())
3631: return TransactionController.MODE_RECORD;
3632:
3633: /* we override optimizer's decision of the lock mode on heap, and
3634: * always use row lock if we are read committed/uncommitted or
3635: * repeatable read isolation level, and no forced table lock.
3636: *
3637: * This is also reflected in TableScanResultSet's constructor,
3638: * KEEP THEM CONSISTENT!
3639: *
3640: * This is to improve concurrency, while maintaining correctness with
3641: * serializable level. Since the isolation level can change between
3642: * compilation and execution if the statement is cached or stored, we
3643: * encode both the SERIALIZABLE lock mode and the non-SERIALIZABLE
3644: * lock mode in the returned lock mode if they are different.
3645: */
3646: int isolationLevel = getLanguageConnectionContext()
3647: .getCurrentIsolationLevel();
3648:
3649: if ((isolationLevel != ExecutionContext.SERIALIZABLE_ISOLATION_LEVEL)
3650: && (tableDescriptor.getLockGranularity() != TableDescriptor.TABLE_LOCK_GRANULARITY)) {
3651: int lockMode = getTrulyTheBestAccessPath().getLockMode();
3652: if (lockMode != TransactionController.MODE_RECORD)
3653: lockMode = (lockMode & 0xff) << 16;
3654: else
3655: lockMode = 0;
3656: lockMode += TransactionController.MODE_RECORD;
3657:
3658: return lockMode;
3659: }
3660:
3661: /* if above don't apply, use optimizer's decision on heap's lock
3662: */
3663: return getTrulyTheBestAccessPath().getLockMode();
3664: }
3665:
3666: /**
3667: * Return whether or not the underlying ResultSet tree
3668: * is ordered on the specified columns.
3669: * RESOLVE - This method currently only considers the outermost table
3670: * of the query block.
3671: * RESOLVE - We do not currently push method calls down, so we don't
3672: * worry about whether the equals comparisons can be against a variant method.
3673: *
3674: * @param crs The specified ColumnReference[]
3675: * @param permuteOrdering Whether or not the order of the CRs in the array can be permuted
3676: * @param fbtVector Vector that is to be filled with the FromBaseTable
3677: *
3678: * @return Whether the underlying ResultSet tree
3679: * is ordered on the specified column.
3680: *
3681: * @exception StandardException Thrown on error
3682: */
3683: boolean isOrderedOn(ColumnReference[] crs, boolean permuteOrdering,
3684: Vector fbtVector) throws StandardException {
3685: /* The following conditions must be met, regardless of the value of permuteOrdering,
3686: * in order for the table to be ordered on the specified columns:
3687: * o Each column is from this table. (RESOLVE - handle joins later)
3688: * o The access path for this table is an index.
3689: */
3690: // Verify that all CRs are from this table
3691: for (int index = 0; index < crs.length; index++) {
3692: if (crs[index].getTableNumber() != tableNumber) {
3693: return false;
3694: }
3695: }
3696: // Verify access path is an index
3697: ConglomerateDescriptor cd = getTrulyTheBestAccessPath()
3698: .getConglomerateDescriptor();
3699: if (!cd.isIndex()) {
3700: return false;
3701: }
3702:
3703: // Now consider whether or not the CRs can be permuted
3704: boolean isOrdered;
3705: if (permuteOrdering) {
3706: isOrdered = isOrdered(crs, cd);
3707: } else {
3708: isOrdered = isStrictlyOrdered(crs, cd);
3709: }
3710:
3711: if (fbtVector != null) {
3712: fbtVector.addElement(this );
3713: }
3714:
3715: return isOrdered;
3716: }
3717:
3718: /**
3719: * Turn off bulk fetch
3720: */
3721: void disableBulkFetch() {
3722: bulkFetchTurnedOff = true;
3723: bulkFetch = UNSET;
3724: }
3725:
3726: /**
3727: * Do a special scan for max.
3728: */
3729: void doSpecialMaxScan() {
3730: if (SanityManager.DEBUG) {
3731: if ((restrictionList.size() != 0)
3732: || (storeRestrictionList.size() != 0)
3733: || (nonStoreRestrictionList.size() != 0)) {
3734: SanityManager
3735: .THROWASSERT("shouldn't be setting max special scan because there is a restriction");
3736: }
3737: }
3738: specialMaxScan = true;
3739: }
3740:
3741: /**
3742: * Is it possible to do a distinct scan on this ResultSet tree.
3743: * (See SelectNode for the criteria.)
3744: *
3745: * @param distinctColumns the set of distinct columns
3746: * @return Whether or not it is possible to do a distinct scan on this ResultSet tree.
3747: */
3748: boolean isPossibleDistinctScan(Set distinctColumns) {
3749: if ((restrictionList != null && restrictionList.size() != 0)) {
3750: return false;
3751: }
3752:
3753: HashSet columns = new HashSet();
3754: for (int i = 0; i < resultColumns.size(); i++) {
3755: ResultColumn rc = (ResultColumn) resultColumns.elementAt(i);
3756: columns.add(rc.getExpression());
3757: }
3758:
3759: return columns.equals(distinctColumns);
3760: }
3761:
3762: /**
3763: * Mark the underlying scan as a distinct scan.
3764: */
void markForDistinctScan() {
    // Record the distinct-scan decision for this base table's scan.
    distinctScan = true;
}
3768:
3769: /**
3770: * Notify the underlying result set tree that the result is
3771: * ordering dependent. (For example, no bulk fetch on an index
3772: * if under an IndexRowToBaseRow.)
3773: */
void markOrderingDependent() {
    /* NOTE: IRTBR will use a different method to tell us that
     * it cannot do a bulk fetch as the ordering issues are
     * specific to a FBT being under an IRTBR as opposed to a
     * FBT being under a PRN, etc.
     * So, we just ignore this call for now.
     */
    // Intentionally a no-op for FromBaseTable; see the NOTE above.
}
3782:
3783: /**
3784: * Return whether or not this index is ordered on a permutation of the specified columns.
3785: *
3786: * @param crs The specified ColumnReference[]
3787: * @param cd The ConglomerateDescriptor for the chosen index.
3788: *
 * @return Whether or not this index is ordered on a permutation of the specified columns.
3790: *
3791: * @exception StandardException Thrown on error
3792: */
private boolean isOrdered(ColumnReference[] crs,
        ConglomerateDescriptor cd) throws StandardException {
    /* This table is ordered on a permutation of the specified columns if:
     * o For each key column, until a match has been found for all of the
     *   ColumnReferences, it is either in the array of ColumnReferences
     *   or there is an equality predicate on it.
     *   (NOTE: It is okay to exhaust the key columns before the ColumnReferences
     *   if the index is unique. In other words if we have CRs left over after
     *   matching all of the columns in the key then the table is considered ordered
     *   iff the index is unique. For example:
     *       i1 on (c1, c2), unique
     *       select distinct c3 from t1 where c1 = 1 and c2 = ?;
     *   is ordered on c3 since there will be at most 1 qualifying row.)
     */
    // Tracks which ColumnReferences have been matched to key columns.
    boolean[] matchedCRs = new boolean[crs.length];

    int nextKeyColumn = 0;
    int[] keyColumns = cd.getIndexDescriptor()
            .baseColumnPositions();

    // Walk through the key columns
    for (; nextKeyColumn < keyColumns.length; nextKeyColumn++) {
        boolean currMatch = false;
        // See if the key column is in crs
        for (int nextCR = 0; nextCR < crs.length; nextCR++) {
            if (crs[nextCR].getColumnNumber() == keyColumns[nextKeyColumn]) {
                matchedCRs[nextCR] = true;
                currMatch = true;
                break;
            }
        }

        // Advance to next key column if we found a match on this one
        if (currMatch) {
            continue;
        }

        // Stop search if there is no equality predicate on this key column
        if (!storeRestrictionList.hasOptimizableEqualityPredicate(
                this, keyColumns[nextKeyColumn], true)) {
            break;
        }
    }

    /* Count the number of matched CRs. The table is ordered if we matched all of them. */
    int numCRsMatched = 0;
    for (int nextCR = 0; nextCR < matchedCRs.length; nextCR++) {
        if (matchedCRs[nextCR]) {
            numCRsMatched++;
        }
    }

    if (numCRsMatched == matchedCRs.length) {
        return true;
    }

    /* We didn't match all of the CRs, but if
     * we matched all of the key columns then
     * we need to check if the index is unique.
     */
    if (nextKeyColumn == keyColumns.length) {
        // All key columns consumed: ordered iff at most one row qualifies.
        if (cd.getIndexDescriptor().isUnique()) {
            return true;
        } else {
            return false;
        }
    } else {
        // Gave up on a key column with no equality predicate: not ordered.
        return false;
    }
}
3863:
3864: /**
 * Return whether or not this index is ordered exactly on the specified columns,
 * in the specified order.
3866: *
3867: * @param crs The specified ColumnReference[]
3868: * @param cd The ConglomerateDescriptor for the chosen index.
3869: *
3870: * @return Whether or not this index is ordered exactly on the specified columns.
3871: *
3872: * @exception StandardException Thrown on error
3873: */
private boolean isStrictlyOrdered(ColumnReference[] crs,
        ConglomerateDescriptor cd) throws StandardException {
    /* This table is ordered on the specified columns in the specified order if:
     * o For each ColumnReference, it is either the next key column or there
     *   is an equality predicate on all key columns prior to the ColumnReference.
     *   (NOTE: If the index is unique, then it is okay to have a suffix of
     *   unmatched ColumnReferences because the set is known to be ordered. For example:
     *       i1 on (c1, c2), unique
     *       select distinct c3 from t1 where c1 = 1 and c2 = ?;
     *   is ordered on c3 since there will be at most 1 qualifying row.)
     */
    int nextCR = 0;
    int nextKeyColumn = 0;
    int[] keyColumns = cd.getIndexDescriptor()
            .baseColumnPositions();

    // Walk through the CRs
    for (; nextCR < crs.length; nextCR++) {
        /* If we've walked through all of the key columns then
         * we need to check if the index is unique.
         * Beetle 4402
         */
        if (nextKeyColumn == keyColumns.length) {
            if (cd.getIndexDescriptor().isUnique()) {
                // Unique index fully matched: remaining CRs are harmless.
                break;
            } else {
                return false;
            }
        }
        if (crs[nextCR].getColumnNumber() == keyColumns[nextKeyColumn]) {
            // CR matches the next key column: consume the key column.
            nextKeyColumn++;
            continue;
        } else {
            /* Skip over key columns that are pinned by equality predicates
             * until this CR lines up with a key column.
             */
            while (crs[nextCR].getColumnNumber() != keyColumns[nextKeyColumn]) {
                // Stop if there is no equality predicate on this key column
                if (!storeRestrictionList
                        .hasOptimizableEqualityPredicate(this,
                                keyColumns[nextKeyColumn], true)) {
                    return false;
                }

                // Advance to the next key column
                nextKeyColumn++;

                /* If we've walked through all of the key columns then
                 * we need to check if the index is unique.
                 */
                if (nextKeyColumn == keyColumns.length) {
                    if (cd.getIndexDescriptor().isUnique()) {
                        break;
                    } else {
                        return false;
                    }
                }
            }
        }
    }
    return true;
}
3933:
3934: /**
3935: * Is this a one-row result set with the given conglomerate descriptor?
3936: */
3937: private boolean isOneRowResultSet(ConglomerateDescriptor cd,
3938: OptimizablePredicateList predList) throws StandardException {
3939: if (predList == null) {
3940: return false;
3941: }
3942:
3943: if (SanityManager.DEBUG) {
3944: if (!(predList instanceof PredicateList)) {
3945: SanityManager
3946: .THROWASSERT("predList should be a PredicateList, but is a "
3947: + predList.getClass().getName());
3948: }
3949: }
3950:
3951: PredicateList restrictionList = (PredicateList) predList;
3952:
3953: if (!cd.isIndex()) {
3954: return false;
3955: }
3956:
3957: IndexRowGenerator irg = cd.getIndexDescriptor();
3958:
3959: // is this a unique index
3960: if (!irg.isUnique()) {
3961: return false;
3962: }
3963:
3964: int[] baseColumnPositions = irg.baseColumnPositions();
3965:
3966: DataDictionary dd = getDataDictionary();
3967:
3968: // Do we have an exact match on the full key
3969: for (int index = 0; index < baseColumnPositions.length; index++) {
3970: // get the column number at this position
3971: int curCol = baseColumnPositions[index];
3972:
3973: /* Is there a pushable equality predicate on this key column?
3974: * (IS NULL is also acceptable)
3975: */
3976: if (!restrictionList.hasOptimizableEqualityPredicate(this ,
3977: curCol, true)) {
3978: return false;
3979: }
3980:
3981: }
3982:
3983: return true;
3984: }
3985:
3986: private int getDefaultBulkFetch() throws StandardException {
3987: int valInt;
3988: String valStr = PropertyUtil.getServiceProperty(
3989: getLanguageConnectionContext().getTransactionCompile(),
3990: LanguageProperties.BULK_FETCH_PROP,
3991: LanguageProperties.BULK_FETCH_DEFAULT);
3992:
3993: valInt = getIntProperty(valStr,
3994: LanguageProperties.BULK_FETCH_PROP);
3995:
3996: // verify that the specified value is valid
3997: if (valInt <= 0) {
3998: throw StandardException.newException(
3999: SQLState.LANG_INVALID_BULK_FETCH_VALUE, String
4000: .valueOf(valInt));
4001: }
4002:
4003: /*
4004: ** If the value is <= 1, then reset it
4005: ** to UNSET -- this is how customers can
4006: ** override the bulkFetch default to turn
4007: ** it off.
4008: */
4009: return (valInt <= 1) ? UNSET : valInt;
4010: }
4011:
4012: private String getUserSpecifiedIndexName() {
4013: String retval = null;
4014:
4015: if (tableProperties != null) {
4016: retval = tableProperties.getProperty("index");
4017: }
4018:
4019: return retval;
4020: }
4021:
4022: /*
4023: ** RESOLVE: This whole thing should probably be moved somewhere else,
4024: ** like the optimizer or the data dictionary.
4025: */
// Obtain a store cost controller for the given conglomerate via the
// compiler context.
private StoreCostController getStoreCostController(
        ConglomerateDescriptor cd) throws StandardException {
    return getCompilerContext().getStoreCostController(
            cd.getConglomerateNumber());
}
4031:
// Obtain the store cost controller for the base (heap) conglomerate.
private StoreCostController getBaseCostController()
        throws StandardException {
    return getStoreCostController(baseConglomerateDescriptor);
}
4036:
// Lazily-cached estimated row count for the base conglomerate.
private boolean gotRowCount = false;
private long rowCount = 0;

// Return the estimated row count of the base conglomerate, fetching it
// from the store cost controller on first use and caching it after.
private long baseRowCount() throws StandardException {
    if (!gotRowCount) {
        StoreCostController scc = getBaseCostController();
        rowCount = scc.getEstimatedRowCount();
        gotRowCount = true;
    }

    return rowCount;
}
4049:
4050: private DataValueDescriptor[] getRowTemplate(
4051: ConglomerateDescriptor cd, StoreCostController scc)
4052: throws StandardException {
4053: /*
4054: ** If it's for a heap scan, just get all the columns in the
4055: ** table.
4056: */
4057: if (!cd.isIndex())
4058: return templateColumns.buildEmptyRow().getRowArray();
4059:
4060: /* It's an index scan, so get all the columns in the index */
4061: ExecRow emptyIndexRow = templateColumns.buildEmptyIndexRow(
4062: tableDescriptor, cd, scc, getDataDictionary());
4063:
4064: return emptyIndexRow.getRowArray();
4065: }
4066:
// Return the first conglomerate descriptor for this table, populating
// the cached descriptor array if needed.
private ConglomerateDescriptor getFirstConglom()
        throws StandardException {
    getConglomDescs();
    return conglomDescs[0];
}
4072:
4073: private ConglomerateDescriptor getNextConglom(
4074: ConglomerateDescriptor currCD) throws StandardException {
4075: int index = 0;
4076:
4077: for (; index < conglomDescs.length; index++) {
4078: if (currCD == conglomDescs[index]) {
4079: break;
4080: }
4081: }
4082:
4083: if (index < conglomDescs.length - 1) {
4084: return conglomDescs[index + 1];
4085: } else {
4086: return null;
4087: }
4088: }
4089:
// Lazily cache this table's conglomerate descriptors in conglomDescs.
private void getConglomDescs() throws StandardException {
    if (conglomDescs == null) {
        conglomDescs = tableDescriptor.getConglomerateDescriptors();
    }
}
4095:
4096: /**
 * Set the information gathered from the parent table that is
 * required to perform a referential action on the dependent table.
4099: */
public void setRefActionInfo(long fkIndexConglomId,
        int[] fkColArray, String parentResultSetId,
        boolean dependentScan) {

    // Record the foreign-key index conglomerate, its column positions,
    // the parent result set id, and whether this is a dependent scan.
    this.fkIndexConglomId = fkIndexConglomId;
    this.fkColArray = fkColArray;
    this.raParentResultSetId = parentResultSetId;
    this.raDependentScan = dependentScan;
}
4109:
4110: }
|