/*

   Derby - Class org.apache.derby.impl.sql.compile.SingleChildResultSetNode

   Licensed to the Apache Software Foundation (ASF) under one or more
   contributor license agreements. See the NOTICE file distributed with
   this work for additional information regarding copyright ownership.
   The ASF licenses this file to you under the Apache License, Version 2.0
   (the "License"); you may not use this file except in compliance with
   the License. You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 */

package org.apache.derby.impl.sql.compile;

import org.apache.derby.iapi.services.context.ContextManager;

import org.apache.derby.iapi.sql.compile.AccessPath;
import org.apache.derby.iapi.sql.compile.CostEstimate;
import org.apache.derby.iapi.sql.compile.Optimizable;
import org.apache.derby.iapi.sql.compile.OptimizableList;
import org.apache.derby.iapi.sql.compile.OptimizablePredicate;
import org.apache.derby.iapi.sql.compile.OptimizablePredicateList;
import org.apache.derby.iapi.sql.compile.Optimizer;
import org.apache.derby.iapi.sql.compile.Visitable;
import org.apache.derby.iapi.sql.compile.Visitor;
import org.apache.derby.iapi.sql.compile.RequiredRowOrdering;
import org.apache.derby.iapi.sql.compile.C_NodeTypes;

import org.apache.derby.iapi.sql.dictionary.DataDictionary;
import org.apache.derby.iapi.sql.dictionary.TableDescriptor;

import org.apache.derby.iapi.sql.Activation;
import org.apache.derby.iapi.sql.ResultSet;

import org.apache.derby.iapi.error.StandardException;

import org.apache.derby.impl.sql.compile.ActivationClassBuilder;

import org.apache.derby.iapi.services.sanity.SanityManager;

import org.apache.derby.iapi.util.JBitSet;

import java.util.Properties;
import java.util.Vector;
/**
 * A SingleChildResultSetNode represents a result set with a single child.
 *
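 * <p>
 * Illustrative sketch only (not part of the original documentation): most
 * operations on this node simply delegate to its single child, and
 * same-package callers rewire that child in place.  The concrete subclass
 * named in the comment below is an assumption for the example.
 * <pre>{@code
 *     SingleChildResultSetNode node = ...; // e.g. a ProjectRestrictNode
 *     ResultSetNode child = node.getChildResult();
 *
 *     // rewrite/replace the child, then plug it back in
 *     node.setChildResult(child);
 * }</pre>
 *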
 * @author Jerry Brenner
 */

abstract class SingleChildResultSetNode extends FromTable {
    /**
     * ResultSetNode under the SingleChildResultSetNode
     */
    ResultSetNode childResult;

    // Does this node itself hold the truly-the-best access path for the underlying tree?
    protected boolean hasTrulyTheBestAccessPath;

    /**
     * Initializer for a SingleChildResultSetNode.
     *
     * @param childResult     The child ResultSetNode
     * @param tableProperties Properties list associated with the table
     */
    public void init(Object childResult, Object tableProperties) {
        /* correlationName is always null */
        super.init(null, tableProperties);
        this.childResult = (ResultSetNode) childResult;

        /* Propagate the child's referenced table map, if one exists */
        if (this.childResult.getReferencedTableMap() != null) {
            referencedTableMap =
                (JBitSet) this.childResult.getReferencedTableMap().clone();
        }
    }

    /** @see Optimizable#getTrulyTheBestAccessPath */
    public AccessPath getTrulyTheBestAccessPath() {
        if (hasTrulyTheBestAccessPath) {
            return super.getTrulyTheBestAccessPath();
        }

        if (childResult instanceof Optimizable)
            return ((Optimizable) childResult).getTrulyTheBestAccessPath();

        return super.getTrulyTheBestAccessPath();
    }

    /**
     * Return the childResult from this node.
     *
     * @return ResultSetNode The childResult from this node.
     */
    public ResultSetNode getChildResult() {
        return childResult;
    }

    /**
     * Set the childResult for this node.
     *
     * @param childResult The new childResult for this node.
     */
    void setChildResult(ResultSetNode childResult) {
        this.childResult = childResult;
    }

    /**
     * @see Optimizable#pullOptPredicates
     *
     * @exception StandardException Thrown on error
     */
    public void pullOptPredicates(OptimizablePredicateList optimizablePredicates)
            throws StandardException {
        if (childResult instanceof Optimizable) {
            ((Optimizable) childResult).pullOptPredicates(optimizablePredicates);
        }
    }

    /** @see Optimizable#forUpdate */
    public boolean forUpdate() {
        if (childResult instanceof Optimizable) {
            return ((Optimizable) childResult).forUpdate();
        } else {
            return super.forUpdate();
        }
    }

    /**
     * @see Optimizable#initAccessPaths
     */
    public void initAccessPaths(Optimizer optimizer) {
        super.initAccessPaths(optimizer);
        if (childResult instanceof Optimizable) {
            ((Optimizable) childResult).initAccessPaths(optimizer);
        }
    }

    /**
     * @see Optimizable#updateBestPlanMap
     *
     * Makes a call to add/load/remove a plan mapping for this node,
     * and then makes the necessary call to recurse on this node's
     * child, in order to ensure that we've handled the full plan
     * all the way down this node's subtree.
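     *
     * <p>
     * Hedged sketch of the calling pattern (illustrative only; the
     * {@code ADD_PLAN} and {@code REMOVE_PLAN} action constants named below
     * are assumed, not confirmed, to be the ones declared on {@code Optimizable}):
     * <pre>{@code
     *     // add a mapping for this node's current best plan, keyed by planKey
     *     node.updateBestPlanMap(ADD_PLAN, planKey);
     *     // later, remove that mapping once it is no longer needed
     *     node.updateBestPlanMap(REMOVE_PLAN, planKey);
     * }</pre>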
     */
    public void updateBestPlanMap(short action, Object planKey)
            throws StandardException {
        super.updateBestPlanMap(action, planKey);

        // Now walk the child. Note that if the child is not an
        // Optimizable and the call to child.getOptimizerImpl()
        // returns null, then that means we haven't tried to optimize
        // the child yet. So in that case there's nothing to
        // add/load.

        if (childResult instanceof Optimizable) {
            ((Optimizable) childResult).updateBestPlanMap(action, planKey);
        } else if (childResult.getOptimizerImpl() != null) {
            childResult.getOptimizerImpl().updateBestPlanMaps(action, planKey);
        }
    }

    /**
     * Prints the sub-nodes of this object. See QueryTreeNode.java for
     * how tree printing is supposed to work.
     *
     * @param depth The depth of this node in the tree
     */
    public void printSubNodes(int depth) {
        if (SanityManager.DEBUG) {
            super.printSubNodes(depth);

            if (childResult != null) {
                printLabel(depth, "childResult: ");
                childResult.treePrint(depth + 1);
            }
        }
    }

    /**
     * Search to see if a query references the specified table name.
     *
     * @param name      Table name (String) to search for.
     * @param baseTable Whether or not name is for a base table
     *
     * @return true if found, else false
     *
     * @exception StandardException Thrown on error
     */
    public boolean referencesTarget(String name, boolean baseTable)
            throws StandardException {
        return childResult.referencesTarget(name, baseTable);
    }

    /**
     * Return true if the node references SESSION schema tables (temporary or permanent)
     *
     * @return true if references SESSION schema tables, else false
     *
     * @exception StandardException Thrown on error
     */
    public boolean referencesSessionSchema() throws StandardException {
        return childResult.referencesSessionSchema();
    }

    /**
     * Set the (query block) level (0-based) for this FromTable.
     *
     * @param level The query block level for this FromTable.
     */
    public void setLevel(int level) {
        super.setLevel(level);
        if (childResult instanceof FromTable) {
            ((FromTable) childResult).setLevel(level);
        }
    }

    /**
     * Return whether or not this ResultSetNode contains a subquery with a
     * reference to the specified target.
     *
     * @param name      The table name.
     * @param baseTable Whether or not the name is for a base table.
     *
     * @return boolean Whether or not a reference to the table was found.
     *
     * @exception StandardException Thrown on error
     */
    boolean subqueryReferencesTarget(String name, boolean baseTable)
            throws StandardException {
        return childResult.subqueryReferencesTarget(name, baseTable);
    }

    /**
     * Put a ProjectRestrictNode on top of each FromTable in the FromList.
     * ColumnReferences must continue to point to the same ResultColumn, so
     * that ResultColumn must percolate up to the new PRN. However,
     * that ResultColumn will point to a new expression, a VirtualColumnNode,
     * which points to the FromTable and the ResultColumn that is the source for
     * the ColumnReference.
     * (The new PRN will have the original of the ResultColumnList and
     * the ResultColumns from that list. The FromTable will get shallow copies
     * of the ResultColumnList and its ResultColumns. ResultColumn.expression
     * will remain at the FromTable, with the PRN getting a new
     * VirtualColumnNode for each ResultColumn.expression.)
     * We then project out the non-referenced columns. If there are no referenced
     * columns, then the PRN's ResultColumnList will consist of a single ResultColumn
     * whose expression is 1.
     *
     * @param numTables Number of tables in the DML Statement
     * @param gbl       The group by list, if any
     * @param fromList  The from list, if any
     *
     * @return The generated ProjectRestrictNode atop the original FromTable.
     *
     * @exception StandardException Thrown on error
     */
    public ResultSetNode preprocess(int numTables, GroupByList gbl,
                                    FromList fromList) throws StandardException {
        childResult = childResult.preprocess(numTables, gbl, fromList);

        /* Build the referenced table map */
        referencedTableMap = (JBitSet) childResult.getReferencedTableMap().clone();

        return this;
    }

    /**
     * Add a new predicate to the list. This is useful when doing subquery
     * transformations, when we build a new predicate with the left side of
     * the subquery operator and the subquery's result column.
     *
     * @param predicate The predicate to add
     *
     * @return ResultSetNode The new top of the tree.
     *
     * @exception StandardException Thrown on error
     */
    public ResultSetNode addNewPredicate(Predicate predicate)
            throws StandardException {
        childResult = childResult.addNewPredicate(predicate);
        return this;
    }

    /**
     * Push expressions down to the first ResultSetNode which can do expression
     * evaluation and has the same referenced table map.
     * RESOLVE - This means only pushing down single table expressions to
     * DistinctNodes today. Once we have a better understanding of how
     * the optimizer will work, we can push down join clauses.
     *
     * @param predicateList The PredicateList.
     *
     * @exception StandardException Thrown on error
     */
    public void pushExpressions(PredicateList predicateList)
            throws StandardException {
        if (childResult instanceof FromTable) {
            ((FromTable) childResult).pushExpressions(predicateList);
        }
    }

    /**
     * Evaluate whether or not the subquery in a FromSubquery is flattenable.
     * Currently, a FSqry is flattenable if all of the following are true:
     *   o  Subquery is a SelectNode.
     *   o  It contains no top level subqueries. (RESOLVE - we can relax this)
     *   o  It does not contain a group by or having clause
     *   o  It does not contain aggregates.
     *
     * @param fromList The outer from list
     *
     * @return boolean Whether or not the FromSubquery is flattenable.
     */
    public boolean flattenableInFromSubquery(FromList fromList) {
        /* Flattening currently involves merging predicates and FromLists.
         * We don't have a FromList, so we can't flatten for now.
         */
        /* RESOLVE - this will introduce yet another unnecessary PRN */
        return false;
    }

    /**
     * Ensure that the top of the RSN tree has a PredicateList.
     *
     * @param numTables The number of tables in the query.
     * @return ResultSetNode A RSN tree with a node which has a PredicateList on top.
     *
     * @exception StandardException Thrown on error
     */
    public ResultSetNode ensurePredicateList(int numTables)
            throws StandardException {
        return this;
    }

    /**
     * Optimize this SingleChildResultSetNode.
     *
     * @param dataDictionary The DataDictionary to use for optimization
     * @param predicates     The PredicateList to optimize. This should
     *                       be a join predicate.
     * @param outerRows      The number of outer joining rows
     *
     * @return ResultSetNode The top of the optimized subtree
     *
     * @exception StandardException Thrown on error
     */
    public ResultSetNode optimize(DataDictionary dataDictionary,
                                  PredicateList predicates, double outerRows)
            throws StandardException {
        /* We need to implement this method since a NormalizeResultSetNode (NRSN)
         * can appear above a SelectNode in a query tree.
         */
        childResult = childResult.optimize(dataDictionary, predicates, outerRows);

        Optimizer optimizer =
            getOptimizer((FromList) getNodeFactory().getNode(
                             C_NodeTypes.FROM_LIST,
                             getNodeFactory().doJoinOrderOptimization(),
                             getContextManager()),
                         predicates,
                         dataDictionary,
                         (RequiredRowOrdering) null);
        costEstimate = optimizer.newCostEstimate();
        costEstimate.setCost(childResult.getCostEstimate().getEstimatedCost(),
                             childResult.getCostEstimate().rowCount(),
                             childResult.getCostEstimate().singleScanRowCount());

        return this;
    }

    /**
     * @see ResultSetNode#modifyAccessPaths
     *
     * @exception StandardException Thrown on error
     */
    public ResultSetNode modifyAccessPaths() throws StandardException {
        childResult = childResult.modifyAccessPaths();

        return this;
    }

    /**
     * @see ResultSetNode#changeAccessPath
     *
     * @exception StandardException Thrown on error
     */
    public ResultSetNode changeAccessPath() throws StandardException {
        childResult = childResult.changeAccessPath();
        return this;
    }

    /**
     * Determine whether or not the specified name is an exposed name in
     * the current query block.
     *
     * @param name       The specified name to search for as an exposed name.
     * @param schemaName Schema name, if non-null.
     * @param exactMatch Whether or not we need an exact match on specified schema and table
     *                   names or match on table id.
     *
     * @return The FromTable, if any, with the exposed name.
     *
     * @exception StandardException Thrown on error
     */
    protected FromTable getFromTableByName(String name,
                                           String schemaName, boolean exactMatch)
            throws StandardException {
        return childResult.getFromTableByName(name, schemaName, exactMatch);
    }

    /**
     * Decrement (query block) level (0-based) for this FromTable.
     * This is useful when flattening a subquery.
     *
     * @param decrement The amount to decrement by.
     */
    void decrementLevel(int decrement) {
        super.decrementLevel(decrement);
        childResult.decrementLevel(decrement);
    }

    /**
     * Get the lock mode for the target of an update statement
     * (a delete or update). The update mode will always be row for
     * CurrentOfNodes. It will be table if there is no where clause.
     *
     * @return The lock mode
     */
    public int updateTargetLockMode() {
        return childResult.updateTargetLockMode();
    }

    /**
     * Return whether or not the underlying ResultSet tree
     * is ordered on the specified columns.
     * RESOLVE - This method currently only considers the outermost table
     * of the query block.
     *
     * @param crs             The specified ColumnReference[]
     * @param permuteOrdering Whether or not the order of the CRs in the array can be permuted
     * @param fbtVector       Vector that is to be filled with the FromBaseTable
     *
     * @return Whether the underlying ResultSet tree
     *         is ordered on the specified column.
     *
     * @exception StandardException Thrown on error
     */
    boolean isOrderedOn(ColumnReference[] crs, boolean permuteOrdering,
                        Vector fbtVector) throws StandardException {
        return childResult.isOrderedOn(crs, permuteOrdering, fbtVector);
    }

    /**
     * Return whether or not the underlying ResultSet tree will return
     * a single row, at most.
     * This is important for join nodes where we can save the extra next
     * on the right side if we know that it will return at most 1 row.
     *
     * @return Whether or not the underlying ResultSet tree will return a single row.
     * @exception StandardException Thrown on error
     */
    public boolean isOneRowResultSet() throws StandardException {
        // Default is false
        return childResult.isOneRowResultSet();
    }

    /**
     * Return whether or not the underlying ResultSet tree is for a NOT EXISTS join.
     *
     * @return Whether or not the underlying ResultSet tree is for a NOT EXISTS.
     */
    public boolean isNotExists() {
        return childResult.isNotExists();
    }

    /**
     * Determine whether we need to do reflection in order to do the projection.
     * Reflection is only needed if there is at least 1 column which is not
     * simply selecting the source column.
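     * For example (illustrative only, not from the original documentation):
     * a projection that just selects source columns, as in
     * {@code SELECT a, b FROM t}, needs no reflection, whereas a computed
     * column such as {@code a + 1} does.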
     *
     * @return Whether or not we need to do reflection in order to do
     *         the projection.
     */
    protected boolean reflectionNeededForProjection() {
        return !(resultColumns.allExpressionsAreColumns(childResult));
    }

    /**
     * Replace any DEFAULTs with the associated tree for the default.
     *
     * @param ttd The TableDescriptor for the target table.
     * @param tcl The RCL for the target table.
     *
     * @exception StandardException Thrown on error
     */
    void replaceDefaults(TableDescriptor ttd, ResultColumnList tcl)
            throws StandardException {
        childResult.replaceDefaults(ttd, tcl);
    }

    /**
     * Notify the underlying result set tree that the result is
     * ordering dependent. (For example, no bulk fetch on an index
     * if under an IndexRowToBaseRow.)
     */
    void markOrderingDependent() {
        childResult.markOrderingDependent();
    }

    /**
     * Get the final CostEstimate for this node.
     *
     * @return The final CostEstimate for this node, which is
     *         the final cost estimate for the child node.
     */
    public CostEstimate getFinalCostEstimate() throws StandardException {
        /*
        ** The cost estimate will be set here if either optimize() or
        ** optimizeIt() was called on this node. It's also possible
        ** that optimization was done directly on the child node,
        ** in which case the cost estimate will be null here.
        */
        if (costEstimate == null) {
            return childResult.getFinalCostEstimate();
        } else {
            return costEstimate;
        }
    }

    /**
     * Accept a visitor, and call v.visit()
     * on child nodes as necessary.
     *
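     * <p>
     * Hedged usage sketch (illustrative only; {@code someVisitor} stands for
     * an existing {@code Visitor} implementation and is not defined here):
     * <pre>{@code
     *     // If someVisitor.skipChildren(node) returns true, only this node is
     *     // visited; otherwise the child is visited too (and may be replaced),
     *     // unless someVisitor.stopTraversal() cuts the walk short.
     *     Visitable rewritten = node.accept(someVisitor);
     * }</pre>
     *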
     * @param v the visitor
     *
     * @exception StandardException on error
     */
    public Visitable accept(Visitor v) throws StandardException {
        if (v.skipChildren(this)) {
            return v.visit(this);
        }

        Visitable returnNode = super.accept(v);

        if (childResult != null && !v.stopTraversal()) {
            childResult = (ResultSetNode) childResult.accept(v);
        }

        return returnNode;
    }
}