/*

   Derby - Class org.apache.derby.iapi.store.access.conglomerate.ScanManager

   Licensed to the Apache Software Foundation (ASF) under one or more
   contributor license agreements.  See the NOTICE file distributed with
   this work for additional information regarding copyright ownership.
   The ASF licenses this file to you under the Apache License, Version 2.0
   (the "License"); you may not use this file except in compliance with
   the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 */

package org.apache.derby.iapi.store.access.conglomerate;

import org.apache.derby.iapi.store.access.GroupFetchScanController;
import org.apache.derby.iapi.store.access.ScanController;
import org.apache.derby.iapi.store.raw.Page;
import org.apache.derby.iapi.error.StandardException;

import org.apache.derby.iapi.store.access.BackingStoreHashtable;

/**

The ScanManager interface contains those methods, private to access method
implementors, which are necessary to implement scans on Conglomerates.
Clients of scans use the ScanController interface to interact with the scan.
<P>
@see ScanController

**/

public interface ScanManager extends ScanController,
        GroupFetchScanController {

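    // The split between the two views of a scan can be pictured as below.
    // This is only an illustrative sketch: "heap" and its openScan()
    // arguments are hypothetical stand-ins for whatever access method
    // creates the scan; only the ScanManager/ScanController relationship
    // is taken from this interface.
    //
    //     // Inside the access method implementation: the full private view,
    //     // including closeForEndTransaction(), fetchSet() and savePosition().
    //     ScanManager scan = heap.openScan(xact, qualifiers);
    //
    //     // What is handed back to clients: only the ScanController view.
    //     ScanController client_scan = scan;
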
    /**
     * Close the scan as part of terminating a transaction.
     * <p>
     * Use this call to close the scan's resources as part of committing or
     * aborting a transaction.  The normal close() routine may do some
     * cleanup that is either unnecessary, or not correct, given the unknown
     * state of the scan following a transaction-ending error.  Use this
     * call when closing all scans as part of an abort of a transaction.
     *
     * @param closeHeldScan If true, close the scan even if it was opened to
     *                      be kept open across commit.  This is used to
     *                      close such scans on abort.
     *
     * @return boolean indicating whether the close resulted in a real close
     *         of the scan.  A held scan will return false if called with
     *         closeForEndTransaction(false); otherwise it will return true.
     *         A non-held scan will always return true.
     *
     * @exception StandardException Standard exception policy.
     **/
    boolean closeForEndTransaction(boolean closeHeldScan)
        throws StandardException;

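    // A minimal sketch of how a transaction-end path might drive the method
    // above.  The "openScans" list and the "isAbort" flag are hypothetical
    // names used only for illustration; they are not part of this interface.
    //
    //     for (Iterator<ScanManager> it = openScans.iterator(); it.hasNext(); )
    //     {
    //         // On abort, force every scan closed, including held scans.
    //         // On commit (isAbort == false), a held scan stays open and
    //         // closeForEndTransaction() returns false, so it remains in
    //         // the list of open scans.
    //         if (it.next().closeForEndTransaction(isAbort))
    //             it.remove();
    //     }
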
    /**
     * Insert all rows that qualify for the current scan into the input
     * hash table.
     * <p>
     * This routine executes the entire scan as described in the openScan
     * call.  For every qualifying unique row value an entry is placed into
     * the Hashtable.  For unique row values the entry in the Hashtable has
     * a key value of the object stored in row[key_column_number], and the
     * value of the data is row.  For row values with duplicates, the key
     * value is also row[key_column_number], but the value of the data is a
     * Vector of rows.  The caller will have to call "instanceof" on the
     * data value object if duplicates are expected, to determine if the
     * data value of the Hashtable entry is a row or a Vector of rows.
     * <p>
     * Note that for this routine to work efficiently the caller must ensure
     * that the object in row[key_column_number] implements the hashCode and
     * equals methods as appropriate for its datatype.
     * <p>
     * It is expected that this call will be the first and only call made in
     * an openscan.  Qualifiers and stop position of the openscan are applied
     * just as in a normal scan.  This call is logically equivalent to the
     * caller performing the following:
     *
     * import java.util.Hashtable;
     * import java.util.Vector;
     *
     * hash_table = new Hashtable();
     *
     * while (next())
     * {
     *     row = create_new_row();
     *     fetch(row);
     *     if ((duplicate_value =
     *             hash_table.put(row[key_column_number], row)) != null)
     *     {
     *         Vector row_vec;
     *
     *         // inserted a duplicate
     *         if (duplicate_value instanceof Vector)
     *         {
     *             // reuse the existing vector of duplicates
     *             row_vec = (Vector) duplicate_value;
     *         }
     *         else
     *         {
     *             // allocate vector to hold duplicates
     *             row_vec = new Vector(2);
     *
     *             // insert original row into vector
     *             row_vec.addElement(duplicate_value);
     *         }
     *
     *         // insert new row into vector
     *         row_vec.addElement(row);
     *
     *         // put the vector as the data rather than the row
     *         hash_table.put(row[key_column_number], row_vec);
     *     }
     * }
     * <p>
     * The columns of the row will be the standard columns returned as part
     * of a scan, as described by the validColumns - see openScan for a
     * description.
     * RESOLVE - is this ok? or should I hard code somehow the row to
     *           be the first column and the row location?
     * <p>
     * No overflow to external storage is provided, so calling this routine
     * on a 1-gigabyte conglomerate will require at least 1 gigabyte of
     * memory (probably failing with a Java out-of-memory condition).  If
     * this routine gets an out-of-memory condition, or if "max_rowcnt" is
     * exceeded, then the routine will give up, empty the Hashtable, and
     * return.
     * <p>
     * On exit from this routine, whether or not the fetchSet() succeeded,
     * the scan is complete and is positioned just as if the scan had been
     * drained by calling "next()" until it returned false (i.e. subsequent
     * fetchNext() and next() calls will return false).
     * reopenScan() can be called to restart the scan.
     * <p>
     *
     * RESOLVE - until we get row counts, what should we do for the size,
     *           capacity, and load factor of the hash table?
     *           For now it is up to the caller to create the Hashtable;
     *           Access does not reset any parameters.
     * <p>
     * RESOLVE - I am not sure if access should be in charge of allocating
     *           the new row objects.  I know that I can do this in the
     *           case of btrees, but I don't think I can do this in heaps.
     *           Maybe this is solved by work to be done on the sort
     *           interface.
     *
     *
     * @param max_rowcnt         The maximum number of rows to insert into
     *                           the hash table.  Pass in -1 if there is no
     *                           maximum.
     * @param key_column_numbers The column numbers of the columns in the
     *                           scan result row to be the key to the
     *                           Hashtable.  "0" is the first column in the
     *                           scan result row (which may be different
     *                           than the first column in the table of the
     *                           scan).
     * @param hash_table         The BackingStoreHashtable to load the
     *                           qualifying rows into.
     *
     * @exception StandardException Standard exception policy.
     **/
    void fetchSet(long max_rowcnt, int[] key_column_numbers,
                  BackingStoreHashtable hash_table) throws StandardException;

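    // A minimal usage sketch of fetchSet(), assuming the caller already
    // holds an open ScanManager ("scan") and has constructed a
    // BackingStoreHashtable ("hash_table") over the same row format; both
    // variable names are illustrative only.
    //
    //     // Drain the scan into the hash table, keyed on the first column
    //     // of the scan result row, with no limit on the number of rows.
    //     scan.fetchSet(-1, new int[] { 0 }, hash_table);
    //
    //     // The scan is now positioned past its last row; next() returns
    //     // false until reopenScan() is called.
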
    /**
     * Do the work necessary to maintain the current position in the scan.
     * <p>
     * The latched page in the conglomerate "conglom" is changing; do
     * whatever is necessary to maintain the current position of the scan.
     * For some conglomerates this may be a no-op.
     * <p>
     *
     * @param conglom Conglomerate object of the conglomerate being changed.
     * @param page    Page in the conglomerate being changed.
     *
     * @exception StandardException Standard exception policy.
     **/
    void savePosition(Conglomerate conglom, Page page)
        throws StandardException;
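
    // A minimal sketch of when savePosition() might be called, assuming a
    // hypothetical conglomerate implementation that is about to rearrange
    // rows on a latched page.  "openScans", "this_conglomerate", and
    // "latched_page" are illustrative names only.
    //
    //     // Before changing the latched page, give every open scan a
    //     // chance to record its position in a stable form.
    //     for (ScanManager scan : openScans)
    //     {
    //         scan.savePosition(this_conglomerate, latched_page);
    //     }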
}