001: /*
002:
003: Derby - Class org.apache.derby.iapi.store.access.DiskHashtable
004:
005: Licensed to the Apache Software Foundation (ASF) under one or more
006: contributor license agreements. See the NOTICE file distributed with
007: this work for additional information regarding copyright ownership.
008: The ASF licenses this file to you under the Apache License, Version 2.0
009: (the "License"); you may not use this file except in compliance with
010: the License. You may obtain a copy of the License at
011:
012: http://www.apache.org/licenses/LICENSE-2.0
013:
014: Unless required by applicable law or agreed to in writing, software
015: distributed under the License is distributed on an "AS IS" BASIS,
016: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
017: See the License for the specific language governing permissions and
018: limitations under the License.
019:
020: */
021:
022: package org.apache.derby.iapi.store.access;
023:
024: import java.util.Enumeration;
025: import java.util.NoSuchElementException;
026: import java.util.Properties;
027: import java.util.Vector;
028: import org.apache.derby.iapi.error.StandardException;
029: import org.apache.derby.iapi.services.io.FormatableBitSet;
030: import org.apache.derby.iapi.types.DataValueDescriptor;
031: import org.apache.derby.iapi.types.SQLInteger;
032: import org.apache.derby.impl.store.access.heap.HeapRowLocation;
033: import org.apache.derby.iapi.types.RowLocation;
034: import org.apache.derby.iapi.services.context.ContextService;
035: import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;
036:
/**
 * This class is used by BackingStoreHashtable when the BackingStoreHashtable must spill to disk.
 * It implements the methods of a hash table — put, get, remove, and elements — however it is not
 * implemented as a hash table. In order to minimize the amount of unique code it is implemented
 * using a Btree and a heap conglomerate. The Btree indexes the hash code of the row key, because
 * the actual key may be too long for our Btree implementation.
 *
 * Created: Fri Jan 28 13:58:03 2005
 *
 * @author <a href="mailto:klebanof@us.ibm.com">Jack Klebanoff</a>
 * @version 1.0
 */
049:
050: public class DiskHashtable {
051: private final long rowConglomerateId;
052: private ConglomerateController rowConglomerate;
053: private final long btreeConglomerateId;
054: private ConglomerateController btreeConglomerate;
055: private final DataValueDescriptor[] btreeRow;
056: private final int[] key_column_numbers;
057: private final boolean remove_duplicates;
058: private final TransactionController tc;
059: private final DataValueDescriptor[] row;
060: private final DataValueDescriptor[] scanKey = { new SQLInteger() };
061: private int size;
062: private boolean keepStatistics;
063:
064: /**
065: * Creates a new <code>DiskHashtable</code> instance.
066: *
067: * @param tc
068: * @param template An array of DataValueDescriptors that serves as a template for the rows.
069: * @param key_column_numbers The indexes of the key columns (0 based)
070: * @param remove_duplicates If true then rows with duplicate keys are removed
071: * @param keepAfterCommit If true then the hash table is kept after a commit
072: */
073: public DiskHashtable(TransactionController tc,
074: DataValueDescriptor[] template, int[] key_column_numbers,
075: boolean remove_duplicates, boolean keepAfterCommit)
076: throws StandardException {
077: this .tc = tc;
078: this .key_column_numbers = key_column_numbers;
079: this .remove_duplicates = remove_duplicates;
080: LanguageConnectionContext lcc = (LanguageConnectionContext) ContextService
081: .getContextOrNull(LanguageConnectionContext.CONTEXT_ID);
082: keepStatistics = (lcc != null)
083: && lcc.getRunTimeStatisticsMode();
084: row = new DataValueDescriptor[template.length];
085: for (int i = 0; i < row.length; i++)
086: row[i] = template[i].getNewNull();
087: int tempFlags = keepAfterCommit ? (TransactionController.IS_TEMPORARY | TransactionController.IS_KEPT)
088: : TransactionController.IS_TEMPORARY;
089:
090: rowConglomerateId = tc.createConglomerate("heap", template,
091: (ColumnOrdering[]) null, (Properties) null, tempFlags);
092: rowConglomerate = tc
093: .openConglomerate(rowConglomerateId, keepAfterCommit,
094: TransactionController.OPENMODE_FORUPDATE,
095: TransactionController.MODE_TABLE,
096: TransactionController.ISOLATION_NOLOCK /* Single thread only */);
097:
098: btreeRow = new DataValueDescriptor[] { new SQLInteger(),
099: rowConglomerate.newRowLocationTemplate() };
100: Properties btreeProps = new Properties();
101: btreeProps.put("baseConglomerateId", String
102: .valueOf(rowConglomerateId));
103: btreeProps.put("rowLocationColumn", "1");
104: btreeProps.put("allowDuplicates", "false"); // Because the row location is part of the key
105: btreeProps.put("nKeyFields", "2"); // Include the row location column
106: btreeProps.put("nUniqueColumns", "2"); // Include the row location column
107: btreeProps.put("maintainParentLinks", "false");
108: btreeConglomerateId = tc.createConglomerate("BTREE", btreeRow,
109: (ColumnOrdering[]) null, btreeProps, tempFlags);
110:
111: btreeConglomerate = tc
112: .openConglomerate(btreeConglomerateId, keepAfterCommit,
113: TransactionController.OPENMODE_FORUPDATE,
114: TransactionController.MODE_TABLE,
115: TransactionController.ISOLATION_NOLOCK /* Single thread only */);
116: } // end of constructor
117:
118: public void close() throws StandardException {
119: btreeConglomerate.close();
120: rowConglomerate.close();
121: tc.dropConglomerate(btreeConglomerateId);
122: tc.dropConglomerate(rowConglomerateId);
123: } // end of close
124:
125: /**
126: * Put a new row in the overflow structure.
127: *
128: * @param row The row to be inserted.
129: *
130: * @return true if the row was added,
131: * false if it was not added (because it was a duplicate and we are eliminating duplicates).
132: *
133: * @exception StandardException standard error policy
134: */
135: public boolean put(Object key, Object[] row)
136: throws StandardException {
137: boolean isDuplicate = false;
138: if (remove_duplicates || keepStatistics) {
139: // Go to the work of finding out whether it is a duplicate
140: isDuplicate = (getRemove(key, false, true) != null);
141: if (remove_duplicates && isDuplicate)
142: return false;
143: }
144: rowConglomerate.insertAndFetchLocation(
145: (DataValueDescriptor[]) row, (RowLocation) btreeRow[1]);
146: btreeRow[0].setValue(key.hashCode());
147: btreeConglomerate.insert(btreeRow);
148: if (keepStatistics && !isDuplicate)
149: size++;
150: return true;
151: } // end of put
152:
153: /**
154: * Get a row from the overflow structure.
155: *
156: * @param key If the rows only have one key column then the key value. If there is more than one
157: * key column then a KeyHasher
158: *
159: * @return null if there is no corresponding row,
160: * the row (DataValueDescriptor[]) if there is exactly one row with the key
161: * a Vector of all the rows with the key if there is more than one.
162: *
163: * @exception StandardException
164: */
165: public Object get(Object key) throws StandardException {
166: return getRemove(key, false, false);
167: }
168:
169: private Object getRemove(Object key, boolean remove,
170: boolean existenceOnly) throws StandardException {
171: int hashCode = key.hashCode();
172: int rowCount = 0;
173: Object retValue = null;
174:
175: scanKey[0].setValue(hashCode);
176: ScanController scan = tc.openScan(
177: btreeConglomerateId,
178: false, // do not hold
179: remove ? TransactionController.OPENMODE_FORUPDATE : 0,
180: TransactionController.MODE_TABLE,
181: TransactionController.ISOLATION_READ_UNCOMMITTED,
182: null, // Scan all the columns
183: scanKey, ScanController.GE, (Qualifier[][]) null,
184: scanKey, ScanController.GT);
185: try {
186: while (scan.fetchNext(btreeRow)) {
187: if (rowConglomerate
188: .fetch((RowLocation) btreeRow[1], row,
189: (FormatableBitSet) null /* all columns */)
190: && rowMatches(row, key)) {
191: if (existenceOnly)
192: return this ;
193:
194: rowCount++;
195: if (rowCount == 1) {
196: retValue = BackingStoreHashtable
197: .shallowCloneRow(row);
198: } else {
199: Vector v;
200: if (rowCount == 2) {
201: v = new Vector(2);
202: v.add(retValue);
203: retValue = v;
204: } else {
205: v = (Vector) retValue;
206: }
207: v.add(BackingStoreHashtable
208: .shallowCloneRow(row));
209: }
210: if (remove) {
211: rowConglomerate
212: .delete((RowLocation) btreeRow[1]);
213: scan.delete();
214: size--;
215: }
216: if (remove_duplicates)
217: // This must be the only row with the key
218: return retValue;
219: }
220: }
221: } finally {
222: scan.close();
223: }
224: return retValue;
225: } // end of getRemove
226:
227: private boolean rowMatches(DataValueDescriptor[] row, Object key) {
228: if (key_column_numbers.length == 1)
229: return row[key_column_numbers[0]].equals(key);
230:
231: KeyHasher kh = (KeyHasher) key;
232: for (int i = 0; i < key_column_numbers.length; i++) {
233: if (!row[key_column_numbers[i]].equals(kh.getObject(i)))
234: return false;
235: }
236: return true;
237: } // end of rowMatches
238:
239: /**
240: * remove all rows with a given key from the hash table.
241: *
242: * @param key The key of the rows to remove.
243: *
244: * @return The removed row(s).
245: *
246: * @exception StandardException Standard exception policy.
247: **/
248: public Object remove(Object key) throws StandardException {
249: return getRemove(key, true, false);
250: } // end of remove
251:
252: /**
253: * @return The number of rows in the hash table
254: */
255: public int size() {
256: return size;
257: }
258:
259: /**
260: * Return an Enumeration that can be used to scan entire table.
261: * <p>
262: * RESOLVE - is it worth it to support this routine?
263: *
264: * @return The Enumeration.
265: *
266: * @exception StandardException Standard exception policy.
267: **/
268: public Enumeration elements() throws StandardException {
269: return new ElementEnum();
270: }
271:
272: private class ElementEnum implements Enumeration {
273: private ScanController scan;
274: private boolean hasMore;
275:
276: ElementEnum() {
277: try {
278: scan = tc.openScan(
279: rowConglomerateId,
280: false, // do not hold
281: 0, // read only
282: TransactionController.MODE_TABLE,
283: TransactionController.ISOLATION_NOLOCK,
284: (FormatableBitSet) null, // all columns
285: (DataValueDescriptor[]) null, // no start key
286: 0, // no start key operator
287: (Qualifier[][]) null,
288: (DataValueDescriptor[]) null, // no stop key
289: 0 /* no stop key operator */);
290: hasMore = scan.next();
291: if (!hasMore) {
292: scan.close();
293: scan = null;
294: }
295: } catch (StandardException se) {
296: hasMore = false;
297: if (scan != null) {
298: try {
299: scan.close();
300: } catch (StandardException se1) {
301: }
302: ;
303: scan = null;
304: }
305: }
306: } // end of constructor
307:
308: public boolean hasMoreElements() {
309: return hasMore;
310: }
311:
312: public Object nextElement() {
313: if (!hasMore)
314: throw new NoSuchElementException();
315: try {
316: scan.fetch(row);
317: Object retValue = BackingStoreHashtable
318: .shallowCloneRow(row);
319: hasMore = scan.next();
320: if (!hasMore) {
321: scan.close();
322: scan = null;
323: }
324:
325: return retValue;
326: } catch (StandardException se) {
327: if (scan != null) {
328: try {
329: scan.close();
330: } catch (StandardException se1) {
331: }
332: ;
333: scan = null;
334: }
335: throw new NoSuchElementException();
336: }
337: } // end of nextElement
338: } // end of class ElementEnum
339: }
|