Source Code Cross Referenced for BackingStoreHashtable.java in db-derby-10.2 (org.apache.derby.iapi.store.access)



001:        /*
002:
003:           Derby - Class org.apache.derby.iapi.store.access.BackingStoreHashtable
004:
005:           Licensed to the Apache Software Foundation (ASF) under one or more
006:           contributor license agreements.  See the NOTICE file distributed with
007:           this work for additional information regarding copyright ownership.
008:           The ASF licenses this file to you under the Apache License, Version 2.0
009:           (the "License"); you may not use this file except in compliance with
010:           the License.  You may obtain a copy of the License at
011:
012:              http://www.apache.org/licenses/LICENSE-2.0
013:
014:           Unless required by applicable law or agreed to in writing, software
015:           distributed under the License is distributed on an "AS IS" BASIS,
016:           WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
017:           See the License for the specific language governing permissions and
018:           limitations under the License.
019:
020:         */
021:
022:        package org.apache.derby.iapi.store.access;
023:
024:        import org.apache.derby.iapi.services.sanity.SanityManager;
025:
026:        import org.apache.derby.iapi.services.io.Storable;
027:
028:        import org.apache.derby.iapi.error.StandardException;
029:
030:        import org.apache.derby.iapi.types.CloneableObject;
031:        import org.apache.derby.iapi.types.DataValueDescriptor;
032:
033:        import org.apache.derby.iapi.services.cache.ClassSize;
034:
035:        import java.util.Enumeration;
036:        import java.util.Hashtable;
037:        import java.util.Properties;
038:        import java.util.Vector;
039:        import java.util.NoSuchElementException;
040:
041:        /**
042:         A BackingStoreHashtable is a utility class which will store a set of rows into
043:         an in-memory hash table, or overflow the hash table to a temporary on-disk
044:         structure.
045:         <p>
046:         All rows must contain the same number of columns, and the column at position
047:         N of all the rows must have the same format id.  If the BackingStoreHashtable needs to be
048:         overflowed to disk, then an arbitrary row will be chosen and used as a template
049:         for creating the underlying overflow container.
050:
051:         <p>
052:         The hash table will be built logically as follows (actual implementation
053:         may differ).  The important points are that the hash value is the standard
054:         java hash value on row[key_column_numbers[0]], if key_column_numbers.length is 1,
055:         or on row[key_column_numbers[0, 1, ...]] if key_column_numbers.length > 1,
056:         and that duplicate detection relies on the standard key equality provided by
057:         java.util.Hashtable.
058:         <p>
059:         import java.util.Hashtable;
060:
061:         hash_table = new Hashtable();
062:
063:         Object[] row;
064:         boolean  needsToClone = rowSource.needsToClone();
065:
066:         while ((row = rowSource.getNextRowFromRowSource()) != null)
067:         {
068:             if (needsToClone)
069:                 row = clone_row_from_row(row);
070:
071:             Object key = KeyHasher.buildHashKey(row, key_column_numbers);
072:
073:             Object duplicate_value = hash_table.put(key, row);
074:             if (duplicate_value != null)
075:             {
076:                 Vector row_vec;
077:
078:                 // inserted a duplicate
079:                 if (duplicate_value instanceof Vector)
080:                 {
081:                     row_vec = (Vector) duplicate_value;
082:                 }
083:                 else
084:                 {
085:                     // allocate vector to hold duplicates
086:                     row_vec = new Vector(2);
087:
088:                     // insert original row into vector
089:                     row_vec.addElement(duplicate_value);
090:
091:                     // put the vector as the data rather than the row
092:                     hash_table.put(key, row_vec);
093:                 }
094:
095:                 // insert new row into vector
096:                 row_vec.addElement(row);
097:             }
098:         }
099:
100:         **/
101:
102:        public class BackingStoreHashtable {
103:
104:            /**************************************************************************
105:             * Fields of the class
106:             **************************************************************************
107:             */
108:            private TransactionController tc;
109:            private Hashtable hash_table;
110:            private int[] key_column_numbers;
111:            private boolean remove_duplicates;
112:            private boolean skipNullKeyColumns;
113:            private Properties auxillary_runtimestats;
114:            private RowSource row_source;
115:            /* If max_inmemory_rowcnt > 0 then use that to decide when to spill to disk.
116:             * Otherwise compute max_inmemory_size based on the JVM memory size when the BackingStoreHashtable
117:             * is constructed and use that to decide when to spill to disk.
118:             */
119:            private long max_inmemory_rowcnt;
120:            private long inmemory_rowcnt;
121:            private long max_inmemory_size;
122:            private boolean keepAfterCommit;
123:
124:            /**
125:             * The estimated number of bytes used by Vector(0)
126:             */
127:            private final static int vectorSize = ClassSize
128:                    .estimateBaseFromCatalog(java.util.Vector.class);
129:
130:            private DiskHashtable diskHashtable;
131:
132:            /**************************************************************************
133:             * Constructors for This class:
134:             **************************************************************************
135:             */
136:            private BackingStoreHashtable() {
137:            }
138:
139:            /**
140:             * Create the BackingStoreHashtable from a row source.
141:             * <p>
142:             * This routine drains the RowSource.  The performance characteristics
143:             * depends on the number of rows inserted and the parameters to the 
144:             * constructor.  
145:             * <p>
146:             * If the number of rows is <= "max_inmemory_rowcnt", then the rows are
147:             * inserted into a java.util.Hashtable.  In this case no 
148:             * TransactionController is necessary, a "null" tc is valid.
149:             * <p>
150:             * If the number of rows is > "max_inmemory_rowcnt", then the rows will
151:             * all be placed in some sort of Access temporary file on disk.  This
152:             * case requires a valid TransactionController.
153:             *
154:             * @param tc                An open TransactionController to be used if the
155:             *                          hash table needs to overflow to disk.
156:             *
157:             * @param row_source        RowSource to read rows from.
158:             *
159:             * @param key_column_numbers The column numbers of the columns in the
160:             *                          scan result row to be the key to the Hashtable.
161:             *                          "0" is the first column in the scan result
162:             *                          row (which may be different than the first
163:             *                          row in the table of the scan).
164:             *
165:             * @param remove_duplicates Should the Hashtable automatically remove
166:             *                          duplicates, or should it create the Vector of
167:             *                          duplicates?
168:             *
169:             * @param estimated_rowcnt  The estimated number of rows in the hash table.
170:             *                          Pass in -1 if there is no estimate.
171:             *
172:             * @param max_inmemory_rowcnt
173:             *                          The maximum number of rows to insert into the 
174:             *                          inmemory Hash table before overflowing to disk.
175:             *                          Pass in -1 if there is no maximum.
176:             *
177:             * @param initialCapacity   If not "-1" used to initialize the java 
178:             *                          Hashtable.
179:             *
180:             * @param loadFactor        If not "-1" used to initialize the java 
181:             *                          Hashtable.
182:             *
183:             * @param skipNullKeyColumns	Skip rows with a null key column, if true.
184:             *
185:             * @param keepAfterCommit If true the hash table is kept after a commit,
186:             *                        if false the hash table is dropped on the next commit.
187:             *
188:             *
189:             * @exception  StandardException  Standard exception policy.
190:             **/
191:            public BackingStoreHashtable(TransactionController tc,
192:                    RowSource row_source, int[] key_column_numbers,
193:                    boolean remove_duplicates, long estimated_rowcnt,
194:                    long max_inmemory_rowcnt, int initialCapacity,
195:                    float loadFactor, boolean skipNullKeyColumns,
196:                    boolean keepAfterCommit) throws StandardException {
197:                this.key_column_numbers = key_column_numbers;
198:                this.remove_duplicates = remove_duplicates;
199:                this.row_source = row_source;
200:                this.skipNullKeyColumns = skipNullKeyColumns;
201:                this.max_inmemory_rowcnt = max_inmemory_rowcnt;
202:                if (max_inmemory_rowcnt > 0)
203:                    max_inmemory_size = Long.MAX_VALUE;
204:                else
205:                    max_inmemory_size = Runtime.getRuntime().totalMemory() / 100;
206:                this.tc = tc;
207:                this.keepAfterCommit = keepAfterCommit;
208:
209:                Object[] row;
210:
211:                // Use the passed-in capacity and load factor if not -1; you must
212:                // specify capacity if you want to specify load factor.
213:                if (initialCapacity != -1) {
214:                    hash_table = ((loadFactor == -1) ? new Hashtable(
215:                            initialCapacity) : new Hashtable(initialCapacity,
216:                            loadFactor));
217:                } else {
218:                    /* We want to create the hash table based on the estimated row
219:                     * count if a) we have an estimated row count (i.e. it's greater
220:                     * than zero) and b) we think we can create a hash table to
221:                     * hold the estimated row count without running out of memory.
222:                     * The check for "b" is required because, for deeply nested
223:                     * queries and/or queries with a high number of tables in
224:                     * their FROM lists, the optimizer can end up calculating
225:                     * some very high row count estimates--even up to the point of
226:                     * Double.POSITIVE_INFINITY (see DERBY-1259 for an explanation
227:                     * of how that can happen).  In that case any attempts to
228:                     * create a Hashtable of size estimated_rowcnt can cause
229:                     * OutOfMemory errors when we try to create the Hashtable.
230:                     * So as a "red flag" for that kind of situation, we check to
231:                     * see if the estimated row count is greater than the max
232:                     * in-memory size for this table.  Unit-wise this comparison
233:                     * is relatively meaningless: rows vs bytes.  But if our
234:                     * estimated row count is greater than the max number of
235:                     * in-memory bytes that we're allowed to consume, then
236:                     * it's very likely that creating a Hashtable with a capacity
237:                     * of estimated_rowcnt will lead to memory problems.  So in
238:                     * that particular case we leave hash_table null here and
239:                     * initialize it further below, using the estimated in-memory
240:                     * size of the first row to figure out what a reasonable size
241:                     * for the Hashtable might be.
242:                     */
243:                    hash_table = (((estimated_rowcnt <= 0) || (row_source == null)) ? new Hashtable()
244:                            : (estimated_rowcnt < max_inmemory_size) ? new Hashtable(
245:                                    (int) estimated_rowcnt)
246:                                    : null);
247:                }
248:
249:                if (row_source != null) {
250:                    boolean needsToClone = row_source.needsToClone();
251:
252:                    while ((row = getNextRowFromRowSource()) != null) {
253:                        // If we haven't initialized the hash_table yet then that's
254:                        // because a Hashtable with capacity estimated_rowcnt would
255:                        // probably cause memory problems.  So look at the first row
256:                        // that we found and use that to create the hash table with
257:                        // an initial capacity such that, if it was completely full,
258:                        // it would still satisfy the max_inmemory condition.  Note
259:                        // that this isn't a hard limit--the hash table can grow if
260:                        // needed.
261:                        if (hash_table == null) {
262:                            // Check to see how much memory we think the first row
263:                            // is going to take, and then use that to set the initial
264:                            // capacity of the Hashtable.
265:                            double rowUsage = getEstimatedMemUsage(row);
266:                            hash_table = new Hashtable(
267:                                    (int) (max_inmemory_size / rowUsage));
268:                        }
269:
270:                        add_row_to_hash_table(hash_table, row, needsToClone);
271:                    }
272:                }
273:
274:                // In the (unlikely) event that we received a "red flag" estimated_rowcnt
275:                // that is too big (see comments above), it's possible that, if row_source
276:                // was null or else didn't have any rows, hash_table could still be null
277:                // at this point.  So we initialize it to an empty hashtable (representing
278:                // an empty result set) so that calls to other methods on this
279:                // BackingStoreHashtable (ex. "size()") will have a working hash_table
280:                // on which to operate.
281:                if (hash_table == null)
282:                    hash_table = new Hashtable();
283:            }
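            /* Editor's illustrative sketch, not part of the original Derby source:
             * one way a caller might drive this constructor.  The variables tc
             * (an open TransactionController) and rows (a RowSource) are
             * hypothetical and only stand in for whatever the caller already has;
             * the call itself can throw StandardException.
             *
             *     int[] keyCols = { 0 };   // hash on the first column of each row
             *     BackingStoreHashtable ht =
             *         new BackingStoreHashtable(
             *             tc,        // needed only if the table may spill to disk
             *             rows,      // drained completely by this constructor
             *             keyCols,
             *             true,      // remove_duplicates
             *             -1,        // estimated_rowcnt: no estimate
             *             10000,     // max_inmemory_rowcnt: spill to disk past 10000 rows
             *             -1,        // initialCapacity: let the class decide
             *             -1,        // loadFactor: let the class decide
             *             false,     // skipNullKeyColumns
             *             false);    // keepAfterCommit: drop on next commit
             *
             * Passing max_inmemory_rowcnt <= 0 instead makes the spill decision
             * purely memory based (roughly 1% of the JVM's total memory, per
             * max_inmemory_size above).
             */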
284:
285:            /**************************************************************************
286:             * Private/Protected methods of This class:
287:             **************************************************************************
288:             */
289:
290:            /**
291:             * Get the next row from the row source or, if skipNullKeyColumns is
292:             * true, the next row with no null key columns.
293:             *
294:             *
295:             * @exception  StandardException  Standard exception policy.
296:             */
297:            private Object[] getNextRowFromRowSource() throws StandardException {
298:                Object[] row = row_source.getNextRowFromRowSource();
299:
300:                if (skipNullKeyColumns) {
301:                    while (row != null) {
302:                        // Are any key columns null?
303:                        int index = 0;
304:                        for (; index < key_column_numbers.length; index++) {
305:                            if (SanityManager.DEBUG) {
306:                                if (!(row[key_column_numbers[index]] instanceof Storable)) {
307:                                    SanityManager
308:                                            .THROWASSERT("row[key_column_numbers[index]] expected to be Storable, not "
309:                                                    + row[key_column_numbers[index]]
310:                                                            .getClass()
311:                                                            .getName());
312:                                }
313:                            }
314:                            Storable storable = (Storable) row[key_column_numbers[index]];
315:                            if (storable.isNull()) {
316:                                break;
317:                            }
318:                        }
319:                        // No null key columns
320:                        if (index == key_column_numbers.length) {
321:                            return row;
322:                        }
323:                        // 1 or more null key columns
324:                        row = row_source.getNextRowFromRowSource();
325:                    }
326:                }
327:                return row;
328:            }
329:
330:            /**
331:             * Return a cloned copy of the row.
332:             *
333:             * @return The cloned row to use.
334:             *
335:             * @exception  StandardException  Standard exception policy.
336:             **/
337:            static Object[] cloneRow(Object[] old_row) throws StandardException {
338:                Object[] new_row = new DataValueDescriptor[old_row.length];
339:
340:                // the only difference between getClone and cloneObject is cloneObject does
341:                // not objectify a stream.  We use getClone here.  Beetle 4896.
342:                for (int i = 0; i < old_row.length; i++) {
343:                    if (old_row[i] != null)
344:                        new_row[i] = ((DataValueDescriptor) old_row[i])
345:                                .getClone();
346:                }
347:
348:                return (new_row);
349:            }
350:
351:            /**
352:             * Return a shallow clone of the row.
353:             *
354:             * @return The cloned row to use.
355:             *
356:             * @exception  StandardException  Standard exception policy.
357:             **/
358:            static DataValueDescriptor[] shallowCloneRow(
359:                    DataValueDescriptor[] old_row) throws StandardException {
360:                DataValueDescriptor[] new_row = new DataValueDescriptor[old_row.length];
361:                // the only difference between getClone and cloneObject is cloneObject does
362:                // not objectify a stream.  We use cloneObject here.  DERBY-802
363:                for (int i = 0; i < old_row.length; i++) {
364:                    if (old_row[i] != null)
365:                        new_row[i] = (DataValueDescriptor) ((CloneableObject) old_row[i])
366:                                .cloneObject();
367:                }
368:
369:                return (new_row);
370:            }
371:
372:            /**
373:             * Do the work to add one row to the hash table.
374:             * <p>
375:             *
376:             * @param row               Row to add to the hash table.
377:             * @param hash_table        The java Hashtable to load into.
378:             * @param needsToClone      If the row needs to be cloned
379:             *
380:             * @exception  StandardException  Standard exception policy.
381:             **/
382:            private void add_row_to_hash_table(Hashtable hash_table,
383:                    Object[] row, boolean needsToClone)
384:                    throws StandardException {
385:                if (spillToDisk(hash_table, row))
386:                    return;
387:
388:                if (needsToClone) {
389:                    row = cloneRow(row);
390:                }
391:                Object key = KeyHasher.buildHashKey(row, key_column_numbers);
392:                Object duplicate_value = null;
393:
394:                if ((duplicate_value = hash_table.put(key, row)) == null)
395:                    doSpaceAccounting(row, false);
396:                else {
397:                    if (!remove_duplicates) {
398:                        Vector row_vec;
399:
400:                        // inserted a duplicate
401:                        if ((duplicate_value instanceof Vector)) {
402:                            doSpaceAccounting(row, false);
403:                            row_vec = (Vector) duplicate_value;
404:                        } else {
405:                            // allocate vector to hold duplicates
406:                            row_vec = new Vector(2);
407:
408:                            // insert original row into vector
409:                            row_vec.addElement(duplicate_value);
410:                            doSpaceAccounting(row, true);
411:                        }
412:
413:                        // insert new row into vector
414:                        row_vec.addElement(row);
415:
416:                        // store vector of rows back into hash table,
417:                        // overwriting the duplicate key that was 
418:                        // inserted.
419:                        hash_table.put(key, row_vec);
420:                    }
421:                }
422:
423:                row = null;
424:            }
425:
426:            private void doSpaceAccounting(Object[] row, boolean firstDuplicate) {
427:                inmemory_rowcnt++;
428:                if (max_inmemory_rowcnt <= 0) {
429:                    max_inmemory_size -= getEstimatedMemUsage(row);
430:                    if (firstDuplicate)
431:                        max_inmemory_size -= vectorSize;
432:                }
433:            } // end of doSpaceAccounting
434:
435:            /**
436:             * Determine whether a new row should be spilled to disk and, if so, do it.
437:             *
438:             * @param hash_table The in-memory hash table
439:             * @param row        The row that is being inserted.
440:             *
441:             * @return true if the row was spilled to disk, false if not
442:             *
443:             * @exception  StandardException  Standard exception policy.
444:             */
445:            private boolean spillToDisk(Hashtable hash_table, Object[] row)
446:                    throws StandardException {
447:                // Once we have started spilling all new rows will go to disk, even if we have freed up some
448:                // memory by moving duplicates to disk. This simplifies handling of duplicates and accounting.
449:                if (diskHashtable == null) {
450:                    if (max_inmemory_rowcnt > 0) {
451:                        if (inmemory_rowcnt < max_inmemory_rowcnt)
452:                            return false; // Do not spill
453:                    } else if (max_inmemory_size > getEstimatedMemUsage(row))
454:
455:                        return false;
456:                    // Want to start spilling
457:                    if (!(row instanceof DataValueDescriptor[])) {
458:                        if (SanityManager.DEBUG)
459:                            SanityManager
460:                                    .THROWASSERT("BackingStoreHashtable row is not DataValueDescriptor[]");
461:                        // Do not know how to put it on disk
462:                        return false;
463:                    }
464:                    diskHashtable = new DiskHashtable(tc,
465:                            (DataValueDescriptor[]) row, key_column_numbers,
466:                            remove_duplicates, keepAfterCommit);
467:                }
468:                Object key = KeyHasher.buildHashKey(row, key_column_numbers);
469:                Object duplicateValue = hash_table.get(key);
470:                if (duplicateValue != null) {
471:                    if (remove_duplicates)
472:                        return true; // a degenerate case of spilling
473:                    // If we are keeping duplicates then move all the duplicates from memory to disk
474:                    // This simplifies finding duplicates: they are either all in memory or all on disk.
475:                    if (duplicateValue instanceof Vector) {
476:                        Vector duplicateVec = (Vector) duplicateValue;
477:                        for (int i = duplicateVec.size() - 1; i >= 0; i--) {
478:                            Object[] dupRow = (Object[]) duplicateVec
479:                                    .elementAt(i);
480:                            diskHashtable.put(key, dupRow);
481:                        }
482:                    } else
483:                        diskHashtable.put(key, (Object[]) duplicateValue);
484:                    hash_table.remove(key);
485:                }
486:                diskHashtable.put(key, row);
487:                return true;
488:            } // end of spillToDisk
489:
490:            /**
491:             * Take a row and return an estimate as to how much memory that
492:             * row will consume.
493:             * 
494:             * @param row The row for which we want to know the memory usage.
495:             * @return A guess as to how much memory the current row will
496:             *  use.
497:             */
498:            private long getEstimatedMemUsage(Object[] row) {
499:                long rowMem = 0;
500:                for (int i = 0; i < row.length; i++) {
501:                    if (row[i] instanceof DataValueDescriptor)
502:                        rowMem += ((DataValueDescriptor) row[i])
503:                                .estimateMemoryUsage();
504:                    rowMem += ClassSize.refSize;
505:                }
506:
507:                rowMem += ClassSize.refSize;
508:                return rowMem;
509:            }
510:
511:            /**************************************************************************
512:             * Public Methods of This class:
513:             **************************************************************************
514:             */
515:
516:            /**
517:             * Close the BackingStoreHashtable.
518:             * <p>
519:             * Perform any necessary cleanup after finishing with the hashtable.  Will
520:             * deallocate/dereference objects as necessary.  If the table has gone
521:             * to disk this will drop any on disk files used to support the hash table.
522:             * <p>
523:             *
524:             * @exception  StandardException  Standard exception policy.
525:             **/
526:            public void close() throws StandardException {
527:                hash_table = null;
528:                if (diskHashtable != null) {
529:                    diskHashtable.close();
530:                    diskHashtable = null;
531:                }
532:                return;
533:            }
534:
535:            /**
536:             * Return an Enumeration that can be used to scan the entire table.
537:             * <p>
538:             * RESOLVE - is it worth it to support this routine when we have a
539:             *           disk overflow hash table?
540:             *
541:             * @return The Enumeration.
542:             *
543:             * @exception  StandardException  Standard exception policy.
544:             **/
545:            public Enumeration elements() throws StandardException {
546:                if (diskHashtable == null)
547:                    return (hash_table.elements());
548:                return new BackingStoreHashtableEnumeration();
549:            }
550:
551:            /**
552:             * Get the data associated with the given key.
553:             * <p>
554:             * There are 2 different types of objects returned from this routine.
555:             * <p>
556:             * In both cases, the key value is either the object stored in 
557:             * row[key_column_numbers[0]], if key_column_numbers.length is 1, 
558:             * or a KeyHasher containing
559:             * the objects stored in row[key_column_numbers[0, 1, ...]].
560:             * For every qualifying unique row value an entry is placed into the 
561:             * Hashtable.
562:             * <p>
563:             * For row values with duplicates, the value of the data is a Vector of
564:             * rows.
565:             * <p>
566:             * The caller will have to call "instanceof" on the data value
567:             * object if duplicates are expected, to determine if the data value
568:             * of the Hashtable entry is a row or is a Vector of rows.
569:             * <p>
570:             * The BackingStoreHashtable "owns" the objects returned from the get()
571:             * routine.  They remain valid until the next access to the 
572:             * BackingStoreHashtable.  If the client needs to keep references to these
573:             * objects, it should clone copies of the objects.  A valid 
574:             * BackingStoreHashtable can place all rows into a disk based conglomerate,
575:             * declare a row buffer and then reuse that row buffer for every get()
576:             * call.
577:             *
578:             * @return The value to which the key is mapped in this hashtable; 
579:             *         null if the key is not mapped to any value in this hashtable.
580:             *
581:             * @param key    The key to hash on.
582:             *
583:             * @exception  StandardException  Standard exception policy.
584:             **/
585:            public Object get(Object key) throws StandardException {
586:                Object obj = hash_table.get(key);
587:                if (diskHashtable == null || obj != null)
588:                    return obj;
589:                return diskHashtable.get(key);
590:            }
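            /* Editor's illustrative sketch, not part of the original Derby source:
             * how a caller might consume the value returned by get() when the
             * table was built with remove_duplicates == false.  ht and someKey are
             * hypothetical; with a single key column the key is simply the object
             * stored in that column of the row.
             *
             *     Object value = ht.get(someKey);
             *     if (value == null) {
             *         // no row with this key
             *     } else if (value instanceof Vector) {
             *         // duplicates: a Vector of Object[] rows
             *         Vector dups = (Vector) value;
             *         for (int i = 0; i < dups.size(); i++) {
             *             Object[] row = (Object[]) dups.elementAt(i);
             *             // ... use row ...
             *         }
             *     } else {
             *         // a single Object[] row
             *         Object[] row = (Object[]) value;
             *     }
             *
             * As the javadoc above notes, the returned objects remain owned by the
             * BackingStoreHashtable and are only guaranteed valid until the next
             * access, so a caller that keeps them should clone them.
             */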
591:
592:            /**
593:             * Return runtime stats to caller by adding them to prop.
594:             * <p>
595:             *
596:             * @param prop   The set of properties to append to.
597:             *
598:             * @exception  StandardException  Standard exception policy.
599:             **/
600:            public void getAllRuntimeStats(Properties prop)
601:                    throws StandardException {
602:                if (auxillary_runtimestats != null)
603:                    org.apache.derby.iapi.util.PropertyUtil.copyProperties(
604:                            auxillary_runtimestats, prop);
605:            }
606:
607:            /**
608:             * Remove a row from the hash table.
609:             * <p>
610:             * A remove of a duplicate removes the entire duplicate list.
611:             *
612:             * @param key          The key of the row to remove.
613:             *
614:             * @exception  StandardException  Standard exception policy.
615:             **/
616:            public Object remove(Object key) throws StandardException {
617:                Object obj = hash_table.remove(key);
618:                if (obj != null || diskHashtable == null)
619:                    return obj;
620:                return diskHashtable.remove(key);
621:            }
622:
623:            /**
624:             * Set the auxillary runtime stats.
625:             * <p>
626:             * getAllRuntimeStats() will return both the auxillary stats and any
627:             * BackingStoreHashtable specific stats.  Note that each call to
628:             * setAuxillaryRuntimeStats() overwrites the Property set that was
629:             * set previously.
630:             *
631:             * @param prop   The set of properties to append from.
632:             *
633:             * @exception  StandardException  Standard exception policy.
634:             **/
635:            public void setAuxillaryRuntimeStats(Properties prop)
636:                    throws StandardException {
637:                auxillary_runtimestats = prop;
638:            }
639:
640:            /**
641:             * Put a row into the hash table.
642:             * <p>
643:             * The in memory hash table will need to keep a reference to the row
644:             * after the put call has returned.  If "needsToClone" is true then the
645:             * hash table will make a copy of the row and put that, else if 
646:             * "needsToClone" is false then the hash table will keep a reference to
647:             * the row passed in and no copy will be made.
648:             * <p>
649:             * If the routine returns false, then no reference is kept to the duplicate
650:             * row which was rejected (thus allowing the caller to reuse the object).
651:             *
652:             * @param needsToClone does this routine have to make a copy of the row,
653:             *                     in order to keep a reference to it after return?
654:             * @param row          The row to insert into the table.
655:             *
656:             * @return true if row was inserted into the hash table.  Returns
657:             *              false if the BackingStoreHashtable is eliminating 
658:             *              duplicates, and the row being inserted is a duplicate,
659:             *				or if we are skipping rows with 1 or more null key columns
660:             *				and we find a null key column.
661:             *
662:             * @exception  StandardException  Standard exception policy.
663:             **/
664:            public boolean put(boolean needsToClone, Object[] row)
665:                    throws StandardException {
666:                // Are any key columns null?
667:                if (skipNullKeyColumns) {
668:                    int index = 0;
669:                    for (; index < key_column_numbers.length; index++) {
670:                        if (SanityManager.DEBUG) {
671:                            if (!(row[key_column_numbers[index]] instanceof Storable)) {
672:                                SanityManager
673:                                        .THROWASSERT("row[key_column_numbers[index]] expected to be Storable, not "
674:                                                + row[key_column_numbers[index]]
675:                                                        .getClass().getName());
676:                            }
677:                        }
678:                        Storable storable = (Storable) row[key_column_numbers[index]];
679:                        if (storable.isNull()) {
680:                            return false;
681:                        }
682:                    }
683:                }
684:
685:                Object key = KeyHasher.buildHashKey(row, key_column_numbers);
686:
687:                if ((remove_duplicates) && (get(key) != null)) {
688:                    return (false);
689:                } else {
690:                    add_row_to_hash_table(hash_table, row, needsToClone);
691:                    return (true);
692:                }
693:            }
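            /* Editor's illustrative sketch, not part of the original Derby source:
             * feeding rows into the table through put() rather than a RowSource.
             * ht is a hypothetical BackingStoreHashtable created with
             * remove_duplicates == true, and newRow a DataValueDescriptor[] that
             * the caller intends to reuse, hence needsToClone == true.
             *
             *     boolean inserted = ht.put(true, newRow);
             *     if (!inserted) {
             *         // rejected: duplicate key, or a null key column when
             *         // skipNullKeyColumns is true; newRow may be reused freely.
             *     }
             */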
694:
695:            /**
696:             * Return number of unique rows in the hash table.
697:             * <p>
698:             *
699:             * @return The number of unique rows in the hash table.
700:             *
701:             * @exception  StandardException  Standard exception policy.
702:             **/
703:            public int size() throws StandardException {
704:                if (diskHashtable == null)
705:                    return (hash_table.size());
706:                return hash_table.size() + diskHashtable.size();
707:            }
708:
709:            private class BackingStoreHashtableEnumeration
710:                    implements Enumeration {
711:                private Enumeration memoryEnumeration;
712:                private Enumeration diskEnumeration;
713:
714:                BackingStoreHashtableEnumeration() {
715:                    memoryEnumeration = hash_table.elements();
716:                    if (diskHashtable != null) {
717:                        try {
718:                            diskEnumeration = diskHashtable.elements();
719:                        } catch (StandardException se) {
720:                            diskEnumeration = null;
721:                        }
722:                    }
723:                }
724:
725:                public boolean hasMoreElements() {
726:                    if (memoryEnumeration != null) {
727:                        if (memoryEnumeration.hasMoreElements())
728:                            return true;
729:                        memoryEnumeration = null;
730:                    }
731:                    if (diskEnumeration == null)
732:                        return false;
733:                    return diskEnumeration.hasMoreElements();
734:                }
735:
736:                public Object nextElement() throws NoSuchElementException {
737:                    if (memoryEnumeration != null) {
738:                        if (memoryEnumeration.hasMoreElements())
739:                            return memoryEnumeration.nextElement();
740:                        memoryEnumeration = null;
741:                    }
742:                    return diskEnumeration.nextElement();
743:                }
744:            } // end of class BackingStoreHashtableEnumeration
745:        }