001: /**
002: * com.mckoi.database.jdbcserver.AbstractJDBCDatabaseInterface 16 Mar 2002
003: *
004: * Mckoi SQL Database ( http://www.mckoi.com/database )
005: * Copyright (C) 2000, 2001, 2002 Diehl and Associates, Inc.
006: *
007: * This program is free software; you can redistribute it and/or
008: * modify it under the terms of the GNU General Public License
009: * Version 2 as published by the Free Software Foundation.
010: *
011: * This program is distributed in the hope that it will be useful,
012: * but WITHOUT ANY WARRANTY; without even the implied warranty of
013: * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
014: * GNU General Public License Version 2 for more details.
015: *
016: * You should have received a copy of the GNU General Public License
017: * Version 2 along with this program; if not, write to the Free Software
018: * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
019: *
020: * Change Log:
021: *
022: *
 */
package com.mckoi.database.jdbcserver;
024:
025: import com.mckoi.database.*;
026: import com.mckoi.database.global.*;
027: import com.mckoi.database.interpret.Statement;
028: import com.mckoi.database.interpret.SQLQueryExecutor;
029: import com.mckoi.database.sql.SQL;
030: import com.mckoi.database.sql.ParseException;
031: import com.mckoi.database.jdbc.*;
032: import com.mckoi.util.IntegerVector;
033: import com.mckoi.util.StringUtil;
034: import com.mckoi.debug.*;
035:
036: import java.sql.SQLException;
037: import java.io.*;
038: import java.util.ArrayList;
039: import java.util.Iterator;
040: import java.util.HashMap;
041:
042: /**
043: * An abstract implementation of JDBCDatabaseInterface that provides a
044: * connection between a single DatabaseConnection and a DatabaseInterface
045: * implementation.
046: * <p>
047: * This receives database commands from the JDBC layer and dispatches the
048: * queries to the database system. It also manages ResultSet maps for query
049: * results.
050: * <p>
051: * This implementation does not handle authentication (login) / construction
052: * of the DatabaseConnection object, or disposing of the connection.
053: * <p>
054: * This implementation ignores the AUTO-COMMIT flag when a query is executed.
055: * To implement AUTO-COMMIT, you should 'commit' after a command is executed.
056: * <p>
057: * SYNCHRONIZATION: This interface is NOT thread-safe. To make a thread-safe
058: * implementation use the LockingMechanism.
059: * <p>
060: * See JDBCDatabaseInterface for a standard server-side implementation of this
061: * class.
062: *
063: * @author Tobias Downer
064: */
065:
066: public abstract class AbstractJDBCDatabaseInterface implements
067: DatabaseInterface {
068:
069: /**
     * The Database object that represents the context of this
071: * database interface.
072: */
073: private Database database;
074:
075: /**
076: * The mapping that maps from result id number to Table object that this
077: * JDBC connection is currently maintaining.
078: * <p>
079: * NOTE: All Table objects are now valid over a database shutdown + init.
080: */
081: private HashMap result_set_map;
082:
083: /**
084: * This is incremented every time a result set is added to the map. This
085: * way, we always have a unique key on hand.
086: */
087: private int unique_result_id;
088:
089: /**
090: * Access to information regarding the user logged in on this connection.
091: * If no user is logged in, this is left as 'null'. We can also use this to
     * retrieve the Database object the user is logged into.
093: */
094: private User user = null;
095:
096: /**
097: * The database connection transaction.
098: */
099: private DatabaseConnection database_connection;
100:
101: /**
102: * The SQL parser object for this interface. When a statement is being
     * parsed, this object is synchronized.
104: */
105: private SQLQueryExecutor sql_executor;
106: // private SQL sql_parser;
107:
108: /**
     * Maintains a mapping from streamable object id for a particular object that
110: * is currently being uploaded to the server. This maps streamable_object_id
111: * to blob id reference.
112: */
113: private HashMap blob_id_map;
114:
115: /**
116: * Set to true when this database interface is disposed.
117: */
118: private boolean disposed;
119:
120: /**
121: * Sets up the database interface.
122: */
123: public AbstractJDBCDatabaseInterface(Database database) {
124: this .database = database;
125: result_set_map = new HashMap();
126: blob_id_map = new HashMap();
127: unique_result_id = 1;
128: disposed = false;
129: }
130:
131: // ---------- Utility methods ----------
132:
133: /**
134: * Initializes this database interface with a User and DatabaseConnection
135: * object. This would typically be called from inside an authentication
136: * method, or from 'login'. This must be set before the object can be
137: * used.
138: */
139: protected final void init(User user, DatabaseConnection connection) {
140: this .user = user;
141: this .database_connection = connection;
142: // Set up the sql parser.
143: sql_executor = new SQLQueryExecutor();
144: // sql_parser = new SQL(new StringReader(""));
145: }
146:
147: /**
148: * Returns the Database that is the context of this interface.
149: */
150: protected final Database getDatabase() {
151: return database;
152: }
153:
154: /**
155: * Returns the User object for this connection.
156: */
157: protected final User getUser() {
158: return user;
159: }
160:
161: /**
162: * Returns a DebugLogger object that can be used to log debug messages
163: * against.
164: */
165: public final DebugLogger Debug() {
166: return getDatabase().Debug();
167: }
168:
169: /**
170: * Returns the DatabaseConnection objcet for this connection.
171: */
172: protected final DatabaseConnection getDatabaseConnection() {
173: return database_connection;
174: }
175:
176: /**
177: * Adds this result set to the list of result sets being handled through
178: * this processor. Returns a number that unique identifies this result
179: * set.
180: */
181: private int addResultSet(ResultSetInfo result) {
182: // Lock the roots of the result set.
183: result.lockRoot(-1); // -1 because lock_key not implemented
184:
185: // Make a new result id
186: int result_id;
187: // This ensures this block can handle concurrent updates.
188: synchronized (result_set_map) {
189: result_id = ++unique_result_id;
190: // Add the result to the map.
191: result_set_map.put(new Integer(result_id), result);
192: }
193:
194: return result_id;
195: }
196:
197: /**
198: * Gets the result set with the given result_id.
199: */
200: private ResultSetInfo getResultSet(int result_id) {
201: synchronized (result_set_map) {
202: return (ResultSetInfo) result_set_map.get(new Integer(
203: result_id));
204: }
205: }
206:
207: /**
208: * Disposes of the result set with the given result_id. After this has
209: * been called, the GC should garbage the table.
210: */
211: private void disposeResultSet(int result_id) {
212: // Remove this entry.
213: ResultSetInfo table;
214: synchronized (result_set_map) {
215: table = (ResultSetInfo) result_set_map.remove(new Integer(
216: result_id));
217: }
218: if (table != null) {
219: table.dispose();
220: } else {
221: Debug().write(Lvl.ERROR, this ,
222: "Attempt to dispose invalid 'result_id'.");
223: }
224: }
225:
226: /**
227: * Clears the contents of the result set map. This removes all result_id
228: * ResultSetInfo maps.
229: */
230: protected final void clearResultSetMap() {
231: Iterator keys;
232: ArrayList list;
233: synchronized (result_set_map) {
234: keys = result_set_map.keySet().iterator();
235:
236: list = new ArrayList();
237: while (keys.hasNext()) {
238: list.add(keys.next());
239: }
240: }
241: keys = list.iterator();
242:
243: while (keys.hasNext()) {
244: int result_id = ((Integer) keys.next()).intValue();
245: disposeResultSet(result_id);
246: }
247: }
248:
249: /**
250: * Wraps a Throwable thrown by the execution of a query in DatabaseConnection
251: * with an SQLException and puts the appropriate error messages to the debug
252: * log.
253: */
254: protected final SQLException handleExecuteThrowable(Throwable e,
255: SQLQuery query) {
256: if (e instanceof ParseException) {
257:
258: Debug().writeException(Lvl.WARNING, e);
259:
260: // Parse exception when parsing the SQL.
261: String msg = e.getMessage();
262: msg = StringUtil.searchAndReplace(msg, "\r", "");
263: return new MSQLException(msg, msg, 35, e);
264:
265: } else if (e instanceof TransactionException) {
266:
267: TransactionException te = (TransactionException) e;
268:
269: // Output query that was in error to debug log.
270: Debug().write(Lvl.INFORMATION, this ,
271: "Transaction error on: " + query);
272: Debug().writeException(Lvl.INFORMATION, e);
273:
274: // Denotes a transaction exception.
275: return new MSQLException(e.getMessage(), e.getMessage(),
276: 200 + te.getType(), e);
277: } else {
278:
279: // Output query that was in error to debug log.
280: Debug().write(
281: Lvl.WARNING,
282: this ,
283: "Exception thrown during query processing on: "
284: + query);
285: Debug().writeException(Lvl.WARNING, e);
286:
287: // Error, we need to return exception to client.
288: return new MSQLException(e.getMessage(), e.getMessage(), 1,
289: e);
290:
291: }
292:
293: }
294:
295: /**
296: * Returns a reference implementation object that handles an object that is
297: * either currently being pushed onto the server from the client, or is being
298: * used to reference a large object in an SQLQuery.
299: */
300: private Ref getLargeObjectRefFor(long streamable_object_id,
301: byte type, long object_length) {
302: // Does this mapping already exist?
303: Long s_ob_id = new Long(streamable_object_id);
304: Object ob = blob_id_map.get(s_ob_id);
305: if (ob == null) {
306: // Doesn't exist so create a new blob handler.
307: Ref ref = database_connection.createNewLargeObject(type,
308: object_length);
309: // Make the blob id mapping
310: blob_id_map.put(s_ob_id, ref);
311: // And return it
312: return ref;
313: } else {
314: // Exists so use this blob ref.
315: return (Ref) ob;
316: }
317: }
318:
319: /**
320: * Returns a reference object that handles the given streamable object id
321: * in this database interface. Unlike the other 'getLargeObjectRefFor
322: * method, this will not create a new handle if it has not already been
323: * formed before by this connection. If the large object ref is not found
324: * an exception is generated.
325: */
326: private Ref getLargeObjectRefFor(long streamable_object_id)
327: throws SQLException {
328: Long s_ob_id = new Long(streamable_object_id);
329: Object ob = blob_id_map.get(s_ob_id);
330: if (ob == null) {
331: // This basically means the streamable object hasn't been pushed onto the
332: // server.
333: throw new SQLException(
334: "Invalid streamable object id in query.");
335: } else {
336: return (Ref) ob;
337: }
338: }
339:
340: /**
341: * Removes the large object reference from the HashMap for the given
342: * streamable object id from the HashMap. This allows the Ref to finalize if
343: * the VM does not maintain any other pointers to it, and therefore clean up
344: * the resources in the store.
345: */
346: private Ref flushLargeObjectRefFromCache(long streamable_object_id)
347: throws SQLException {
348: try {
349: Long s_ob_id = new Long(streamable_object_id);
350: Object ob = blob_id_map.remove(s_ob_id);
351: if (ob == null) {
352: // This basically means the streamable object hasn't been pushed onto the
353: // server.
354: throw new SQLException(
355: "Invalid streamable object id in query.");
356: } else {
357: Ref ref = (Ref) ob;
358: // Mark the blob as complete
359: ref.complete();
360: // And return it.
361: return ref;
362: }
363: } catch (IOException e) {
364: Debug().writeException(e);
365: throw new SQLException("IO Error: " + e.getMessage());
366: }
367: }
368:
369: /**
370: * Disposes all resources associated with this object. This clears the
371: * ResultSet map, and NULLs all references to help the garbage collector.
372: * This method would normally be called from implementations of the
373: * 'dispose' method.
374: */
375: protected final void internalDispose() {
376: disposed = true;
377: // Clear the result set mapping
378: clearResultSetMap();
379: user = null;
380: database_connection = null;
381: sql_executor = null;
382: }
383:
384: /**
385: * Checks if the interface is disposed, and if it is generates a friendly
386: * SQLException informing the user of this.
387: */
388: protected final void checkNotDisposed() throws SQLException {
389: if (disposed) {
390: throw new SQLException(
391: "Database interface was disposed (was the connection closed?)");
392: }
393: }
394:
395: // ---------- Implemented from DatabaseInterface ----------
396:
397: public void pushStreamableObjectPart(byte type, long object_id,
398: long object_length, byte[] buf, long offset, int length)
399: throws SQLException {
400: checkNotDisposed();
401:
402: try {
403: // Create or retrieve the object managing this binary object_id in this
404: // connection.
405: Ref ref = getLargeObjectRefFor(object_id, type,
406: object_length);
407: // Push this part of the blob into the object.
408: ref.write(offset, buf, length);
409: } catch (IOException e) {
410: Debug().writeException(e);
411: throw new SQLException("IO Error: " + e.getMessage());
412: }
413:
414: }
415:
416: public QueryResponse execQuery(SQLQuery query) throws SQLException {
417:
418: checkNotDisposed();
419:
420: // Record the query start time
421: long start_time = System.currentTimeMillis();
422: // Where query result eventually resides.
423: ResultSetInfo result_set_info;
424: int result_id = -1;
425:
426: // For each StreamableObject in the SQLQuery object, translate it to a
427: // Ref object that presumably has been pre-pushed onto the server from
428: // the client.
429: boolean blobs_were_flushed = false;
430: Object[] vars = query.getVars();
431: if (vars != null) {
432: for (int i = 0; i < vars.length; ++i) {
433: Object ob = vars[i];
434: // This is a streamable object, so convert it to a *Ref
435: if (ob != null && ob instanceof StreamableObject) {
436: StreamableObject s_object = (StreamableObject) ob;
437: // Flush the streamable object from the cache
438: // Note that this also marks the blob as complete in the blob store.
439: Ref ref = flushLargeObjectRefFromCache(s_object
440: .getIdentifier());
441: // Set the Ref object in the query.
442: vars[i] = ref;
443: // There are blobs in this query that were written to the blob store.
444: blobs_were_flushed = true;
445: }
446: }
447: }
448:
449: // After the blobs have been flushed, we must tell the connection to
450: // flush and synchronize any blobs that have been written to disk. This
451: // is an important (if subtle) step.
452: if (blobs_were_flushed) {
453: database_connection.flushBlobStore();
454: }
455:
456: try {
457:
458: // Evaluate the sql query.
459: Table result = sql_executor.execute(database_connection,
460: query);
461:
462: // Put the result in the result cache... This will lock this object
463: // until it is removed from the result set cache. Returns an id that
464: // uniquely identifies this result set in future communication.
465: // NOTE: This locks the roots of the table so that its contents
466: // may not be altered.
467: result_set_info = new ResultSetInfo(query, result);
468: result_id = addResultSet(result_set_info);
469:
470: } catch (Throwable e) {
471: // If result_id set, then dispose the result set.
472: if (result_id != -1) {
473: disposeResultSet(result_id);
474: }
475:
476: // Handle the throwable during query execution
477: throw handleExecuteThrowable(e, query);
478:
479: }
480:
481: // The time it took the query to execute.
482: long taken = System.currentTimeMillis() - start_time;
483:
484: // Return the query response
485: return new JDIQueryResponse(result_id, result_set_info,
486: (int) taken, "");
487:
488: }
489:
490: public ResultPart getResultPart(int result_id, int row_number,
491: int row_count) throws SQLException {
492:
493: checkNotDisposed();
494:
495: ResultSetInfo table = getResultSet(result_id);
496: if (table == null) {
497: throw new MSQLException("'result_id' invalid.", null, 4,
498: (Throwable) null);
499: }
500:
501: int row_end = row_number + row_count;
502:
503: if (row_number < 0 || row_number >= table.getRowCount()
504: || row_end > table.getRowCount()) {
505: throw new MSQLException("Result part out of range.", null,
506: 4, (Throwable) null);
507: }
508:
509: try {
510: int col_count = table.getColumnCount();
511: ResultPart block = new ResultPart(row_count * col_count);
512: for (int r = row_number; r < row_end; ++r) {
513: for (int c = 0; c < col_count; ++c) {
514: TObject t_object = table.getCellContents(c, r);
515: // If this is a Ref, we must assign it a streamable object
516: // id that the client can use to access the large object.
517: Object client_ob;
518: if (t_object.getObject() instanceof Ref) {
519: Ref ref = (Ref) t_object.getObject();
520: client_ob = new StreamableObject(ref.getType(),
521: ref.getRawSize(), ref.getID());
522: } else {
523: client_ob = t_object.getObject();
524: }
525: block.addElement(client_ob);
526: }
527: }
528: return block;
529: } catch (Throwable e) {
530: Debug().writeException(Lvl.WARNING, e);
531: // If an exception was generated while getting the cell contents, then
532: // throw an SQLException.
533: throw new MSQLException("Exception while reading results: "
534: + e.getMessage(), e.getMessage(), 4, e);
535: }
536:
537: }
538:
539: public void disposeResult(int result_id) throws SQLException {
540: // Check the DatabaseInterface is not dispoed
541: checkNotDisposed();
542: // Dispose the result
543: disposeResultSet(result_id);
544: }
545:
546: public StreamableObjectPart getStreamableObjectPart(int result_id,
547: long streamable_object_id, long offset, int len)
548: throws SQLException {
549:
550: checkNotDisposed();
551:
552: // NOTE: It's important we handle the 'result_id' here and don't just
553: // treat the 'streamable_object_id' as a direct reference into the
554: // blob store. If we don't authenticate a streamable object against its
555: // originating result, we can't guarantee the user has permission to
556: // access the data. This would mean a malicious client could access
557: // BLOB data they may not be permitted to look at.
558: // This also protects us from clients that might send a bogus
559: // streamable_object_id and cause unpredictible results.
560:
561: ResultSetInfo table = getResultSet(result_id);
562: if (table == null) {
563: throw new MSQLException("'result_id' invalid.", null, 4,
564: (Throwable) null);
565: }
566:
567: // Get the large object ref that has been cached in the result set.
568: Ref ref = table.getRef(streamable_object_id);
569: if (ref == null) {
570: throw new MSQLException("'streamable_object_id' invalid.",
571: null, 4, (Throwable) null);
572: }
573:
574: // Restrict the server so that a streamable object part can not exceed
575: // 512 KB.
576: if (len > 512 * 1024) {
577: throw new MSQLException("Request length exceeds 512 KB",
578: null, 4, (Throwable) null);
579: }
580:
581: try {
582: // Read the blob part into the byte array.
583: byte[] blob_part = new byte[len];
584: ref.read(offset, blob_part, len);
585:
586: // And return as a StreamableObjectPart object.
587: return new StreamableObjectPart(blob_part);
588:
589: } catch (IOException e) {
590: throw new MSQLException("Exception while reading blob: "
591: + e.getMessage(), e.getMessage(), 4, e);
592: }
593:
594: }
595:
596: public void disposeStreamableObject(int result_id,
597: long streamable_object_id) throws SQLException {
598: checkNotDisposed();
599:
600: // This actually isn't as an important step as I had originally designed
601: // for. To dispose we simply remove the blob ref from the cache in the
602: // result. If this doesn't happen, nothing seriously bad will happen.
603:
604: ResultSetInfo table = getResultSet(result_id);
605: if (table == null) {
606: throw new MSQLException("'result_id' invalid.", null, 4,
607: (Throwable) null);
608: }
609:
610: // Remove this Ref from the table
611: table.removeRef(streamable_object_id);
612:
613: }
614:
615: // ---------- Clean up ----------
616:
617: /**
618: * Clean up if this object is GC'd.
619: */
620: public void finalize() throws Throwable {
621: super .finalize();
622: try {
623: if (!disposed) {
624: dispose();
625: }
626: } catch (Throwable e) { /* ignore this */
627: }
628: }
629:
630: // ---------- Inner classes ----------
631:
632: /**
633: * The response to a query.
634: */
635: private final static class JDIQueryResponse implements
636: QueryResponse {
637:
638: int result_id;
639: ResultSetInfo result_set_info;
640: int query_time;
641: String warnings;
642:
643: JDIQueryResponse(int result_id, ResultSetInfo result_set_info,
644: int query_time, String warnings) {
645: this .result_id = result_id;
646: this .result_set_info = result_set_info;
647: this .query_time = query_time;
648: this .warnings = warnings;
649: }
650:
651: public int getResultID() {
652: return result_id;
653: }
654:
655: public int getQueryTimeMillis() {
656: return query_time;
657: }
658:
659: public int getRowCount() {
660: return result_set_info.getRowCount();
661: }
662:
663: public int getColumnCount() {
664: return result_set_info.getColumnCount();
665: }
666:
667: public ColumnDescription getColumnDescription(int n) {
668: return result_set_info.getFields()[n];
669: }
670:
671: public String getWarnings() {
672: return warnings;
673: }
674:
675: }
676:
677: /**
678: * Whenever a ResultSet is generated, this object contains the result set.
679: * This class only allows calls to safe methods in Table.
680: * <p>
681: * NOTE: This is safe provided,
682: * a) The column topology doesn't change (NOTE: issues with ALTER command)
683: * b) Root locking prevents modification to rows.
684: */
685: private final static class ResultSetInfo {
686:
687: /**
688: * The SQLQuery that was executed to produce this result.
689: */
690: private SQLQuery query;
691:
692: /**
693: * The table that is the result.
694: */
695: private Table result;
696:
697: /**
698: * A set of ColumnDescription that describes each column in the ResultSet.
699: */
700: private ColumnDescription[] col_desc;
701:
702: /**
703: * IntegerVector that contains the row index into the table for each
704: * row of the result. For example, row.intAt(5) will return the row index
705: * of 'result' of the 5th row item.
706: */
707: private IntegerVector row_index_map;
708:
709: /**
710: * Set to true if the result table has a SimpleRowEnumeration, therefore
711: * guarenteeing we do not need to store a row lookup list.
712: */
713: private boolean result_is_simple_enum;
714:
715: /**
716: * The number of rows in the result.
717: */
718: private int result_row_count;
719:
720: /**
721: * Incremented when we lock roots.
722: */
723: private int locked;
724:
725: /**
726: * A HashMap of blob_reference_id values to Ref objects used to handle
727: * and streamable objects in this result.
728: */
729: private HashMap streamable_blob_map;
730:
731: /**
732: * Constructs the result set.
733: */
734: ResultSetInfo(SQLQuery query, Table table) {
735: this .query = query;
736: this .result = table;
737: this .streamable_blob_map = new HashMap();
738:
739: result_row_count = table.getRowCount();
740:
741: // HACK: Read the contents of the first row so that we can pick up
742: // any errors with reading, and also to fix the 'uniquekey' bug
743: // that causes a new transaction to be started if 'uniquekey' is
744: // a column and the value is resolved later.
745: RowEnumeration row_enum = table.rowEnumeration();
746: if (row_enum.hasMoreRows()) {
747: int row_index = row_enum.nextRowIndex();
748: for (int c = 0; c < table.getColumnCount(); ++c) {
749: table.getCellContents(c, row_index);
750: }
751: }
752: // If simple enum, note it here
753: result_is_simple_enum = (row_enum instanceof SimpleRowEnumeration);
754: row_enum = null;
755:
756: // Build 'row_index_map' if not a simple enum
757: if (!result_is_simple_enum) {
758: row_index_map = new IntegerVector(table.getRowCount());
759: RowEnumeration enum = table.rowEnumeration();
760: while (enum.hasMoreRows()) {
761: row_index_map.addInt(enum.nextRowIndex());
762: }
763: }
764:
765: // This is a safe operation provides we are shared.
766: // Copy all the TableField columns from the table to our own
767: // ColumnDescription array, naming each column by what is returned from
768: // the 'getResolvedVariable' method.
769: final int col_count = table.getColumnCount();
770: col_desc = new ColumnDescription[col_count];
771: for (int i = 0; i < col_count; ++i) {
772: Variable v = table.getResolvedVariable(i);
773: String field_name;
774: if (v.getTableName() == null) {
775: // This means the column is an alias
776: field_name = "@a" + v.getName();
777: }
778: else {
779: // This means the column is an schema/table/column reference
780: field_name = "@f" + v.toString();
781: }
782: col_desc[i] =
783: table.getColumnDefAt(i).columnDescriptionValue(field_name);
784:// col_desc[i] = new ColumnDescription(field_name, table.getFieldAt(i));
785: }
786:
787: locked = 0;
788: }
789:
790: /**
791: * Returns the SQLQuery that was used to produce this result.
792: */
793: SQLQuery getSQLQuery() {
794: return query;
795: }
796:
797: /**
798: * Returns a Ref that has been cached in this table object by its
799: * identifier value.
800: */
801: Ref getRef(long id) {
802: return (Ref) streamable_blob_map.get(new Long(id));
803: }
804:
805: /**
806: * Removes a Ref that has been cached in this table object by its
807: * identifier value.
808: */
809: void removeRef(long id) {
810: streamable_blob_map.remove(new Long(id));
811: }
812:
813: /**
814: * Disposes this object.
815: */
816: void dispose() {
817: while (locked > 0) {
818: unlockRoot(-1);
819: }
820: result = null;
821: row_index_map = null;
822: col_desc = null;
823: }
824:
825: /**
826: * Gets the cell contents of the cell at the given row/column.
827: * <p>
828: * Safe only if roots are locked.
829: */
830: TObject getCellContents(int column, int row) {
831: if (locked > 0) {
832: int real_row;
833: if (result_is_simple_enum) {
834: real_row = row;
835: } else {
836: real_row = row_index_map.intAt(row);
837: }
838: TObject tob = result.getCellContents(column, real_row);
839:
840: // If this is a large object reference then cache it so a streamable
841: // object can reference it via this result.
842: if (tob.getObject() instanceof Ref) {
843: Ref ref = (Ref) tob.getObject();
844: streamable_blob_map.put(new Long(ref.getID()), ref);
845: }
846:
847: return tob;
848: } else {
849: throw new RuntimeException("Table roots not locked!");
850: }
851: }
852:
853: /**
854: * Returns the column count.
855: */
856: int getColumnCount() {
857: return result.getColumnCount();
858: }
859:
860: /**
861: * Returns the row count.
862: */
863: int getRowCount() {
864: return result_row_count;
865: }
866:
867: /**
868: * Returns the ColumnDescription array of all the columns in the result.
869: */
870: ColumnDescription[] getFields() {
871: return col_desc;
872: }
873:
874: /**
875: * Locks the root of the result set.
876: */
877: void lockRoot(int key) {
878: result.lockRoot(key);
879: ++locked;
880: }
881:
882: /**
883: * Unlocks the root of the result set.
884: */
885: void unlockRoot(int key) {
886: result.unlockRoot(key);
887: --locked;
888: }
889:
890: }
891:
892: }
|