/*

   Derby - Class org.apache.derby.iapi.db.OnlineCompress

   Licensed to the Apache Software Foundation (ASF) under one or more
   contributor license agreements.  See the NOTICE file distributed with
   this work for additional information regarding copyright ownership.
   The ASF licenses this file to you under the Apache License, Version 2.0
   (the "License"); you may not use this file except in compliance with
   the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 */

package org.apache.derby.iapi.db;

import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.error.PublicAPI;
import org.apache.derby.iapi.sql.dictionary.DataDictionary;
import org.apache.derby.iapi.sql.dictionary.SchemaDescriptor;
import org.apache.derby.iapi.sql.dictionary.TableDescriptor;
import org.apache.derby.iapi.sql.dictionary.ColumnDescriptor;
import org.apache.derby.iapi.sql.dictionary.ColumnDescriptorList;
import org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor;

import org.apache.derby.iapi.sql.execute.ExecRow;

import org.apache.derby.iapi.types.DataValueDescriptor;

import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;
import org.apache.derby.iapi.sql.conn.ConnectionUtil;

import org.apache.derby.iapi.store.access.TransactionController;
import org.apache.derby.iapi.types.RowLocation;
import org.apache.derby.iapi.store.access.ScanController;
import org.apache.derby.iapi.store.access.ConglomerateController;
import org.apache.derby.iapi.store.access.GroupFetchScanController;
import org.apache.derby.iapi.store.access.RowUtil;
import org.apache.derby.iapi.store.access.Qualifier;

import org.apache.derby.iapi.services.sanity.SanityManager;

import org.apache.derby.iapi.reference.SQLState;

import java.sql.SQLException;

/**

Implementation of SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE().
<p>
Code which implements the following system procedure:

void SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE(
    IN SCHEMANAME        VARCHAR(128),
    IN TABLENAME         VARCHAR(128),
    IN PURGE_ROWS        SMALLINT,
    IN DEFRAGMENT_ROWS   SMALLINT,
    IN TRUNCATE_END      SMALLINT)
<p>
Use the SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE system procedure to reclaim
unused, allocated space in a table and its indexes. Typically, unused allocated
space exists when a large amount of data is deleted from a table, and there
have not been subsequent inserts to use the space freed by the deletes.
By default, Derby does not return unused space to the operating system. For
example, once a page has been allocated to a table or index, it is not
automatically returned to the operating system until the table or index is
destroyed. SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE allows you to return unused
space to the operating system.
<p>
This system procedure can be used to force three levels of in-place compression
of a SQL table: PURGE_ROWS, DEFRAGMENT_ROWS, and TRUNCATE_END. Unlike
SYSCS_UTIL.SYSCS_COMPRESS_TABLE(), all work is done in place in the existing
table/index.
<p>
Syntax:
SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE(
    IN SCHEMANAME        VARCHAR(128),
    IN TABLENAME         VARCHAR(128),
    IN PURGE_ROWS        SMALLINT,
    IN DEFRAGMENT_ROWS   SMALLINT,
    IN TRUNCATE_END      SMALLINT)
<p>
SCHEMANAME:
An input argument of type VARCHAR(128) that specifies the schema of the table.
Passing a null will result in an error.
<p>
TABLENAME:
An input argument of type VARCHAR(128) that specifies the table name of the
table. The string must exactly match the case of the table name; an
argument of "Fred" will be passed to SQL as the delimited identifier 'Fred'.
Passing a null will result in an error.
<p>
PURGE_ROWS:
If PURGE_ROWS is set to non-zero then a single pass is made through the table
which will purge committed deleted rows from the table. This space is then
available for future inserted rows, but remains allocated to the table.
As this option scans every page of the table, its performance is linearly
related to the size of the table.
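
For example, to run only the purge pass on a table CUSTOMER in a schema US
(the same table used in the examples below):
call SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE('US', 'CUSTOMER', 1, 0, 0);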
<p>
DEFRAGMENT_ROWS:
If DEFRAGMENT_ROWS is set to non-zero then a single defragment pass is made
which will move existing rows from the end of the table towards the front
of the table. The goal of the defragment run is to empty a set of pages
at the end of the table which can then be returned to the OS by the
TRUNCATE_END option. It is recommended to run DEFRAGMENT_ROWS only if the
TRUNCATE_END option is also specified. This option scans the whole table and
needs to update index entries for every base table row move, so execution
time is linearly related to the size of the table.
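
For example, to defragment and then truncate the same table, skipping the
purge pass:
call SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE('US', 'CUSTOMER', 0, 1, 1);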
<p>
TRUNCATE_END:
If TRUNCATE_END is set to non-zero then all contiguous pages at the end of
the table will be returned to the OS. Running the PURGE_ROWS and/or
DEFRAGMENT_ROWS passes first may increase the number of pages affected.
This option itself does no scans of the table, so it performs on the order
of a few system calls.
<p>
SQL example:
To compress a table called CUSTOMER in a schema called US, using all
available compress options:
call SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE('US', 'CUSTOMER', 1, 1, 1);

To quickly return just the empty free space at the end of the same table
(this runs much faster than all three phases, but will likely return
less space):
call SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE('US', 'CUSTOMER', 0, 0, 1);

Java example:
To compress a table called CUSTOMER in a schema called US, using all
available compress options:

CallableStatement cs = conn.prepareCall
("CALL SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE(?, ?, ?, ?, ?)");
cs.setString(1, "US");
cs.setString(2, "CUSTOMER");
cs.setShort(3, (short) 1);
cs.setShort(4, (short) 1);
cs.setShort(5, (short) 1);
cs.execute();

To quickly return just the empty free space at the end of the same table
(this runs much faster than all three phases, but will likely return
less space):

CallableStatement cs = conn.prepareCall
("CALL SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE(?, ?, ?, ?, ?)");
cs.setString(1, "US");
cs.setString(2, "CUSTOMER");
cs.setShort(3, (short) 0);
cs.setShort(4, (short) 0);
cs.setShort(5, (short) 1);
cs.execute();

<p>
It is recommended that the SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE procedure
be issued in auto-commit mode.
<p>
Note: This procedure acquires an exclusive table lock on the table being
compressed. All statement plans dependent on the table or its indexes are
invalidated. For information on identifying unused space, see the Derby
Server and Administration Guide.
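<p>
A minimal sketch of honoring the auto-commit recommendation (assuming
"conn" is an open java.sql.Connection and "cs" is prepared as in the Java
example above):

boolean saved = conn.getAutoCommit();
conn.setAutoCommit(true);
cs.execute();
conn.setAutoCommit(saved); // restore the caller's setting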

TODO LIST:
o defragment requires table level lock in nested user transaction, which
  will conflict with user lock on same table in user transaction.

**/
public class OnlineCompress {

    /** no requirement for a constructor */
    private OnlineCompress() {
    }

    /**
     * Implementation of SYSCS_UTIL.SYSCS_INPLACE_COMPRESS_TABLE().
     * <p>
     * Top level implementation of the system procedure. All the
     * real work is found in the other routines in this file implementing
     * the 3 phases of in-place compression: purge, defragment, and truncate.
     * <p>
     * @param schemaName        schema name of table, required
     * @param tableName         table name to be compressed
     * @param purgeRows         if true, do a purge pass on the table
     * @param defragmentRows    if true, do a defragment pass on the table
     * @param truncateEnd       if true, return empty pages at end to OS
     *
     * @exception SQLException  if a database error occurs
     **/
    public static void compressTable(String schemaName,
            String tableName, boolean purgeRows,
            boolean defragmentRows, boolean truncateEnd)
            throws SQLException {
        LanguageConnectionContext lcc = ConnectionUtil.getCurrentLCC();
        TransactionController tc = lcc.getTransactionExecute();

        try {
            DataDictionary data_dictionary = lcc.getDataDictionary();

            // Each of the following may give up locks allowing ddl on the
            // table, so each phase needs to do the data dictionary lookup.
            // The order is important as it makes sense to first purge
            // deleted rows, then defragment existing non-deleted rows, and
            // finally to truncate the end of the file which may have been
            // made larger by the previous purge/defragment pass.

            if (purgeRows)
                purgeRows(schemaName, tableName, data_dictionary, tc);

            if (defragmentRows)
                defragmentRows(schemaName, tableName, data_dictionary,
                        tc);

            if (truncateEnd)
                truncateEnd(schemaName, tableName, data_dictionary, tc);
        } catch (StandardException se) {
            throw PublicAPI.wrapStandardException(se);
        }

    }

    /**
     * Defragment rows in the given table.
     * <p>
     * Scans the rows at the end of a table and moves them to free spots
     * towards the beginning of the table. In the same transaction all
     * associated indexes are updated to reflect the new location of the
     * base table row.
     * <p>
     * After a defragment pass, if it was possible, there will be a set of
     * empty pages at the end of the table which can be returned to the
     * operating system by calling truncateEnd(). The allocation bit
     * maps will be set so that new inserts will tend to go to empty and
     * half filled pages starting from the front of the conglomerate.
     *
     * @param schemaName        schema of table to defragment
     * @param tableName         name of table to defragment
     * @param data_dictionary   An open data dictionary to look up the table in.
     * @param tc                transaction controller to use to do updates.
     *
     * @exception SQLException  if a database error occurs
     **/
    private static void defragmentRows(String schemaName,
            String tableName, DataDictionary data_dictionary,
            TransactionController tc) throws SQLException {
        GroupFetchScanController base_group_fetch_cc = null;
        int num_indexes = 0;

        int[][] index_col_map = null;
        ScanController[] index_scan = null;
        ConglomerateController[] index_cc = null;
        DataValueDescriptor[][] index_row = null;

        LanguageConnectionContext lcc = ConnectionUtil.getCurrentLCC();
        TransactionController nested_tc = null;

        try {

            // Note that nested_tc is still null when these lookups run;
            // the nested user transaction is only started afterwards.
            SchemaDescriptor sd = data_dictionary.getSchemaDescriptor(
                    schemaName, nested_tc, true);
            TableDescriptor td = data_dictionary.getTableDescriptor(
                    tableName, sd);
            nested_tc = tc.startNestedUserTransaction(false);

            if (td == null) {
                throw StandardException.newException(
                        SQLState.LANG_TABLE_NOT_FOUND, schemaName + "."
                                + tableName);
            }

            switch (td.getTableType()) {
            /* Skip views and vti tables */
            case TableDescriptor.VIEW_TYPE:
            case TableDescriptor.VTI_TYPE:
                return;
            // other types give various errors here
            // DERBY-719,DERBY-720
            default:
                break;
            }

            ConglomerateDescriptor heapCD = td
                    .getConglomerateDescriptor(td
                            .getHeapConglomerateId());

            /* Get a row template for the base table */
            ExecRow baseRow = lcc.getExecutionContext()
                    .getExecutionFactory().getValueRow(
                            td.getNumberOfColumns());

            /* Fill the row with nulls of the correct type */
            ColumnDescriptorList cdl = td.getColumnDescriptorList();
            int cdlSize = cdl.size();

            for (int index = 0; index < cdlSize; index++) {
                ColumnDescriptor cd = (ColumnDescriptor) cdl
                        .elementAt(index);
                baseRow.setColumn(cd.getPosition(), cd.getType()
                        .getNull());
            }

            // Moved rows are processed in groups of up to 100 per
            // fetchNextGroup() call.
            DataValueDescriptor[][] row_array = new DataValueDescriptor[100][];
            row_array[0] = baseRow.getRowArray();
            RowLocation[] old_row_location_array = new RowLocation[100];
            RowLocation[] new_row_location_array = new RowLocation[100];

            // Create the following 4 arrays which will be used to update
            // each index as the scan moves rows about the heap as part of
            // the compress:
            //     index_col_map - map location of index cols in the base
            //                     row, ie. index_col_map[0] is the column
            //                     offset of the 1st key column in the base
            //                     row. All offsets are 0 based.
            //     index_scan - open ScanController used to delete the old
            //                  index row
            //     index_cc - open ConglomerateController used to insert
            //                the new row
            //     index_row - row template used to build the index rows
            //                 to delete and insert
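            //
            // For each row that the defragmenter moves, fixIndex() below
            // deletes the index entry keyed on the old RowLocation via
            // index_scan and inserts an entry keyed on the new RowLocation
            // via index_cc.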

            ConglomerateDescriptor[] conglom_descriptors = td
                    .getConglomerateDescriptors();

            // conglom_descriptors has an entry for the heap conglomerate
            // and each one of its indexes.
            num_indexes = conglom_descriptors.length - 1;

            // if indexes exist, set up data structures to update them
            if (num_indexes > 0) {
                // allocate arrays
                index_col_map = new int[num_indexes][];
                index_scan = new ScanController[num_indexes];
                index_cc = new ConglomerateController[num_indexes];
                index_row = new DataValueDescriptor[num_indexes][];

                setup_indexes(nested_tc, td, index_col_map, index_scan,
                        index_cc, index_row);

            }

            /* Open the heap for update; the scan both reads rows at the
               end of the table and moves them toward the front. */
            base_group_fetch_cc = nested_tc.defragmentConglomerate(td
                    .getHeapConglomerateId(), false, true,
                    TransactionController.OPENMODE_FORUPDATE,
                    TransactionController.MODE_TABLE,
                    TransactionController.ISOLATION_SERIALIZABLE);

            int num_rows_fetched = 0;
            while ((num_rows_fetched = base_group_fetch_cc
                    .fetchNextGroup(row_array, old_row_location_array,
                            new_row_location_array)) != 0) {
                if (num_indexes > 0) {
                    for (int row = 0; row < num_rows_fetched; row++) {
                        for (int index = 0; index < num_indexes; index++) {
                            fixIndex(row_array[row], index_row[index],
                                    old_row_location_array[row],
                                    new_row_location_array[row],
                                    index_cc[index], index_scan[index],
                                    index_col_map[index]);
                        }
                    }
                }
            }

            // TODO - It would be better if commits happened more frequently
            // in the nested transaction, but to do that there has to be more
            // logic to catch a ddl that might jump in the middle of the
            // above loop and invalidate the various table control structures
            // which are needed to properly update the indexes. For example
            // the above loop would corrupt an index added midway through
            // the loop if not properly handled. See DERBY-1188.
            nested_tc.commit();

        } catch (StandardException se) {
            throw PublicAPI.wrapStandardException(se);
        } finally {
            try {
                /* Clean up before we leave */
                if (base_group_fetch_cc != null) {
                    base_group_fetch_cc.close();
                    base_group_fetch_cc = null;
                }

                if (num_indexes > 0) {
                    for (int i = 0; i < num_indexes; i++) {
                        if (index_scan != null && index_scan[i] != null) {
                            index_scan[i].close();
                            index_scan[i] = null;
                        }
                        if (index_cc != null && index_cc[i] != null) {
                            index_cc[i].close();
                            index_cc[i] = null;
                        }
                    }
                }

                if (nested_tc != null) {
                    nested_tc.destroy();
                }

            } catch (StandardException se) {
                throw PublicAPI.wrapStandardException(se);
            }
        }

        return;
    }

    /**
     * Purge committed deleted rows from conglomerate.
     * <p>
     * Scans the table and purges any committed deleted rows from the
     * table. If all rows on a page are purged, then the page is also
     * reclaimed.
     * <p>
     *
     * @param schemaName        schema of table to purge
     * @param tableName         name of table to purge
     * @param data_dictionary   An open data dictionary to look up the table in.
     * @param tc                transaction controller to use to do updates.
     *
     * @exception StandardException  Standard exception policy.
     **/
    private static void purgeRows(String schemaName, String tableName,
            DataDictionary data_dictionary, TransactionController tc)
            throws StandardException {
        SchemaDescriptor sd = data_dictionary.getSchemaDescriptor(
                schemaName, tc, true);
        TableDescriptor td = data_dictionary.getTableDescriptor(
                tableName, sd);

        if (td == null) {
            throw StandardException.newException(
                    SQLState.LANG_TABLE_NOT_FOUND, schemaName + "."
                            + tableName);
        }

        switch (td.getTableType()) {
        /* Skip views and vti tables */
        case TableDescriptor.VIEW_TYPE:
        case TableDescriptor.VTI_TYPE:
            break;
        // other types give various errors here
        // DERBY-719,DERBY-720
        default: {

            // Purge the heap conglomerate and all of its indexes.
            ConglomerateDescriptor[] conglom_descriptors = td
                    .getConglomerateDescriptors();

            for (int cd_idx = 0; cd_idx < conglom_descriptors.length; cd_idx++) {
                ConglomerateDescriptor cd = conglom_descriptors[cd_idx];

                tc.purgeConglomerate(cd.getConglomerateNumber());
            }
        }
        }

        return;
    }

    /**
     * Truncate end of conglomerate.
     * <p>
     * Returns the contiguous free space at the end of the table back to
     * the operating system. Takes care of space allocation bit maps, and
     * the OS call to return the actual space.
     * <p>
     *
     * @param schemaName        schema of table to truncate
     * @param tableName         name of table to truncate
     * @param data_dictionary   An open data dictionary to look up the table in.
     * @param tc                transaction controller to use to do updates.
     *
     * @exception StandardException  Standard exception policy.
     **/
    private static void truncateEnd(String schemaName,
            String tableName, DataDictionary data_dictionary,
            TransactionController tc) throws StandardException {
        SchemaDescriptor sd = data_dictionary.getSchemaDescriptor(
                schemaName, tc, true);
        TableDescriptor td = data_dictionary.getTableDescriptor(
                tableName, sd);

        if (td == null) {
            throw StandardException.newException(
                    SQLState.LANG_TABLE_NOT_FOUND, schemaName + "."
                            + tableName);
        }

        switch (td.getTableType()) {
        /* Skip views and vti tables */
        case TableDescriptor.VIEW_TYPE:
        case TableDescriptor.VTI_TYPE:
            break;
        // other types give various errors here
        // DERBY-719,DERBY-720
        default: {
            ConglomerateDescriptor[] conglom_descriptors = td
                    .getConglomerateDescriptors();

            for (int cd_idx = 0; cd_idx < conglom_descriptors.length; cd_idx++) {
                ConglomerateDescriptor cd = conglom_descriptors[cd_idx];

                tc.compressConglomerate(cd.getConglomerateNumber());
            }
        }
        }

        return;
    }

    private static void setup_indexes(TransactionController tc,
            TableDescriptor td, int[][] index_col_map,
            ScanController[] index_scan,
            ConglomerateController[] index_cc,
            DataValueDescriptor[][] index_row) throws StandardException {

        // Initialize the following 4 arrays which will be used to update
        // each index as the scan moves rows about the heap as part of
        // the compress:
        //     index_col_map - map location of index cols in the base row,
        //                     ie. index_col_map[0] is the column offset of
        //                     the 1st key column in the base row. All
        //                     offsets are 0 based.
        //     index_scan - open ScanController used to delete the old
        //                  index row
        //     index_cc - open ConglomerateController used to insert the
        //                new row
        //     index_row - row template used to build the index rows to
        //                 delete and insert

        ConglomerateDescriptor[] conglom_descriptors = td
                .getConglomerateDescriptors();

        int index_idx = 0;
        for (int cd_idx = 0; cd_idx < conglom_descriptors.length; cd_idx++) {
            ConglomerateDescriptor index_cd = conglom_descriptors[cd_idx];

            if (!index_cd.isIndex()) {
                // skip the heap descriptor entry
                continue;
            }

            // ScanControllers are used to delete old index row
            index_scan[index_idx] = tc.openScan(
                    index_cd.getConglomerateNumber(),
                    true,   // hold
                    TransactionController.OPENMODE_FORUPDATE,
                    TransactionController.MODE_TABLE,
                    TransactionController.ISOLATION_SERIALIZABLE,
                    null,   // scanColumnList - the full row is retrieved,
                            // so that the full row can be used for
                            // start/stop keys
                    null,   // startKeyValue - will be reset with reopenScan()
                    0,      // startSearchOperator
                    null,   // qualifier
                    null,   // stopKeyValue - will be reset with reopenScan()
                    0);     // stopSearchOperator

            // ConglomerateControllers are used to insert new index row
            index_cc[index_idx] = tc.openConglomerate(
                    index_cd.getConglomerateNumber(),
                    true,   // hold
                    TransactionController.OPENMODE_FORUPDATE,
                    TransactionController.MODE_TABLE,
                    TransactionController.ISOLATION_SERIALIZABLE);

            // build column map to allow index row to be built from base row
            int[] baseColumnPositions = index_cd.getIndexDescriptor()
                    .baseColumnPositions();
            int[] zero_based_map = new int[baseColumnPositions.length];

            for (int i = 0; i < baseColumnPositions.length; i++) {
                zero_based_map[i] = baseColumnPositions[i] - 1;
            }
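
            // For example (hypothetical index): an index declared on base
            // columns (3, 1) has baseColumnPositions = {3, 1}, which maps
            // to zero_based_map = {2, 0}, so the first index key column is
            // built from base row offset 2.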

            index_col_map[index_idx] = zero_based_map;

            // build row array to delete from index and insert into index
            // length is length of column map + 1 for RowLocation.
            index_row[index_idx] = new DataValueDescriptor[baseColumnPositions.length + 1];

            index_idx++;
        }

        return;
    }

    /**
     * Delete old index row and insert new index row in input index.
     * <p>
     *
     * @param base_row       all columns of base row
     * @param index_row      an index row template, filled in by this routine
     * @param old_row_loc    old location of base row, used to delete the
     *                       old index entry
     * @param new_row_loc    new location of base row, used to insert the
     *                       new index entry
     * @param index_cc       index conglomerate to insert new row
     * @param index_scan     index scan to delete old entry
     * @param index_col_map  description of mapping of index row to base row
     *
     * @exception StandardException  Standard exception policy.
     **/
    private static void fixIndex(DataValueDescriptor[] base_row,
            DataValueDescriptor[] index_row, RowLocation old_row_loc,
            RowLocation new_row_loc, ConglomerateController index_cc,
            ScanController index_scan, int[] index_col_map)
            throws StandardException {
        if (SanityManager.DEBUG) {
            // index_col_map should describe all columns in the index row
            // except for the final column, which is the RowLocation.
            SanityManager.ASSERT(index_col_map != null);
            SanityManager.ASSERT(index_row != null);
            SanityManager
                    .ASSERT((index_col_map.length == (index_row.length - 1)));
        }

        // Create the index row to delete from the base row, using the
        // column map.
        for (int index = 0; index < index_col_map.length; index++) {
            index_row[index] = base_row[index_col_map[index]];
        }
        // The last column in the index row is the RowLocation.
        index_row[index_row.length - 1] = old_row_loc;

        // Position the scan for the delete; the scan should already be
        // open. This is done by setting the start key to the full key
        // with GE and the stop key to the full key with GT, which
        // brackets exactly the single matching index row.
        index_scan.reopenScan(index_row, ScanController.GE,
                (Qualifier[][]) null, index_row, ScanController.GT);

        // Advance the scan; it is a serious problem if the scan does not
        // find the row.
        if (index_scan.next()) {
            index_scan.delete();
        } else {
            // Didn't find the row we wanted to delete.
            if (SanityManager.DEBUG) {
                SanityManager.THROWASSERT("Did not find row to delete."
                        + "base_row = " + RowUtil.toString(base_row)
                        + "index_row = " + RowUtil.toString(index_row));
            }
        }

        // insert the new index row into the conglomerate
        index_row[index_row.length - 1] = new_row_loc;

        index_cc.insert(index_row);

        return;
    }
}
|