/* Copyright (c) 2001-2005, The HSQL Development Group
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the HSQL Development Group nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL HSQL DEVELOPMENT GROUP, HSQLDB.ORG,
 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

package org.hsqldb.persist;

import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.OutputStream;

import org.hsqldb.Database;
import org.hsqldb.HsqlException;
import org.hsqldb.Session;
import org.hsqldb.Table;
import org.hsqldb.Trace;
import org.hsqldb.index.RowIterator;
import org.hsqldb.lib.DoubleIntIndex;
import org.hsqldb.lib.HsqlArrayList;
import org.hsqldb.lib.StopWatch;
import org.hsqldb.lib.Storage;
import org.hsqldb.rowio.RowInputInterface;
import org.hsqldb.rowio.RowOutputBinary;

// oj@openoffice.org - changed to file access api

/**
 * Routine to defrag the *.data file.
 *
 * A first pass iterates over the primary index of each cached table to
 * find the disk position of every row and stores it, together with the
 * row's position in the new file, in a lookup table.
 *
 * A second pass over the primary index writes each row to the new disk
 * image, translating the old pointers to the new positions.
 *
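 * A minimal sketch of the expected call sequence (the driver shown here is
 * an assumption; the owning cache is responsible for the actual sequencing
 * and for swapping the files):
 *
 * <pre>
 * DataFileDefrag defrag = new DataFileDefrag(database, cache, filename);
 *
 * defrag.process();                 // writes the compacted filename + ".new"
 * // ... close the old cache and move the .new file into place ...
 * defrag.updateTableIndexRoots();   // install the translated index roots
 * defrag.updateTransactionRowIDs(); // translate uncommitted row ids
 * </pre>
 *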
 * @author fredt@users
 * @version 1.8.0
 * @since 1.7.2
 */
final class DataFileDefrag {

    BufferedOutputStream fileStreamOut;
    long                 fileOffset;
    StopWatch            stopw = new StopWatch();
    String               filename;
    int[][]              rootsList;
    Database             database;
    DataFileCache        cache;
    int                  scale;
    DoubleIntIndex       transactionRowLookup;

    DataFileDefrag(Database db, DataFileCache cache, String filename) {

        this.database = db;
        this.cache    = cache;
        this.scale    = cache.cacheFileScale;
        this.filename = filename;
    }

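    /**
     * Writes all cached table rows, followed by any uncommitted transaction
     * rows, to a new file named filename + ".new", then stores the
     * end-of-file position in the new file's header. The partial .new file
     * is removed if the transfer does not complete.
     */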
    void process() throws HsqlException, IOException {

        boolean complete = false;

        Trace.printSystemOut("Defrag Transfer begins");

        transactionRowLookup = database.txManager.getTransactionIDList();

        HsqlArrayList allTables = database.schemaManager.getAllTables();

        rootsList = new int[allTables.size()][];

        Storage dest = null;

        try {
            OutputStream fos =
                database.getFileAccess().openOutputStreamElement(filename
                    + ".new");

            fileStreamOut = new BufferedOutputStream(fos, 1 << 12);

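            // reserve the file header by writing zero bytes up to
            // INITIAL_FREE_POS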
            for (int i = 0; i < DataFileCache.INITIAL_FREE_POS; i++) {
                fileStreamOut.write(0);
            }

            fileOffset = DataFileCache.INITIAL_FREE_POS;

            for (int i = 0, tSize = allTables.size(); i < tSize; i++) {
                Table t = (Table) allTables.get(i);

                if (t.getTableType() == Table.CACHED_TABLE) {
                    int[] rootsArray = writeTableToDataFile(t);

                    rootsList[i] = rootsArray;
                } else {
                    rootsList[i] = null;
                }

                Trace.printSystemOut(t.getName().name + " complete");
            }

            writeTransactionRows();
            fileStreamOut.close();

            fileStreamOut = null;

            // write out the end of file position
            dest = ScaledRAFile.newScaledRAFile(
                database, filename + ".new", false,
                ScaledRAFile.DATA_FILE_RAF,
                database.getURLProperties().getProperty(
                    "storage_class_name"),
                database.getURLProperties().getProperty("storage_key"));

            dest.seek(DataFileCache.LONG_FREE_POS_POS);
            dest.writeLong(fileOffset);
            dest.close();

            dest = null;

            for (int i = 0, size = rootsList.length; i < size; i++) {
                int[] roots = rootsList[i];

                if (roots != null) {
                    Trace.printSystemOut(
                        org.hsqldb.lib.StringUtil.getList(roots, ",", ""));
                }
            }

            complete = true;
        } catch (IOException e) {
            throw Trace.error(Trace.FILE_IO_ERROR, filename + ".new");
        } catch (OutOfMemoryError e) {
            throw Trace.error(Trace.OUT_OF_MEMORY);
        } finally {
            if (fileStreamOut != null) {
                fileStreamOut.close();
            }

            if (dest != null) {
                dest.close();
            }

            if (!complete) {
                database.getFileAccess().removeElement(filename + ".new");
            }
        }

        //Trace.printSystemOut("Transfer complete: ", stopw.elapsedTime());
    }

    /**
     * Called from outside this class after the defrag has fully completed;
     * installs the translated index roots in each cached table.
     */
    void updateTableIndexRoots() throws HsqlException {

        HsqlArrayList allTables = database.schemaManager.getAllTables();

        for (int i = 0, size = allTables.size(); i < size; i++) {
            Table t = (Table) allTables.get(i);

            if (t.getTableType() == Table.CACHED_TABLE) {
                int[] rootsArray = rootsList[i];

                t.setIndexRoots(rootsArray);
            }
        }
    }

    /**
     * Called from outside this class after the defrag has fully completed;
     * converts the row ids held by uncommitted transactions to the new
     * row positions.
     */
    void updateTransactionRowIDs() throws HsqlException {
        database.txManager.convertTransactionIDs(transactionRowLookup);
    }

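    /**
     * Performs the two-pass transfer for a single table. The first pass
     * walks the primary index and maps each row's old disk position to
     * its new position (in scale units); the second pass writes each row
     * out, using the map to translate the pointers the row contains.
     * The snippet below is an illustrative sketch of the lookup protocol
     * (variable names are examples only, not from the method body):
     *
     * <pre>
     * // pass 1: record old position -> new position
     * pointerLookup.addUnsorted(oldPos, (int) (newPos / scale));
     *
     * // pass 2 and root translation: resolve an old position
     * int idx = pointerLookup.findFirstEqualKeyIndex(oldPos);    // -1 if absent
     * int translated = pointerLookup.getValue(idx);
     * </pre>
     *
     * @return the table's index roots, translated to the new positions
     */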
    int[] writeTableToDataFile(Table table)
    throws IOException, HsqlException {

        Session         session = database.getSessionManager().getSysSession();
        RowOutputBinary rowOut  = new RowOutputBinary();
        DoubleIntIndex  pointerLookup =
            new DoubleIntIndex(table.getPrimaryIndex().sizeEstimate(), false);
        int[] rootsArray = table.getIndexRootsArray();
        long  pos        = fileOffset;
        int   count      = 0;

        pointerLookup.setKeysSearchTarget();
        Trace.printSystemOut("lookup begins: " + stopw.elapsedTime());

        RowIterator it = table.rowIterator(session);

        for (; it.hasNext(); count++) {
            CachedObject row = (CachedObject) it.next();

            pointerLookup.addUnsorted(row.getPos(), (int) (pos / scale));

            if (count % 50000 == 0) {
                Trace.printSystemOut("pointer pair for row " + count + " "
                                     + row.getPos() + " " + pos);
            }

            pos += row.getStorageSize();
        }

        Trace.printSystemOut(table.getName().name + " list done ",
                             stopw.elapsedTime());

        count = 0;
        it    = table.rowIterator(session);

        for (; it.hasNext(); count++) {
            CachedObject row = (CachedObject) it.next();

            rowOut.reset();
            row.write(rowOut, pointerLookup);
            fileStreamOut.write(rowOut.getOutputStream().getBuffer(), 0,
                                rowOut.size());

            fileOffset += row.getStorageSize();

            if (count % 50000 == 0) {
                Trace.printSystemOut(count + " rows " + stopw.elapsedTime());
            }
        }

        for (int i = 0; i < rootsArray.length; i++) {
            if (rootsArray[i] == -1) {
                continue;
            }

            int lookupIndex =
                pointerLookup.findFirstEqualKeyIndex(rootsArray[i]);

            if (lookupIndex == -1) {
                throw Trace.error(Trace.DATA_FILE_ERROR);
            }

            rootsArray[i] = pointerLookup.getValue(lookupIndex);
        }

        setTransactionRowLookups(pointerLookup);
        Trace.printSystemOut(table.getName().name + " : table converted");

        return rootsArray;
    }

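    /**
     * For each transaction row id that appears in the given table's
     * pointer lookup, stores the row's new position in
     * transactionRowLookup.
     */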
    void setTransactionRowLookups(DoubleIntIndex pointerLookup) {

        for (int i = 0, size = transactionRowLookup.size(); i < size; i++) {
            int key         = transactionRowLookup.getKey(i);
            int lookupIndex = pointerLookup.findFirstEqualKeyIndex(key);

            if (lookupIndex != -1) {
                transactionRowLookup.setValue(
                    i, pointerLookup.getValue(lookupIndex));
            }
        }
    }

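    /**
     * Appends any transaction rows that were not transferred with a table
     * (their lookup value is still 0) to the new file, reading each row
     * directly from the old cache.
     */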
    void writeTransactionRows() {

        for (int i = 0, size = transactionRowLookup.size(); i < size; i++) {
            if (transactionRowLookup.getValue(i) != 0) {
                continue;
            }

            int key = transactionRowLookup.getKey(i);

            try {
                transactionRowLookup.setValue(i, (int) (fileOffset / scale));

                RowInputInterface rowIn = cache.readObject(key);

                fileStreamOut.write(rowIn.getBuffer(), 0, rowIn.getSize());

                fileOffset += rowIn.getSize();
            } catch (IOException e) {
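                // a transaction row that cannot be read from the old
                // cache is skipped here without reporting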
            }
        }
    }
}