// You can redistribute this software and/or modify it under the terms of
// the Ozone Core License version 1 published by ozone-db.org.
//
// The original code and portions created by SMB are
// Copyright (C) 1997-@year@ by SMB GmbH. All rights reserved.
//
// $Id: PersistenceSpace.java,v 1.1 2001/12/18 10:31:31 per_nyfelt Exp $

package org.ozoneDB.core.classicStore;

import org.ozoneDB.core.*;
import org.ozoneDB.util.*;
import org.ozoneDB.DxLib.*;
import java.io.*;

/**
 * The PersistenceSpace manages the on-disk cluster files of the classic
 * store: it creates new clusters, tracks the clusters a transaction has
 * touched, compresses clusters that carry too many leaks and rolls back
 * transactions that were interrupted while committing.
 */
public class PersistenceSpace extends Object {
    final static String TRANSACTION_FLAG = "transaction";
    final static int TRANSACTION_FLAG_VERSION = 1;
    final static int PROPS_FILE_VERSION = 1;
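
    /*
     * On-disk layout of the transaction flag file (written by
     * startTransaction() and registerCluster(), read back by
     * rollBackTransaction()):
     *
     *   int   TRANSACTION_FLAG_VERSION
     *   long  id of the committing transaction
     *   long  id of the current cluster (restored on rollback)
     *   long* ids of all clusters touched by the transaction, appended
     *         one by one by registerCluster()
     */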

    final static String CID = "ozoneDB.classicStore.clusterID";

    Env env;
    ClassicStore classicStore;

    Cluster currentCluster;
    TransactionID currentTransaction;
    DxSet touchedClusters;
    DxSet clustersToCompress;

    public PersistenceSpace(Env _env) {
        env = _env;
        classicStore = (ClassicStore) env.store;
    }

    /**
     * Opens the persistence space. If a transaction flag file is found,
     * the recorded transaction was interrupted while committing and is
     * rolled back first; afterwards the current cluster is restored from
     * the stored properties or, on first start, created from scratch.
     */
    protected boolean startup() throws Exception {
        //env.logWriter.newEntry (this, "PersistenceSpace.open", LogWriter.DEBUG);
        File transFile = new File(env.dir + Env.DATA_DIR,
                TRANSACTION_FLAG);
        if (transFile.exists()) {
            // we had a crash (transaction abort while committing):
            // roll back the transaction to get a consistent database
            rollBackTransaction(transFile);
        }

        if (!readProperties()) {
            // check if the data directory is empty, i.e. we start for
            // the first time
            String[] list = new File(env.dir + Env.DATA_DIR).list();
            if (list != null && list.length != 0) {
                recover();
            } else {
                newCluster();
            }
        }
        return true;
    }

    /**
     * Closes the persistence space: stores the current cluster id and
     * closes the cluster stream.
     */
    protected boolean shutdown() throws Exception {
        //env.logWriter.newEntry (this, "PersistenceSpace.close", LogWriter.DEBUG);
        if (currentCluster != null) {
            writeProperties();
            currentCluster.close();
        }

        currentCluster = null;
        touchedClusters = null;
        clustersToCompress = null;

        return true;
    }

    /**
     * Restores the current cluster from the stored properties.
     *
     * @return false if no cluster id has been stored yet
     */
    protected boolean readProperties() {
        ClusterID cid = (ClusterID) env.state.property(CID, null);
        if (cid == null) {
            return false;
        }

        currentCluster = new Cluster(env, classicStore, cid);
        return true;
    }

    /**
     * Stores the id of the current cluster in the database properties.
     */
    protected void writeProperties() throws Exception {
        env.state.setProperty(CID, currentCluster.cluID());
    }

    /**
     * Begins a transaction commit by setting the transaction label: the
     * transaction flag file is written to disk so the transaction can be
     * rolled back after a crash.
     */
    protected void startTransaction(TransactionID tid) throws Exception {
        //env.logWriter.newEntry ("PersistenceSpace.beginTransaction: " + tid, LogWriter.DEBUG);
        currentTransaction = tid;
        touchedClusters = new DxHashSet();
        clustersToCompress = new DxHashSet();

        // write the transaction flag file to disk
        FileOutputStream fo = new FileOutputStream(new File(env.dir
                + Env.DATA_DIR, TRANSACTION_FLAG));
        DataOutputStream out = new DataOutputStream(fo);
        out.writeInt(TRANSACTION_FLAG_VERSION);
        out.writeLong(currentTransaction.value());
        // save the current cluster id so it can be restored on rollback
        out.writeLong(currentCluster.cluID().value());
        out.close();
    }

    /**
     * Prepares the transaction commit: closes the current cluster stream
     * and compresses the clusters that were marked for compression before
     * removing them from disk.
     */
    protected void prepareCommitTransaction(TransactionID tid)
            throws Exception {
        // close the current cluster stream
        currentCluster.close();

        // now remove the deleted clusters from disk
        DxIterator it = clustersToCompress.iterator();
        // 1: compress all clusters
        while (it.next() != null) {
            compressCluster((ClusterID) it.object());
        }
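
        // a crash between the two passes presumably cannot lose objects:
        // the live data has already been copied out; at worst stale
        // cluster files remain on disk
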
        // 2: if everything went fine, remove the cluster files
        it.reset();
        while (it.next() != null) {
            new Cluster(env, classicStore, (ClusterID) it.object())
                    .removeFromDisk();
        }
    }

    /**
     * Finishes the transaction commit: removes the transaction flag file
     * and resets the per-transaction state.
     */
    protected void commitTransaction(TransactionID tid) {
        // remove the transaction flag file
        File f = new File(env.dir + Env.DATA_DIR, TRANSACTION_FLAG);
        if (f.exists()) {
            f.delete();
        }

        //env.logWriter.newEntry ("PersistenceSpace.endTransaction: " + currentTransaction, LogWriter.DEBUG);
        touchedClusters = null;
        clustersToCompress = null;
        currentTransaction = null;
    }

    /**
     * Aborts the transaction. Currently a no-op.
     */
    protected void abortTransaction(TransactionID tid) {
    }

    /**
     * Assigns a cluster to the current transaction and appends its id to
     * the transaction flag file so the cluster can be rolled back after
     * a crash.
     */
    private void registerCluster(ClusterID cid) throws Exception {
        if (!touchedClusters.contains(cid)) {
            touchedClusters.add(cid);

            // append the cluster id to the transaction flag file
            FileOutputStream fo = new FileOutputStream(new File(env.dir
                    + Env.DATA_DIR, TRANSACTION_FLAG).toString(), true);
            DataOutputStream out = new DataOutputStream(fo);
            out.writeLong(cid.value());
            out.close();
        }
    }

    /**
     * Closes the current cluster and creates a new one with a fresh
     * cluster id.
     */
    private Cluster newCluster() throws Exception {
        // close the old cluster stream before creating a new one
        Cluster oldCluster = null;
        if (currentCluster != null) {
            oldCluster = currentCluster;
            currentCluster.close();
        }

        // retrieve a new cluster id and create a cluster
        currentCluster = new Cluster(env, classicStore, new ClusterID(
                env.nextID()));

        // check if the old cluster has to be compressed;
        // this can't be done in writeLeak() while the cluster is open
        if (oldCluster != null && oldCluster.needsCompressing()) {
            clustersToCompress.add(oldCluster.cluID());
        }
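        // the actual compression happens later, in prepareCommitTransaction()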

        // save the current cluster id
        writeProperties();

        return currentCluster;
    }

    /**
     * Reads the cluster with the given id from disk.
     *
     * @param whatToRead which parts of the cluster to read (see Cluster)
     */
    protected Cluster readCluster(ClusterID cid, int whatToRead)
            throws Exception {
        //env.logWriter.newEntry ("PersistenceSpace.readCluster: " + cid, LogWriter.DEBUG);
        // opening the same file for writing _and_ reading causes trouble
        if (cid.equals(currentCluster.cluID())) {
            currentCluster.close();
        }

        Cluster cl = new Cluster(env, classicStore, cid);
        cl.readObjects(whatToRead, null);

        // reopen, if necessary
        if (cid.equals(currentCluster.cluID())) {
            currentCluster.open();
        }

        return cl;
    }

    /**
     * Compresses the cluster with the given id.
     */
    protected void compressCluster(ClusterID cid) throws Exception {
        //env.logWriter.newEntry ("PersistenceSpace.compressCluster: " + cid, LogWriter.DEBUG);
        Cluster cl = new Cluster(env, classicStore, cid);
        cl.readObjects(Cluster.DATA, null);
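
        // re-writing the objects appends them to the current cluster (see
        // writeObject()); the emptied cluster file is removed afterwards
        // in prepareCommitTransaction()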

        DeathObject dobj;
        DxIterator it = cl.objects().iterator();
        while ((dobj = (DeathObject) it.next()) != null) {
            writeObject(dobj, false, false);
        }
    }

    /**
     * Returns the ids of all clusters found in the data directory.
     */
    protected ClusterID[] allClusters() {
        File path = new File(env.dir + Env.DATA_DIR);
        String[] fileList = path.list(new FilenameFilter() {

            public boolean accept(File dir, String name) {
                return name.endsWith(Cluster.CLUSTER_FILE_SUFF);
            }
        });
        ClusterID[] result = new ClusterID[fileList.length];

        for (int i = 0; i < fileList.length; i++) {
            // the cluster id is the file name without the suffix
            result[i] = new ClusterID(Long.parseLong(fileList[i]
                    .substring(0, fileList[i].length()
                            - Cluster.CLUSTER_FILE_SUFF.length())));
        }
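
        // e.g. assuming Cluster.CLUSTER_FILE_SUFF == ".cl", the file
        // "42.cl" yields new ClusterID(42)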

        return result;
    }

    /**
     * Writes an object into the current cluster and returns the id of
     * that cluster. A new cluster is created first if the current one
     * has grown beyond Cluster.MAX_SIZE.
     */
    protected ClusterID writeObject(DeathObject dobj,
            boolean serialize, boolean useClone) throws Exception {
        //env.logWriter.newEntry ("PersistenceSpace.writeObject: " + dobj.objID(), LogWriter.DEBUG);
        // create a new cluster if necessary
        if (currentCluster.size() > Cluster.MAX_SIZE) {
            newCluster();
        }

        // assign the current cluster to the current transaction; we have
        // to do that _before_ writing the object: if something goes wrong
        // while registering, the operation isn't performed and the
        // database stays consistent
        registerCluster(currentCluster.cluID());

        // first set the object's cluster id, then write the object
        dobj.container().setClusterID(currentCluster.cluID());
        currentCluster.appendObject(dobj, currentTransaction,
                serialize, useClone);
        return currentCluster.cluID();
    }

    /**
     * Writes a leak for the given object into the given cluster and
     * schedules the cluster for compression once its leaks outweigh its
     * file size.
     */
    protected void writeLeak(ClusterID cid, DeathObject dobj)
            throws Exception {
        //env.logWriter.newEntry ("PersistenceSpace.writeLeak: " + cid + " : " + dobj.objID(), LogWriter.DEBUG);

        // assign the touched cluster to the current transaction; we have
        // to do that _before_ writing the leak: if something goes wrong
        // while registering, the operation isn't performed and the
        // database stays consistent
        registerCluster(cid);

        // write the leak
        Cluster cl = new Cluster(env, classicStore, cid);
        cl.writeLeak(dobj, currentTransaction);

        // we must not compress the current cluster! This is impossible
        // both technically (we can't open the same file for reading _and_
        // writing at the same time) and logically (we can't append the
        // objects of a cluster to the cluster itself); the current
        // cluster is marked for compression in newCluster() instead
        if (currentCluster.cluID().equals(cid)) {
            return;
        }

        // derive the cluster size simply from its file size;
        // this is much faster than reading the whole cluster
        long clSize = cl.fileHandle().length();
        if (clSize > 0) {
            //env.logWriter.newEntry ("LEAK_WEIGHT = " + cl.leakSize() + " / " + clSize, LogWriter.DEBUG);
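            // e.g. assuming Cluster.LEAK_WEIGHT == 0.5: a 1000 byte
            // cluster file carrying 600 bytes of leaks gives a ratio
            // of 0.6 > 0.5, so the cluster gets scheduled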
            if ((double) cl.leakSize() / clSize > Cluster.LEAK_WEIGHT) {
                clustersToCompress.add(cid);
            }
        }
    }

    /**
     * Rebuilds the object space from the clusters on disk.
     */
    protected void fillObjectSpace() {
        env.logWriter.newEntry(this, "ObjectSpace recovery ...",
                LogWriter.INFO);
        int count = 0;
        ClusterID[] clusters = allClusters();
        for (int i = 0; i < clusters.length; i++) {
            try {
                ObjectContainer os;
                Cluster cl = new Cluster(env, classicStore, clusters[i]);
                cl.readObjects(Cluster.STATE, null);
                DxIterator it = cl.objects().iterator();
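                // deleteObject() before addObject() presumably makes the
                // rebuild idempotent: if a container shows up in several
                // clusters (e.g. after an interrupted compression), the
                // version read last wins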
                while ((os = (ObjectContainer) it.next()) != null) {
                    ((ClassicStore) env.store).objectSpace
                            .deleteObject(os);
                    ((ClassicStore) env.store).objectSpace
                            .addObject(os);
                    count++;
                    //env.logWriter.newEntry ("adding: " + os.id(), LogWriter.DEBUG);
                }
            } catch (Exception e) {
                env.fatalError(this,
                        "fillObjectSpace: " + e.toString(), e);
            }
        }
        env.logWriter.newEntry(this, count + " objects found.",
                LogWriter.INFO);
    }

    /**
     * Performs additional recovery work besides the transaction rollback.
     * Currently a no-op.
     */
    protected void recover() {
    }

    /**
     * Rolls back the transaction recorded in the given transaction flag
     * file and restores the current cluster.
     */
    protected void rollBackTransaction(File transFlag) throws Exception {
        TransactionID rollBackTid = null;
        DxBag clusters = new DxArrayBag();
        DataInputStream in = null;
        try {
            // open the flag file
            FileInputStream fi = new FileInputStream(transFlag);
            in = new DataInputStream(fi);

            // skip the flag file version
            in.readInt();
            rollBackTid = new TransactionID(in.readLong());
            // recover the current cluster
            currentCluster = new Cluster(env, classicStore,
                    new ClusterID(in.readLong()));
            // get all assigned clusters
            while (fi.available() != 0) {
                clusters.add(new ClusterID(in.readLong()));
            }
        } catch (IOException e) {
            env.logWriter.newEntry(this,
                    "rollback transaction: flag file corrupted",
                    LogWriter.WARN);
        } finally {
            // close the stream even if the flag file was corrupted
            if (in != null) {
                in.close();
            }
        }

        //env.logWriter.newEntry ("rollback transaction: " + rollBackTid + " with " + clusters.count() + " clusters", LogWriter.DEBUG);

        // roll back the clusters
        ClusterID cid;
        DxIterator it = clusters.iterator();
        while ((cid = (ClusterID) it.next()) != null) {
            Cluster cl = new Cluster(env, classicStore, cid);
            //env.logWriter.newEntry ("rollback : " + cid, LogWriter.DEBUG);
            cl.rollBack(rollBackTid);
        }

        // save the recovered properties
        writeProperties();

        transFlag.delete();
        touchedClusters = null;
        clustersToCompress = null;
        currentTransaction = null;
    }
}