001: // You can redistribute this software and/or modify it under the terms of
002: // the Ozone Core License version 1 published by ozone-db.org.
003: //
004: // The original code and portions created by SMB are
005: // Copyright (C) 1997-@year@ by SMB GmbH. All rights reserved.
006: //
007: // $Id: WizardStore.java,v 1.2 2002/06/08 00:49:39 mediumnet Exp $
008:
009: package org.ozoneDB.core.wizardStore;
010:
011: import java.io.*;
012: import org.ozoneDB.DxLib.*;
013: import org.ozoneDB.*;
014: import org.ozoneDB.core.*;
015: import org.ozoneDB.util.*;
016:
017: /**
018: * @author <a href="http://www.softwarebuero.de/">SMB</a>
019: * @author <a href="http://www.medium.net/">Medium.net</a>
020: * @version $Revision: 1.2 $Date: 2002/06/08 00:49:39 $
021: */
022: public final class WizardStore extends ServerComponent implements
023: StoreManager {
024:
    /** File name (under {@code env.dir}) of the id table snapshot written by commitIDTable(). */
    protected final static String ID_TABLE_NAME = "idTable.wizard";

    /** File name (under {@code env.dir}) of the name table snapshot written by commitNameTable(). */
    protected final static String NAME_TABLE_NAME = "nameTable.wizard";

    /**
     * File name of the commit flag file. It holds the count of write
     * commits currently in flight; its presence at startup marks an
     * interrupted commit (unclean shutdown).
     */
    protected final static String COMMIT_FLAG_NAME = "commitflag.wizard";

    /**
     * Maps ObjectIDs to ClusterIDs
     */
    protected DxMap idTable;

    /**
     * Maps names to ObjectIDs
     */
    protected DxMap nameTable;

    /** Persistent store for the clusters that hold the object containers. */
    protected ClusterStore clusterStore;

    /**
     * The garbage collector. It should be notified in the event
     * <UL>
     * <LI>that a formerly unnamed object receives a name,</LI>
     * <LI>that an object is freshly created.</LI>
     * </UL>
     */
    protected GarbageCollector garbageCollector;
051:
    /**
     * Creates the store component for the given server environment.
     * The store is not usable until {@code init()} has been called.
     *
     * @param env the server environment this component belongs to.
     */
    public WizardStore(Env env) {
        super (env);
    }
055:
056: public synchronized void init(Env _env) {
057: env = _env;
058:
059: int idTableBufferSize = env.config.intProperty(
060: Setup.WS_TABLE_BUFF_SIZE, -1);
061: int idTableCacheSize = env.config.intProperty(
062: Setup.WS_TABLE_CACHE_SIZE, -1);
063: int idTableSubtableSize = env.config.intProperty(
064: Setup.WS_TABLE_SUBTABLE_SIZE, -1);
065:
066: idTable = new IDTable(env.dir + "ostab" + File.separator
067: + "tab", idTableBufferSize, idTableCacheSize,
068: idTableSubtableSize);
069: // idTable = new DxHashMap (10000);
070: nameTable = new DxHashMap(100);
071:
072: clusterStore = new ClusterStore(_env);
073:
074: this .garbageCollector = env.getGarbageCollector();
075: }
076:
    /**
     * Starts the store. After a clean shutdown the id and name tables are
     * restored from their snapshot files; otherwise, or if restoring
     * fails, both tables are rebuilt from the clusters via recover().
     *
     * @throws Exception if the cluster store cannot start or recovery fails.
     */
    public synchronized void startup() throws Exception {
        env.logWriter.newEntry(this , "startup...", LogWriter.INFO);
        clusterStore.startup();

        env.logWriter.newEntry(this ,
                "checking for pending shadow clusters...",
                LogWriter.INFO);

        // clean only if both this store and the cluster store shut down cleanly
        boolean isCleanShutdown = isCleanShutdown()
                && clusterStore.isCleanShutdown();

        boolean isSuccessfullyStarted = false;

        if (isCleanShutdown) {
            ObjectInputStream nameTableIn = null;
            ObjectInputStream idTableIn = null;
            try {
                // restore nameTable; snapshot layout is: count, then
                // (element, key) pairs - the order commitNameTable() writes
                nameTableIn = new ObjectInputStream(
                        new FileInputStream(env.dir + NAME_TABLE_NAME));
                int count = nameTableIn.readInt();
                for (int i = 0; i < count; i++) {
                    nameTable.addForKey(nameTableIn.readObject(),
                            nameTableIn.readObject());
                }
                nameTableIn.close();

                // restore idTable; a disk-backed DxDiskHashMap re-uses its
                // own table files instead of reading a snapshot
                if (!(idTable instanceof DxDiskHashMap)) {
                    idTableIn = new ObjectInputStream(
                            new FileInputStream(env.dir + ID_TABLE_NAME));
                    count = idTableIn.readInt();
                    for (int i = 0; i < count; i++) {
                        idTable.addForKey(idTableIn.readObject(),
                                idTableIn.readObject());
                    }
                    idTableIn.close();
                } else {
                    ((DxDiskHashMap) idTable).re_use();
                    ((DxDiskHashMap) idTable).setReusable(true);
                }

                isSuccessfullyStarted = true;
            } catch (Exception e) {
                // log and fall through: the recover() branch below rebuilds
                // both tables from the clusters
                env.logWriter.newEntry(this ,
                        " error while starting up... ",
                        LogWriter.INFO);
                env.logWriter.newEntry(this , " exception: ", e,
                        LogWriter.DEBUG);
            } finally {
                // closing an already-closed ObjectInputStream is harmless
                if (nameTableIn != null) {
                    nameTableIn.close();
                }
                if (idTableIn != null) {
                    idTableIn.close();
                }
            }
        }

        if (!isCleanShutdown || !isSuccessfullyStarted) {
            env.logWriter.newEntry(this , " recovering...",
                    LogWriter.INFO);
            recover();
        }

        env.logWriter.newEntry(this , " " + idTable.count()
                + " IDs, " + nameTable.count() + " name(s))",
                LogWriter.INFO);
    }
146:
147: public synchronized void shutdown() throws Exception {
148: env.logWriter.newEntry(this , "shutdown...", LogWriter.INFO);
149: clusterStore.shutdown();
150:
151: commitNameTable();
152:
153: commitIDTable();
154: if (idTable instanceof DxDiskHashMap) {
155: ((DxDiskHashMap) idTable).printStatistics();
156: ((DxDiskHashMap) idTable).close();
157: }
158: }
159:
    /**
     * Intentionally a no-op: this store persists its state in
     * commitIDTable()/commitNameTable() during commits and shutdown.
     */
    public void save() throws Exception {
    }
162:
    /**
     * Returns an iterator over the global id table, i.e. over all
     * ObjectIDs currently known to this store.
     */
    public DxIterator objectIDIterator() {
        return idTable.iterator();
    }
166:
167: protected void commitNameTable() throws IOException {
168: env.logWriter.newEntry(this , "commitNameTable...",
169: LogWriter.DEBUG3);
170:
171: String filename = env.dir + NAME_TABLE_NAME;
172: ObjectOutputStream out = new ObjectOutputStream(
173: new FileOutputStream(filename));
174: try {
175: out.writeInt(nameTable.count());
176: DxIterator it = nameTable.iterator();
177: while (it.next() != null) {
178: out.writeObject(it.object());
179: out.writeObject(it.key());
180: }
181: } catch (Exception e) {
182: new File(filename).delete();
183: } finally {
184: out.close();
185: }
186: }
187:
188: protected void commitIDTable() throws IOException {
189: env.logWriter.newEntry(this , "commitIDTable...",
190: LogWriter.DEBUG3);
191:
192: if (!(idTable instanceof DxDiskHashMap)) {
193: String filename = env.dir + ID_TABLE_NAME;
194: ObjectOutputStream out = new ObjectOutputStream(
195: new FileOutputStream(filename));
196:
197: try {
198: out.writeInt(idTable.count());
199: DxIterator it = idTable.iterator();
200: while (it.next() != null) {
201: out.writeObject(it.object());
202: out.writeObject(it.key());
203: }
204: } catch (Exception e) {
205: new File(filename).delete();
206: } finally {
207: out.close();
208: }
209: } else {
210: ((IDTable) idTable).setReusable(false);
211: ((IDTable) idTable).writeDirtyTables();
212: ((IDTable) idTable).setReusable(true);
213: }
214: }
215:
216: /**
217: * Fill idTable and nameTable from the information that are stored in the
218: * clusters directly.
219: *
220: * @throws Exception If a cluster cannot be read.
221: */
    protected synchronized void recover() throws Exception {
        // NOTE(review): these casts assume idTable is always disk-backed
        // (init() creates an IDTable); verify before changing init().
        env.logWriter.newEntry(this , " cleaning ID table...",
                LogWriter.INFO);
        ((DxDiskHashMap) idTable).cleanFiles();
        ((DxDiskHashMap) idTable).clear();

        env.logWriter.newEntry(this , " cleaning name table...",
                LogWriter.INFO);
        nameTable.clear();

        // rebuild both tables from what the clusters themselves contain
        DxSet cids = clusterStore.recoverClusterIDs();

        DxIterator it = cids.iterator();
        while (it.next() != null) {
            ClusterID cid = (ClusterID) it.object();

            Cluster cluster = null;
            boolean exceptionWhileLoading = false;

            env.logWriter.newEntry(this , " cluster: " + cid,
                    LogWriter.INFO);
            try {
                cluster = (Cluster) clusterStore.restoreCluster(cid);
                env.logWriter.newEntry(this , " "
                        + cluster.containers.count() + " containers",
                        LogWriter.INFO);
            } catch (Exception e) {
                env.logWriter.newEntry(this ,
                        "exception while loading cluster: " + cid
                        + " (" + e + ")", LogWriter.WARN);
                env.logWriter.newEntry(this , "", e, LogWriter.DEBUG);
                exceptionWhileLoading = true;
            }

            // || short-circuits, so cluster.containers is only touched
            // when loading succeeded (cluster != null)
            if (exceptionWhileLoading || cluster.containers.isEmpty()) {
                env.logWriter
                        .newEntry(
                                this ,
                                " cluster is empty or unable to read - should be deleted!",
                                LogWriter.INFO);
                if (cluster != null) {
                    env.logWriter.newEntry(this ,
                            " try to delete cluster...",
                            LogWriter.INFO);
                    cluster.delete();
                }
            } else {
                // fill in idTable and nameTable
                DxIterator it2 = cluster.containers.iterator();
                WizardObjectContainer container;
                while ((container = (WizardObjectContainer) it2.next()) != null) {
                    if (idTable.addForKey(cluster.clusterID(),
                            container.id()) == false) {
                        throw new IllegalStateException(
                                "Unable to add container ID to ID table: "
                                + container.id());
                    }
                    if (container.name() != null) {
                        env.logWriter.newEntry(this ,
                                " adding name: "
                                + container.name(),
                                LogWriter.INFO);
                        if (nameTable.addForKey(container.id(),
                                container.name()) == false) {
                            throw new IllegalStateException(
                                    "Unable to add name to name table: "
                                    + container.name());
                        }
                    }
                }
            }
            clusterStore.unloadCluster(cid, false);
        }
        // persist the freshly rebuilt tables right away
        commitIDTable();
        commitNameTable();
    }
298:
    /**
     * Returns a fresh, store-specific data object to be attached to a
     * new transaction (see getTransactionData()).
     */
    public Object newTransactionData() {
        return new TransactionData();
    }
302:
303: public synchronized ObjectContainer newContainerAndPinAndLock(
304: Transaction ta, OzoneCompatible target, ObjectID objID,
305: Permissions permissions, int lockLevel) throws Exception {
306:
307: WizardObjectContainer container = new WizardObjectContainer(
308: objID);
309:
310: if (target != null) {
311: container.setTarget(target);
312: }
313:
314: clusterStore.registerContainerAndPinAndLock(container,
315: permissions, ta, lockLevel);
316:
317: boolean alright = false;
318:
319: try {
320:
321: garbageCollector.notifyNewObjectContainer(container);
322:
323: TransactionData taData = getTransactionData(ta);
324: ClusterID cid = container.getCluster().clusterID();
325: ObjectID oid = container.id();
326:
327: if (false && env.logWriter.hasTarget(LogWriter.DEBUG)) {
328: env.logWriter.newEntry(this , "newContainer(): cid="
329: + cid + ",oid=" + oid + ".", LogWriter.DEBUG);
330: }
331:
332: taData.idTable.addForKey(cid, oid);
333: taData.idTableChanges_push(new IDTableChange(oid, cid,
334: IDTableChange.STATE_ADDED));
335:
336: alright = true;
337:
338: return container;
339: } finally {
340: if (!alright) {
341: container.getCluster().getLock().release(ta);
342: container.unpin();
343: }
344: }
345: }
346:
347: // public synchronized void deleteContainer (Transaction ta, ObjectContainer _container)
348: // throws Exception {
349: // if (env.logWriter.hasTarget (LogWriter.DEBUG3))
350: // env.logWriter.newEntry (this, "deleteContainer()", LogWriter.DEBUG3);
351: //
352: // WizardObjectContainer container = (WizardObjectContainer)_container;
353: // taData.idTableChanges.push (new IDTableChange (oid, cid, IDTableChange.STATE_ADDED));
354:
355: public void updateLockLevel(Transaction _ta,
356: ObjectContainer _container) throws IOException {
357: if (false && env.logWriter.hasTarget(LogWriter.DEBUG3)) {
358: env.logWriter.newEntry(this , "updateLockLevel()",
359: LogWriter.DEBUG3);
360: }
361:
362: if (_container instanceof WizardObjectContainer) {
363: WizardObjectContainer container = (WizardObjectContainer) _container;
364: container.getCluster().updateLockLevel(_ta);
365:
366: TransactionData taData = getTransactionData(_ta);
367: taData.idTable.addForKey(
368: container.getCluster().clusterID(), container.id());
369: }
370: }
371:
372: /**
373: * Returns the ObjectContainer for the given ObjectID or null if there is
374: * no such container.<p>
375: If an ObjectContainer is returned, it is pinned once.
376: Thus, the caller has to ensure that the ObjectContainer is unpinned as soon as it is not needed anymore.
377: *
378: * @param ta the Transaction for within the container is requested or null.
379: *
380: * Impl. Note: For performance reasons this is the only method of this Store
381: * that is not synchronized. This will not cause problems because the only
382: * field that is updated inside the method (currentContainer) does not need
383: * to be stable while this method is running.
384: */
    public ObjectContainer containerForIDAndPin(Transaction ta,
            ObjectID id) throws ObjectNotFoundExc, IOException,
            ClassNotFoundException {

        WizardObjectContainer container = null;

        // search the LRU cluster to speed things up; since this is not
        // synchronized, checking and accessing currentCluster must be done in
        // one line to avoid other thread to change the variable in between
        // container = (currentCluster != null && currentCluster.lock != null) ? currentCluster.containerForID (id) : null;
        // if (container != null) {
        // // System.out.print ("+");
        // return container.isDeleted() ? null : container;
        // }

        ClusterID cid = null;

        // search members of current ta first
        if (ta != null) {
            TransactionData taData = getTransactionData(ta);
            DxMap taDataIDTable = taData.idTable;
            cid = (ClusterID) taDataIDTable.elementForKey(id);

            // fast path: try the transaction's least-recently-used cluster
            // before consulting the global table
            if (cid == null && taData.lrucid != null) {
                Cluster lru = clusterStore.loadCluster(taData.lrucid,
                        true);

                if (lru != null) {
                    try {
                        container = lru.lock != null ? lru
                                .containerForID(id) : null;
                        if (container != null) {
                            // System.out.print ("+");
                            if (container.isDeleted()) {
                                return null;
                            } else {
                                // pin before the cluster itself is unpinned
                                // in the finally block below
                                container.pin();
                                return container;
                            }
                        }
                    } finally {
                        lru.unpin();
                    }
                }
            }
        }

        // search global table ONLY if ta doesn't contain the container
        if (cid == null) {
            cid = (ClusterID) idTable.elementForKey(id);
        }

        // env.logWriter.newEntry( this, "containerForIDAndPin(): id="+id+",cid="+cid+".", LogWriter.DEBUG3 );

        if (cid == null) {
            return null;
        } else {
            // System.out.println ("-");
            Cluster cluster = clusterStore.loadCluster(cid, true);

            if (cluster == null) {
                throw new ObjectNotFoundExc(
                        "No object registered for ID: " + id);
            }

            try {
                // remember this cluster as the transaction's LRU cluster
                if (ta != null) {
                    getTransactionData(ta).lrucid = cid;
                }

                try {
                    container = cluster.containerForID(id);

                    if (container.isDeleted()) {
                        return null;
                    } else {
                        container.pin();
                        return container;
                    }
                } catch (NullPointerException e) {
                    // id table pointed at a cluster that doesn't hold the
                    // container - an inconsistency worth an ERROR entry
                    env.logWriter
                            .newEntry(
                                    this ,
                                    "NullPointerException during "
                                            + cluster
                                            + ".containerForID("
                                            + id
                                            + "). We were able to load a cluster for that ID but the cluster did not contain the container. This is an inconsistent view.",
                                    e, LogWriter.ERROR);
                    throw e;
                }

            } finally {
                cluster.unpin();
            }
        }
    }
482:
483: public synchronized ObjectContainer containerForNameAndPin(
484: Transaction ta, String name) throws Exception {
485: if (name == null) {
486: return null;
487: }
488:
489: TransactionData taData = getTransactionData(ta);
490:
491: if (ta == null || taData.nameTable == null) {
492: ObjectID oid = (ObjectID) nameTable.elementForKey(name);
493: return oid != null ? containerForIDAndPin(ta, oid) : null;
494: }
495:
496: if (ta != null) {
497: ObjectID oid = (ObjectID) taData.nameTable
498: .elementForKey(name);
499: return oid != null ? containerForIDAndPin(ta, oid) : null;
500: }
501: return null;
502: }
503:
504: public synchronized void nameContainer(Transaction ta,
505: ObjectContainer container, String name)
506: throws PermissionDeniedExc {
507:
508: if (false && env.logWriter.hasTarget(LogWriter.INFO)) {
509: env.logWriter.newEntry(this , "nameContainer(), nameTable="
510: + nameTable + ".", LogWriter.INFO);
511: }
512:
513: TransactionData taData = getTransactionData(ta);
514: if (taData.nameTable == null) {
515: Object o = null;
516:
517: if (env.logWriter.hasTarget(LogWriter.INFO)) {
518: try {
519: java.lang.reflect.Method method = nameTable
520: .getClass()
521: .getDeclaredMethod("clone", null);
522:
523: if (false) {
524: env.logWriter
525: .newEntry(
526: this ,
527: "nameContainer(), nameTable="
528: + nameTable
529: + ", nameTable.getClass().getDeclaredMethod(\"clone\",null)="
530: + method + ".",
531: LogWriter.INFO);
532: }
533:
534: o = method.invoke(nameTable, null);
535: } catch (Exception e) {
536: env.logWriter.newEntry(this ,
537: "nameContainer(): caught: ", e,
538: LogWriter.INFO);
539: }
540: }
541:
542: if (o == null) {
543: o = nameTable.clone();
544: }
545:
546: taData.nameTable = (DxMap) o;
547: }
548:
549: String oldName = container.name();
550: if (oldName != null) {
551: taData.nameTable.removeForKey(oldName);
552: taData.nameTableChanges_push(new NameTableChange(container
553: .id(), oldName, NameTableChange.STATE_REMOVED));
554: }
555:
556: container.nameTarget(name);
557:
558: if (name != null) {
559: if (oldName == null) {
560: garbageCollector.notifyNewObjectName(container);
561: }
562: taData.nameTable.addForKey(container.id(), name);
563: taData.nameTableChanges_push(new NameTableChange(container
564: .id(), name, NameTableChange.STATE_ADDED));
565: }
566: }
567:
568: public synchronized DxBag clusterOfID(ObjectID id) throws Exception {
569: if (env.logWriter.hasTarget(LogWriter.DEBUG3)) {
570: env.logWriter.newEntry(this , "clusterOfID()",
571: LogWriter.DEBUG3);
572: }
573:
574: ClusterID cid = (ClusterID) idTable.elementForKey(id);
575: if (cid == null) {
576: throw new ObjectNotFoundExc("");
577: }
578:
579: Cluster cluster = clusterStore.loadCluster(cid, true);
580:
581: try {
582:
583: DxBag result = new DxArrayBag();
584: DxIterator it = cluster.containers.iterator();
585: while (it.next() != null) {
586: ObjectContainer container = (ObjectContainer) it
587: .object();
588: result.add(container.id());
589: }
590: return result;
591: } finally {
592: cluster.unpin();
593: }
594: }
595:
596: public synchronized void prepareCommitTransaction(Transaction ta)
597: throws IOException, ClassNotFoundException {
598: if (false && env.logWriter.hasTarget(LogWriter.DEBUG3)) {
599: env.logWriter.newEntry(this , "prepareCommitTransaction()",
600: LogWriter.DEBUG3);
601: }
602:
603: TransactionData taData = getTransactionData(ta);
604:
605: // initialize transaction data fields
606: taData.commitClusterIDs = new DxHashSet(64);
607:
608: DxIterator it = taData.idTable.iterator();
609: // env.logWriter.newEntry( this, "idTable count: " + taData.idTable.count(), LogWriter.DEBUG2 );
610: ClusterID cid;
611:
612: while ((cid = (ClusterID) it.next()) != null) {
613: if (false && env.logWriter.hasTarget(LogWriter.DEBUG3)) {
614: env.logWriter.newEntry(this , "checking cluster: " + cid
615: + " ...", LogWriter.DEBUG3);
616: }
617:
618: if (!taData.commitClusterIDs.contains(cid)) {
619: // env.logWriter.newEntry( this, "prepare commit cluster: " + cid, LogWriter.DEBUG2 );
620: taData.commitClusterIDs.add(cid);
621:
622: Cluster cluster = clusterStore.loadCluster(cid, true);
623:
624: try {
625: // we don't need to do all the stuff if there is no WRITE
626: // lock
627: if (cluster.lock.level(ta) >= Lock.LEVEL_WRITE) {
628:
629: // delete containers from cluster and
630: // prepare newID and deletedID tables
631: DxIterator it2 = cluster.containers.iterator();
632: WizardObjectContainer container;
633: while ((container = (WizardObjectContainer) it2
634: .next()) != null) {
635:
636: if (container.isDeleted()) {
637: if (false) {
638: env.logWriter.newEntry(this ,
639: "container deleted: "
640: + container.id(),
641: LogWriter.DEBUG);
642: }
643:
644: clusterStore
645: .invalidateContainer(container);
646: taData
647: .idTableChanges_push(new IDTableChange(
648: container.id(),
649: cid,
650: IDTableChange.STATE_REMOVED));
651:
652: if (container.name() != null) {
653: taData
654: .nameTableChanges_push(new NameTableChange(
655: container.id(),
656: container.name(),
657: NameTableChange.STATE_REMOVED));
658: }
659: }
660: container.clearState();
661: }
662: }
663: } finally {
664: cluster.unpin();
665: }
666: clusterStore.prepareCommitCluster(ta, cid);
667: }
668: }
669: }
670:
671: protected boolean isCleanShutdown() {
672: return !(new File(COMMIT_FLAG_NAME).exists());
673: }
674:
675: protected void beginCommit(Transaction ta) throws IOException {
676: // env.logWriter.newEntry( this, "beginCommit...", LogWriter.DEBUG2 );
677:
678: // do nothing if this was just a read transaction
679: if (ta.maxLockLevel() < Lock.LEVEL_WRITE) {
680: return;
681: }
682:
683: int commitCount = 0;
684:
685: // read the current commitCount from file
686: File f = new File(env.dir + COMMIT_FLAG_NAME);
687: if (f.exists()) {
688: DataInputStream in = null;
689: try {
690: in = new DataInputStream(new FileInputStream(f));
691: commitCount = in.readInt();
692: } finally {
693: if (in != null) {
694: in.close();
695: }
696: }
697: }
698: commitCount++;
699:
700: DataOutputStream out = null;
701: try {
702: out = new DataOutputStream(new FileOutputStream(f));
703: out.writeInt(commitCount);
704: } finally {
705: if (out != null) {
706: out.close();
707: }
708: }
709: }
710:
711: protected void endCommit(Transaction ta) throws IOException {
712: // env.logWriter.newEntry( this, "endCommit...", LogWriter.DEBUG2 );
713:
714: // do nothing if this was just a read transaction
715: if (ta.maxLockLevel() < Lock.LEVEL_WRITE) {
716: return;
717: }
718:
719: int commitCount = 0;
720:
721: // read the current commitCount from file
722: File f = new File(env.dir + COMMIT_FLAG_NAME);
723: if (f.exists()) {
724: DataInputStream in = null;
725: try {
726: in = new DataInputStream(new FileInputStream(f));
727: commitCount = in.readInt();
728: } finally {
729: if (in != null) {
730: in.close();
731: }
732: }
733: } else {
734: throw new RuntimeException("No commit flag file present.");
735: }
736: commitCount--;
737:
738: if (commitCount > 0) {
739: DataOutputStream out = null;
740: try {
741: out = new DataOutputStream(new FileOutputStream(f));
742: out.writeInt(commitCount);
743: } finally {
744: if (out != null) {
745: out.close();
746: }
747: }
748: } else {
749: if (!(f.delete())) {
750: throw new RuntimeException(
751: "Unable to delete commit flag file. ");
752: }
753: }
754: }
755:
756: public synchronized void commitTransaction(Transaction ta)
757: throws IOException, ClassNotFoundException {
758: if (false && env.logWriter.hasTarget(LogWriter.DEBUG3)) {
759: env.logWriter.newEntry(this , "commitTransaction()",
760: LogWriter.DEBUG3);
761: }
762:
763: beginCommit(ta);
764:
765: TransactionData taData = getTransactionData(ta);
766:
767: // actually write changed clusters to disk
768: DxSet commitClusterIDs = taData.commitClusterIDs;
769: DxIterator it = commitClusterIDs.iterator();
770:
771: ClusterID cid;
772: while ((cid = (ClusterID) it.next()) != null) {
773: if (false && env.logWriter.hasTarget(LogWriter.DEBUG2)) {
774: env.logWriter.newEntry(this , "commit cluster: " + cid,
775: LogWriter.DEBUG2);
776: }
777: clusterStore.commitCluster(ta, cid);
778: }
779:
780: // update global idTable
781: boolean isIDTableChanged = false;
782: IDTableChange idChange = null;
783:
784: while ((idChange = taData.idTableChanges_pop()) != null) {
785: isIDTableChanged = true;
786:
787: switch (idChange.state) {
788: case IDTableChange.STATE_ADDED: {
789: // env.logWriter.newEntry( this, "commit added oid: " + idChange.oid, LogWriter.DEBUG2 );
790: if (idTable.addForKey(idChange.cid, idChange.oid) == false) {
791: throw new IllegalStateException(
792: "Unable to add OID to global ID table.");
793: }
794: break;
795: }
796: case IDTableChange.STATE_REMOVED: {
797: if (false) {
798: env.logWriter.newEntry(this , "commit removed oid: "
799: + idChange.oid, LogWriter.DEBUG2);
800: }
801:
802: if (idTable.removeForKey(idChange.oid) == null) {
803: throw new IllegalStateException(
804: "Unable to remove OID from global ID table.");
805: }
806: break;
807: }
808: default: {
809: throw new RuntimeException(
810: "ID change table entry has inproper state.");
811: }
812: }
813: }
814:
815: // write changes to disk
816: if (isIDTableChanged) {
817: commitIDTable();
818: }
819:
820: // update global nameTable
821: boolean isNameTableChanged = false;
822: NameTableChange nameChange = taData.nameTableChanges_pop();
823:
824: while (nameChange != null) {
825: isNameTableChanged = true;
826:
827: switch (nameChange.state) {
828: case NameTableChange.STATE_ADDED: {
829: nameTable.addForKey(nameChange.oid, nameChange.name);
830: break;
831: }
832: case NameTableChange.STATE_REMOVED: {
833: nameTable.removeForKey(nameChange.name);
834: break;
835: }
836: default: {
837: throw new RuntimeException(
838: "Name change table entry has inproper state.");
839: }
840: }
841: nameChange = taData.nameTableChanges_pop();
842: }
843:
844: // write changes to disk
845: if (isNameTableChanged) {
846: commitNameTable();
847: }
848:
849: endCommit(ta);
850:
851: if (false && env.logWriter.hasTarget(LogWriter.DEBUG2)) {
852: env.logWriter.newEntry(this , " idTable.count(): "
853: + idTable.count(), LogWriter.DEBUG2);
854: env.logWriter.newEntry(this , " nameTable.count(): "
855: + nameTable.count(), LogWriter.DEBUG2);
856: }
857: }
858:
859: /**
860: * @param ta ID of the comitting transaction.
861: * @param created
862: * @param modified
863: */
    public synchronized void abortTransaction(Transaction ta)
            throws IOException, ClassNotFoundException {
        if (env.logWriter.hasTarget(LogWriter.DEBUG3)) {
            env.logWriter.newEntry(this , "abortTransaction()",
                    LogWriter.DEBUG3);
        }

        TransactionData taData = getTransactionData(ta);

        // (re)use commitClusterIDs to remember which clusters were handled
        taData.commitClusterIDs = new DxHashSet(64);

        DxIterator it = taData.idTable.iterator();
        ClusterID cid;
        while ((cid = (ClusterID) it.next()) != null) {
            if (!taData.commitClusterIDs.contains(cid)) {
                // We MUST NOT abort read locked clusters (because they may be read locked from other transactions, too)

                // NOTE(review): loadCluster(cid, true) pins the cluster but
                // no matching unpin appears here - presumably abortCluster/
                // release handles it; verify in ClusterStore.
                Cluster cluster = clusterStore.loadCluster(cid, true);

                if (cluster.getLock().level(ta) > Lock.LEVEL_READ) {
                    if (env.logWriter.hasTarget(LogWriter.DEBUG2)) {
                        env.logWriter.newEntry(this , "abort cluster: "
                                + cid, LogWriter.DEBUG2);
                    }

                    // roll the cluster back to its pre-transaction state
                    clusterStore.abortCluster(ta, cid);
                } else {
                    // Do a plain unlock, and we are fine.
                    cluster.getLock().release(ta);
                }
                taData.commitClusterIDs.add(cid);
            }
        }
    }
898:
    /**
     * Returns the store-specific data attached to the given (non-null)
     * transaction; see newTransactionData().
     */
    protected TransactionData getTransactionData(Transaction ta) {
        return (TransactionData) ta.getData();
    }
902:
903: /**
904: Tells this StoreManager to report every named object to the garbage collector.
905: */
906: public void reportNamedObjectsToGarbageCollector() {
907: synchronized (this ) {
908: DxIterator i = nameTable.elementSet().iterator();
909:
910: while (i.next() != null) {
911: garbageCollector.notifyNamedObject((ObjectID) i
912: .object());
913: }
914: }
915: }
916: }
|