001: // You can redistribute this software and/or modify it under the terms of
002: // the Ozone Core License version 1 published by ozone-db.org.
003: //
004: // The original code and portions created by SMB are
005: // Copyright (C) 1997-@year@ by SMB GmbH. All rights reserved.
006: //
007: // $Id: Cluster.java,v 1.2 2002/06/08 00:49:39 mediumnet Exp $
008:
009: package org.ozoneDB.core.wizardStore;
010:
011: import java.io.*;
012: import org.ozoneDB.DxLib.*;
013: import org.ozoneDB.core.*;
014: import org.ozoneDB.util.*;
015:
016: /**
017: * @author <a href="http://www.softwarebuero.de/">SMB</a>
018: * @author <a href="http://www.medium.net/">Medium.net</a>
019: * @version $Revision: 1.2 $Date: 2002/06/08 00:49:39 $
020: */
021: public final class Cluster implements Externalizable {
022:
023: protected final static long serialVersionUID = 2L;
024: protected final static byte subSerialVersionUID = 1;
025:
026: /**
027: * The environment. Will be set by the clusterStore.
028: */
029: protected transient Env env;
030:
031: protected transient ClusterStore clusterStore;
032:
033: protected ClusterID clusterID;
034:
035: /**
036: * Maps ObjectIDs into WizardObjectContainers.
037: */
038: protected DxMap containers;
039:
040: protected Permissions permissions;
041:
042: protected transient Lock lock;
043:
044: protected transient long lastTouched;
045:
046: protected transient int bytesPerContainer;
047:
048: protected long modTime;
049:
    /**
     * The number of users of this cluster that have requested it to be pinned.
     * A pinned cluster may not be "passivated" (i.e. written to disk and
     * forgotten); it has to stay in memory. This cluster is pinned iff
     * pinCount != 0. The count may only be accessed while synchronized on
     * this Cluster.
     */
057: protected transient int pinCount;
058:
059: /**
060: * Constructor to be used for Externalizable object serialisation.
061: */
062: public Cluster() {
        this(null, null, null, 0);
064: }
065:
066: public Cluster(ClusterID _clusterID, Permissions _permissions,
067: Lock _lock, int _bpc) {
068: // the env and clusterStore will be set by clusterStore
069: clusterID = _clusterID;
070: permissions = _permissions;
071: lock = _lock;
072: bytesPerContainer = _bpc;
073: containers = new DxHashMap(1024);
074: if (lock != null) {
075: ((MROWLock) lock).setClusterID(clusterID);
076: }
077: }
078:
    public void finalize() {
        // env is transient and may not have been set yet, e.g. for instances
        // created via the no-arg constructor during deserialisation
        if (env != null && env.logWriter.hasTarget(LogWriter.DEBUG3)) {
            env.logWriter.newEntry(this, "---finalize(): cluster " + clusterID, LogWriter.DEBUG3);
        }
    }
085:
086: public long modTime() {
087: return modTime;
088: }
089:
090: /**
091: * Priority of this cluster to stay in the cluster cache. Low return value
092: * means low priority.
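     * The value is taken from the cluster store's touch counter (see
     * {@link #touch}), so a recently touched cluster gets a higher priority
     * to stay in the cache than one that has not been touched for a while.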
093: * @return Cache priority of the cluster.
094: */
095: protected DxLong cachePriority() {
096: return new DxLong(lastTouched);
097: }
098:
    public synchronized void setCurrentSize(int byteSize) {
        int containerCount = containers.count();
        bytesPerContainer = containerCount > 0
                ? byteSize / containerCount
                : clusterStore.currentBytesPerContainer();
    }
105:
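    /**
     * Approximate size of this cluster in bytes, derived from the container
     * count and the average bytes-per-container value maintained by
     * {@link #setCurrentSize}.
     */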
106: public int size() {
107: return containers.count() * bytesPerContainer;
108: }
109:
110: public ClusterID clusterID() {
111: return clusterID;
112: }
113:
114: /**
115: * @return True if at least one container is currently invoked.
116: */
117: public boolean isInvoked() {
        //FIXME: this should be a bit in the status byte that is updated
        // directly by the containers when their invocation state changes
120: WizardObjectContainer container;
121: DxIterator it = containers.iterator();
122: while ((container = (WizardObjectContainer) it.next()) != null) {
123: if (container.isInvoked()) {
124: return true;
125: }
126: }
127: return false;
128: }
129:
130: public synchronized void touch() {
131: lastTouched = clusterStore.touchCount++;
132: }
133:
    public void registerContainer(WizardObjectContainer container) {
        synchronized (container) {
            container.setCluster(this);
            if (!containers.addForKey(container, container.id())) {
                throw new IllegalStateException("Unable to add id " + container.id()
                        + " to cluster " + clusterID());
            }
        }
    }
143:
144: public void removeContainer(WizardObjectContainer container) {
145: if (containers.removeForKey(container.id()) == null) {
146: throw new IllegalStateException(
147: "Unable to remove container from cluster.");
148: }
149: }
150:
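    /**
     * Looks up the container that holds the object with the given ID in this
     * cluster. A minimal (hypothetical) lookup, assuming a null result means
     * the object does not live in this cluster:
     * <pre>
     * WizardObjectContainer container = cluster.containerForID(id);
     * if (container != null) {
     *     // the object is stored in this cluster
     * }
     * </pre>
     */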
151: public WizardObjectContainer containerForID(ObjectID id) {
152: return (WizardObjectContainer) containers.elementForKey(id);
153: }
154:
    /**
     * Note: This method must not be synchronized.
     *
     * @param ta the transaction whose lock level determines whether a shadow
     *            copy has to be saved
     */
161: public void updateLockLevel(Transaction ta) throws IOException {
162: if (false && env.logWriter.hasTarget(LogWriter.DEBUG2)) {
163: env.logWriter.newEntry(this , "updateLockLevel(): "
164: + clusterID, LogWriter.DEBUG2);
165: env.logWriter.newEntry(this , " lock: " + lock.level(ta),
166: LogWriter.DEBUG2);
167: }
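        // If the transaction holds more than an upgrade lock (i.e. it may
        // modify the cluster), the on-disk cluster file is copied to a shadow
        // file so that abort() can restore it later.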
168: if (lock.level(ta) > Lock.LEVEL_UPGRADE) {
169: saveShadow();
170: }
171: }
172:
173: public void prepareCommit(Transaction ta) /*throws Exception*/{
174: if (false && env.logWriter.hasTarget(LogWriter.DEBUG3)) {
175: env.logWriter.newEntry(this , "prepareCommit()" + clusterID,
176: LogWriter.DEBUG3);
177: }
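        // For transactions that hold more than an upgrade lock, record the
        // modification time now; it is persisted by writeExternal() and
        // reported via modTime().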
178: if (lock.level(ta) > Lock.LEVEL_UPGRADE) {
            synchronized (this) {
180: modTime = System.currentTimeMillis();
181: }
182: }
183: }
184:
185: public void commit(Transaction ta) throws IOException {
186: if (false && env.logWriter.hasTarget(LogWriter.DEBUG3)) {
187: env.logWriter.newEntry(this , "commit()" + clusterID,
188: LogWriter.DEBUG3);
189: }
190:
191: if (true || lock != null) {
192: if (lock.level(ta) > Lock.LEVEL_UPGRADE) {
193: deleteShadow();
194: }
195: lock.release(ta);
196: } else {
197: // env.logWriter.newEntry( this, this+".commit(): lock="+lock+".", LogWriter.DEBUG3 );
198: throw new NullPointerException(this + ".commit(" + ta
199: + "): lock=" + lock + ".");
200: }
201: }
202:
203: public void abort(Transaction ta) throws IOException {
204: if (false && env.logWriter.hasTarget(LogWriter.DEBUG3)) {
205: env.logWriter.newEntry(this , "abort()" + clusterID,
206: LogWriter.DEBUG3);
207: }
208:
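        // For write-level locks the shadow file created by saveShadow() is
        // restored and removed again; the in-memory cluster has to be
        // re-loaded afterwards (see restoreShadow()).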
209: if (true || lock != null) {
210: if (lock.level(ta) > Lock.LEVEL_UPGRADE) {
211: restoreShadow();
212: deleteShadow();
213: }
214: lock.release(ta);
215: } else {
216: throw new NullPointerException(this + ".abort(" + ta
217: + "): lock=" + lock + ".");
218: }
219: }
220:
221: // protected void clearContainerStates() {
222: // WizardObjectContainer container;
223: // DxIterator it = containers.iterator();
224: // while ((container=(WizardObjectContainer)it.next()) != null) {
225: // container.clearState();
226: // }
227: // }
228:
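    /**
     * Returns the transactions that currently hold a lock on this cluster,
     * resolved from the lock's locker IDs via the transaction manager.
     */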
229: protected DxCollection allLockers() {
230: DxCollection lockerIDs = lock.lockerIDs();
231:
232: DxArrayBag result = new DxArrayBag(lockerIDs.count());
233: DxIterator it = lockerIDs.iterator();
234: while (it.next() != null) {
235: result.add(env.transactionManager
236: .taForID((TransactionID) it.object()));
237: }
238:
239: return result;
240: }
241:
242: protected void saveShadow() throws IOException {
243: if (env != null) {
244: // env.logWriter.newEntry( this, "saveShadow(): " + clusterID, LogWriter.DEBUG3 );
245: }
246:
247: String basename = clusterStore.basename(clusterID);
248: File orig = new File(basename + ClusterStore.POSTFIX_CLUSTER);
249: File shadow = new File(basename + ClusterStore.POSTFIX_SHADOW);
250:
        // Earlier versions just _moved_ the file; this was faster and caused
        // no problems because an abnormal server exit forced a recovery in any
        // case. This behaviour has changed, so the file now has to be copied
        // in order to keep an intact cluster.
255:
256: // orig.renameTo( shadow );
257:
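        // Copy the original cluster file chunk-wise into the shadow file.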
258: InputStream in = null;
259: OutputStream out = null;
260: try {
261: in = new FileInputStream(orig);
262: out = new FileOutputStream(shadow);
263:
264: final int chunkSize = 4096;
265: byte[] chunk = new byte[chunkSize];
266: int c = 0;
267: while ((c = in.read(chunk)) != -1) {
268: out.write(chunk, 0, c);
269: }
270: } finally {
271: if (in != null) {
272: in.close();
273: }
274: if (out != null) {
275: out.close();
276: }
277: }
278: }
279:
280: /**
281: * Restore the saved shadow on disk. The content of the receiver stays the
282: * same. The cluster needs to be re-loaded to reflect the changes.
283: */
284: protected void restoreShadow() throws IOException {
285: if (env != null) {
286: // env.logWriter.newEntry( this, "restoreShadow(): " + clusterID, LogWriter.DEBUG3 );
287: }
288:
289: // restore cluster on disk by renaming back shadow file
290: String basename = clusterStore.basename(clusterID);
291: File orig = new File(basename + ClusterStore.POSTFIX_CLUSTER);
292: File shadow = new File(basename + ClusterStore.POSTFIX_SHADOW);
293:
        // On Windows, renaming does not work if the target still exists, so it
        // has to be deleted first; long live WORA!
296: orig.delete();
297: if (!shadow.renameTo(orig)) {
298: throw new IOException("Unable to rename shadow file.");
299: }
300: }
301:
302: protected void deleteShadow() throws IOException {
303: if (false && env.logWriter.hasTarget(LogWriter.DEBUG3)) {
304: env.logWriter.newEntry(this ,
305: "deleteShadow(): " + clusterID, LogWriter.DEBUG3);
306: }
307:
308: String basename = clusterStore.basename(clusterID);
309: File shadow = new File(basename + ClusterStore.POSTFIX_SHADOW);
310: if (shadow.exists() && !shadow.delete()) {
311: throw new IOException("Unable to delete shadow file.");
312: }
313: }
314:
315: /**
316: * Delete this cluster from the disk.
317: */
318: protected void delete() throws Exception {
319: String basename = clusterStore.basename(clusterID);
320: new File(basename + ClusterStore.POSTFIX_CLUSTER).delete();
321: new File(basename + ClusterStore.POSTFIX_SHADOW).delete();
322: new File(basename + ClusterStore.POSTFIX_TEMP).delete();
323: new File(basename + ClusterStore.POSTFIX_LOCK).delete();
324: }
325:
326: public String toString() {
327: return "Cluster[id=" + clusterID + "]";
328: }
329:
330: protected Lock getLock() {
331: return lock;
332: }
333:
334: protected void setLock(Lock to) {
335: if (false && to == null) {
336: if (env.logWriter.hasTarget(LogWriter.INFO)) {
337: env.logWriter.newEntry(this , this + ".setLock(" + to
338: + ") (oldLock=" + this .lock + "), isPinned()="
339: + isPinned() + ".", new Exception(),
340: LogWriter.INFO);
341: }
342: }
343:
        this.lock = to;
345: }
346:
347: public void writeExternal(ObjectOutput out) throws IOException {
348: // System.out.println ("cluster.writeExternal()...");
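        // Stream layout: subSerialVersionUID (byte), clusterID, permissions,
        // modTime (long), container count (int), followed by each container
        // written via WizardObjectContainer.writeExternal(); the transient
        // lock is not written.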
349: out.writeByte(subSerialVersionUID);
350: out.writeObject(clusterID);
351: out.writeObject(permissions);
352: out.writeLong(modTime);
353: // out.writeObject (lock);
354:
355: out.writeInt(containers.count());
356: DxIterator it = containers.iterator();
357: WizardObjectContainer container;
358: while ((container = (WizardObjectContainer) it.next()) != null) {
359: // out.writeObject( container );
360: container.writeExternal(out);
361: }
362: }
363:
364: public void readExternal(ObjectInput in) throws IOException,
365: ObjectStreamException, ClassNotFoundException {
366: // System.out.println ("cluster.readExternal()...");
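        // Stream format version written by writeExternal(); currently read
        // but not checked against subSerialVersionUID.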
367: byte streamUID = in.readByte();
368: clusterID = (ClusterID) in.readObject();
369: permissions = (Permissions) in.readObject();
370: modTime = in.readLong();
371: // lock = (Lock)in.readObject();
372:
373: int count = in.readInt();
374: for (int i = 0; i < count; i++) {
375: try {
376: // WizardObjectContainer container = (WizardObjectContainer)in.readObject();
377: WizardObjectContainer container = new WizardObjectContainer();
378: container.readExternal(in);
                container.setCluster(this);
380: // Env.currentEnv().getLogWriter().newEntry(this,this+".readExternal(): container.id()="+container.id()+".",LogWriter.DEBUG2);
381: containers.addForKey(container, container.id());
382: } catch (ObjectStreamException e) {
383: if (env != null) {
384: env.logWriter.newEntry(this ,
385: "ObjectStreamException for cluster "
386: + clusterID + " at container #" + i
387: + ": ", e, LogWriter.ERROR);
                } else {
                    System.out.println("ObjectStreamException for cluster "
                            + clusterID + " at container #" + i + ": " + e);
                }
396: throw e;
397: }
398: }
399: }
400:
401: /**
402: Pins this Cluster.
403: Every caller of this method must pair this call with a call to {@link #unpin}.
404: A Cluster remains in main memory at least as long as it is pinned.
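      <p>A minimal usage sketch (the surrounding code is hypothetical):
      <pre>
      cluster.pin();
      try {
          // work with the cluster; it cannot be passivated while pinned
      } finally {
          cluster.unpin();
      }
      </pre>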
405: */
406: public synchronized void pin() {
407: pinCount++;
408: }
409:
410: /**
411: Unpins this Cluster.
412: This method must be called exactly once for every call to {@link #pin}.
413: */
414: public synchronized void unpin() {
415: pinCount--;
416: }
417:
418: /**
    Returns whether this cluster is pinned.
420: */
421: public synchronized boolean isPinned() {
422: return pinCount != 0;
423: }
424:
425: /**
426: Sets the pin count to zero and returns the former pin count.
427: */
428: public synchronized int clearPinCount() {
429: int oldPinCount = pinCount;
430:
431: pinCount = 0;
432:
433: return oldPinCount;
434: }
435:
436: /**
    Adds the given amount to the pin count.
438: */
439: public synchronized void addPinCount(int what) {
440: pinCount += what;
441: }
442: }
|