0001: /*
0002:
0003: Derby - Class org.apache.derby.impl.store.raw.data.RAFContainer
0004:
0005: Licensed to the Apache Software Foundation (ASF) under one or more
0006: contributor license agreements. See the NOTICE file distributed with
0007: this work for additional information regarding copyright ownership.
0008: The ASF licenses this file to you under the Apache License, Version 2.0
0009: (the "License"); you may not use this file except in compliance with
0010: the License. You may obtain a copy of the License at
0011:
0012: http://www.apache.org/licenses/LICENSE-2.0
0013:
0014: Unless required by applicable law or agreed to in writing, software
0015: distributed under the License is distributed on an "AS IS" BASIS,
0016: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
0017: See the License for the specific language governing permissions and
0018: limitations under the License.
0019:
0020: */
0021:
0022: package org.apache.derby.impl.store.raw.data;
0023:
0024: import org.apache.derby.iapi.reference.SQLState;
0025: import org.apache.derby.impl.store.raw.data.BaseContainer;
0026: import org.apache.derby.impl.store.raw.data.BaseContainerHandle;
0027: import org.apache.derby.impl.store.raw.data.BasePage;
0028:
0029: import org.apache.derby.iapi.services.cache.Cacheable;
0030: import org.apache.derby.iapi.services.context.ContextService;
0031: import org.apache.derby.iapi.services.monitor.Monitor;
0032: import org.apache.derby.iapi.services.diag.Performance;
0033: import org.apache.derby.iapi.services.sanity.SanityManager;
0034: import org.apache.derby.iapi.services.io.FormatIdUtil;
0035:
0036: import org.apache.derby.iapi.error.StandardException;
0037:
0038: import org.apache.derby.iapi.store.raw.ContainerHandle;
0039: import org.apache.derby.iapi.store.raw.ContainerKey;
0040: import org.apache.derby.iapi.store.raw.Loggable;
0041: import org.apache.derby.iapi.store.raw.log.LogInstant;
0042: import org.apache.derby.iapi.store.raw.xact.RawTransaction;
0043:
0044: import org.apache.derby.io.StorageFactory;
0045: import org.apache.derby.io.WritableStorageFactory;
0046: import org.apache.derby.io.StorageFile;
0047: import org.apache.derby.io.StorageRandomAccessFile;
0048: import org.apache.derby.iapi.services.io.FileUtil;
0049: import java.util.Vector;
0050:
0051: import java.io.DataInput;
0052: import java.io.IOException;
0053: import java.io.File;
0054: import java.io.RandomAccessFile;
0055: import java.security.AccessController;
0056: import java.security.PrivilegedExceptionAction;
0057: import java.security.PrivilegedActionException;
0058: import java.lang.reflect.Method;
0059: import java.lang.reflect.Constructor;
0060:
0061: /**
0062: RAFContainer (short for RandomAccessFileContainer) is a concrete subclass of FileContainer
0063: for FileContainers which are implemented on java.io.RandomAccessFile.
0064: */
0065:
0066: class RAFContainer extends FileContainer implements
0067: PrivilegedExceptionAction {
0068:
0069: /*
0070: * Immutable fields
0071: */
0072: protected StorageRandomAccessFile fileData;
0073:
0074: /*
0075: ** Mutable fields, only valid when the identity is valid.
0076: */
0077: protected boolean needsSync;
0078:
0079: /* privileged actions */
0080: private int actionCode;
0081: private static final int GET_FILE_NAME_ACTION = 1;
0082: private static final int CREATE_CONTAINER_ACTION = 2;
0083: private static final int REMOVE_FILE_ACTION = 3;
0084: private static final int OPEN_CONTAINER_ACTION = 4;
0085: private static final int STUBBIFY_ACTION = 5;
0086: private static final int BACKUP_CONTAINER_ACTION = 6;
0087: private static final int GET_RANDOM_ACCESS_FILE_ACTION = 7;
0088: private ContainerKey actionIdentity;
0089: private boolean actionStub;
0090: private boolean actionErrorOK;
0091: private boolean actionTryAlternatePath;
0092: private StorageFile actionFile;
0093: private LogInstant actionInstant;
0094: private String actionBackupLocation;
0095: private BaseContainerHandle actionContainerHandle;
0096:
0097: private boolean inBackup = false;
0098: private boolean inRemove = false;
0099:
0100: /* Fields with references to classes and methods in ReentrantLock
0101: * introduced in Java 1.5. Reflection is used to only use these
0102: * interfaces if they exist.
0103: *
0104: */
0105: private static Class fairLockClass;
0106: private static Constructor fairLockConstructor;
0107: private static Method lock;
0108: private static Method unlock;
0109: private static boolean hasJava5FairLocks = false;
0110:
    // Use reflection to find the constructor, lock() and unlock() in
    // java.util.concurrent.locks.ReentrantLock (introduced in Java 1.5).
    // If the class and its methods are all found, hasJava5FairLocks is
    // set to true and fair locking can be used by readPage(); otherwise
    // the flag stays false and the old monitor-only behaviour is kept.
    static {
        try {
            fairLockClass = Class
                    .forName("java.util.concurrent.locks.ReentrantLock");

            // the ReentrantLock(boolean fair) constructor
            fairLockConstructor = fairLockClass
                    .getConstructor(new Class[] { Boolean.TYPE });

            lock = fairLockClass.getMethod("lock", new Class[0]);
            unlock = fairLockClass.getMethod("unlock", new Class[0]);
            hasJava5FairLocks = true;
        } catch (NoSuchMethodException nsme) {
            // running on a pre-1.5 JVM; fair locking is simply not used
        } catch (ClassNotFoundException cnfe) {
            // running on a pre-1.5 JVM; fair locking is simply not used
        }
    }
0130:
0131: /**
0132: * Object of type java.util.concurrent.locks.ReentrantLock. It is
0133: * used to prevent starvation when many threads are reading from
0134: * the same file.
0135: */
0136: private Object fairLock;
0137:
0138: /*
0139: * Constructors
0140: */
0141:
0142: RAFContainer(BaseDataFileFactory factory) {
0143: super (factory);
0144:
0145: // If Java 1.5 fair locks are available, construct one.
0146: if (hasJava5FairLocks) {
0147: try {
0148: // construct a lock with fairness set to true
0149: fairLock = fairLockConstructor
0150: .newInstance(new Object[] { Boolean.TRUE });
0151: } catch (Exception e) {
0152: // couldn't construct the lock, fall back to old behaviour
0153:
0154: hasJava5FairLocks = false;
0155: if (SanityManager.DEBUG) {
0156: SanityManager.THROWASSERT(
0157: "failed constructing ReentrantLock", e);
0158: }
0159: }
0160: }
0161: }
0162:
0163: /*
0164: ** Methods overriding super-class
0165: */
0166:
0167: synchronized public boolean isDirty() {
0168: return super .isDirty() || needsSync;
0169: }
0170:
0171: /*
0172: ** Methods of Cacheable
0173: */
0174:
0175: /**
0176: Set container's identity
0177: @exception StandardException Standard Cloudscape error policy
0178: */
0179: public Cacheable setIdentity(Object key) throws StandardException {
0180:
0181: ContainerKey newIdentity = (ContainerKey) key;
0182:
0183: // if this is an open for a temp container then return an object of that type
0184: if (newIdentity.getSegmentId() == ContainerHandle.TEMPORARY_SEGMENT) {
0185:
0186: TempRAFContainer tmpContainer = new TempRAFContainer(
0187: dataFactory);
0188: return tmpContainer.setIdent(newIdentity);
0189: }
0190:
0191: return setIdent(newIdentity);
0192: }
0193:
0194: /**
0195: @exception StandardException Standard Cloudscape error policy
0196: */
0197: public Cacheable createIdentity(Object key, Object createParameter)
0198: throws StandardException {
0199:
0200: ContainerKey newIdentity = (ContainerKey) key;
0201:
0202: if (newIdentity.getSegmentId() == ContainerHandle.TEMPORARY_SEGMENT) {
0203: TempRAFContainer tmpContainer = new TempRAFContainer(
0204: dataFactory);
0205: return tmpContainer.createIdent(newIdentity,
0206: createParameter);
0207: }
0208:
0209: return createIdent(newIdentity, createParameter);
0210: }
0211:
0212: /*
0213: ** Container creation, opening, and closing
0214: */
0215:
    /**
        Remove the container.

        Waits for any in-progress backup of this container to stop before
        discarding the container's cached pages and converting the on-disk
        file into a stub. The inRemove flag tells a concurrent backup
        thread to yield; notifyAll() in the finally block wakes it again
        once the remove is done.

        @param instant   log instant of the operation that removed the
                         container (recorded when writing the stub)
        @param leaveStub see RESOLVE note below; not consulted here

        @exception StandardException Standard Cloudscape error policy
    */
    protected void removeContainer(LogInstant instant, boolean leaveStub)
            throws StandardException {

        try {
            synchronized (this) {
                inRemove = true;
                // wait until the thread that is doing the backup stops
                // before proceeding with the remove.
                while (inBackup) {
                    try {
                        wait();
                    } catch (InterruptedException ie) {
                        throw StandardException.interrupt(ie);
                    }
                }
            }

            // discard all of my pages in the cache
            pageCache.discard(identity);
            stubbify(instant);
        } finally {
            synchronized (this) {
                // let a waiting backup thread resume
                inRemove = false;
                notifyAll();
            }
        }

        // RESOLVE: leaveStub false
    }
0250:
0251: final void closeContainer() {
0252:
0253: if (fileData != null) {
0254: try {
0255: fileData.close();
0256: } catch (IOException ioe) {
0257: } finally {
0258:
0259: fileData = null;
0260: }
0261: }
0262: }
0263:
0264: /*
0265: ** Methods used solely by StoredPage
0266: */
0267:
    /**
        Read a page into the supplied array.

        <BR> MT - thread safe

        @param pageNumber the page number to read
        @param pageData   buffer that receives the page image; decrypted
                          in place when the database is encrypted
        @exception IOException exception reading page
        @exception StandardException Standard Cloudscape error policy
    */
    protected void readPage(long pageNumber, byte[] pageData)
            throws IOException, StandardException {
        if (SanityManager.DEBUG) {
            // a committed-dropped container is only a stub; reads are invalid
            SanityManager.ASSERT(!getCommittedDropState());
        }

        // no logical -> physical mapping: page offset is computed directly
        long pageOffset = pageNumber * pageSize;

        // Use Java 1.5 fair locks if they are available.
        if (hasJava5FairLocks) {
            try {
                lock.invoke(fairLock, null);
            } catch (Exception e) {
                // Something bad happened while trying to lock the
                // region. Since the locking is not required for
                // anything other than ensuring fairness, it is ok to
                // fall back to pre-1.5 behaviour.
                hasJava5FairLocks = false;
                if (SanityManager.DEBUG) {
                    SanityManager.THROWASSERT(
                            "failed invoking ReentrantLock.lock()", e);
                }
            }
        }

        try {
            // Starvation might occur at this point if many threads
            // are waiting for the monitor. This section is therefore
            // surrounded by calls to ReentrantLock.lock()/unlock() if
            // we are running Java 1.5 or higher.
            synchronized (this) {
                fileData.seek(pageOffset);
                fileData.readFully(pageData, 0, pageSize);
            }
        } finally {
            // Unlock this section.
            if (hasJava5FairLocks) {
                try {
                    unlock.invoke(fairLock, null);
                } catch (Exception e) {
                    // An error occurred while unlocking the
                    // region. The region might still be locked, so
                    // we'd better stop using this kind of
                    // locking. There will be no loss of
                    // functionality, only a possible loss of
                    // fairness.
                    hasJava5FairLocks = false;
                    if (SanityManager.DEBUG) {
                        SanityManager
                                .THROWASSERT(
                                        "failed invoking ReentrantLock.unlock()",
                                        e);
                    }
                }
            }
        }

        // the first alloc page also carries the clear-text container
        // header (see updatePageArray), so it is never encrypted on disk
        if (dataFactory.databaseEncrypted()
                && pageNumber != FIRST_ALLOC_PAGE_NUMBER) {
            decryptPage(pageData, pageSize);
        }
    }
0337:
    /**
        Write a page from the supplied array.

        <BR> MT - thread safe

        @param pageNumber the page number to write
        @param pageData   clear-text page image; encrypted into a shared
                          buffer before hitting disk when the database is
                          encrypted
        @param syncPage   if true, sync the file before returning;
                          if false, just record that a sync is pending
        @exception StandardException Standard Cloudscape error policy
        @exception IOException IO error accessing page
    */
    protected void writePage(long pageNumber, byte[] pageData,
            boolean syncPage) throws IOException, StandardException {
        synchronized (this) {

            if (getCommittedDropState()) {
                // committed and dropped, do nothing.
                // This file container may only be a stub

                return;
            }

            ///////////////////////////////////////////////////
            //
            // RESOLVE: right now, no logical -> physical mapping.
            // We can calculate the offset. In the future, we may need to
            // look at the allocation page or the in memory translation table
            // to figure out where the page should go
            //
            /////////////////////////////////////////////////

            long pageOffset = pageNumber * pageSize;

            byte[] encryptionBuf = null;
            if (dataFactory.databaseEncrypted()
                    && pageNumber != FIRST_ALLOC_PAGE_NUMBER) {
                // We cannot encrypt the page in place because pageData is
                // still being accessed as clear text. The encryption
                // buffer is shared by all who access this container and can
                // only be used within the synchronized block.

                encryptionBuf = getEncryptionBuffer();
            }

            byte[] dataToWrite = updatePageArray(pageNumber, pageData,
                    encryptionBuf, false);

            try {
                fileData.seek(pageOffset);

                /**
                    On EPOC (www.symbian.com) a seek beyond the end of
                    a file just moves the file pointer to the end of the file.
                */
                if (fileData.getFilePointer() != pageOffset)
                    padFile(fileData, pageOffset);

                dataFactory.writeInProgress();
                try {
                    fileData.write(dataToWrite, 0, pageSize);
                } finally {
                    dataFactory.writeFinished();
                }
            } catch (IOException ioe) {
                // On some platforms, if we seek beyond the end of file, or try
                // to write beyond the end of file (not appending to it, but
                // skipping some bytes), it will give IOException.
                // Try writing zeros from the current end of file to pageOffset
                // and see if we can then do the seek/write. The difference
                // between pageOffset and current end of file is almost always
                // going to be the multiple of pageSize

                if (!padFile(fileData, pageOffset))
                    throw ioe; // not writing beyond EOF, rethrow exception

                if (SanityManager.DEBUG) {
                    SanityManager.ASSERT(
                            fileData.length() >= pageOffset,
                            "failed to blank filled missing pages");
                }

                // the file now extends to pageOffset; retry the write once
                fileData.seek(pageOffset);
                dataFactory.writeInProgress();
                try {
                    fileData.write(dataToWrite, 0, pageSize);
                } finally {
                    dataFactory.writeFinished();
                }
            }

            if (syncPage) {
                // caller requires durability now, unless the factory is
                // configured to skip syncs at allocation time
                dataFactory.writeInProgress();
                try {
                    if (!dataFactory.dataNotSyncedAtAllocation)
                        fileData.sync(false);
                } finally {
                    dataFactory.writeFinished();
                }
            } else {
                // defer the sync; isDirty() reports true until clean()
                // writes and syncs the container header
                needsSync = true;
            }
        }

    }
0440:
0441: /**
0442: * Update the page array with container header if the page is a first alloc
0443: * page and encrypt the page data if the database is encrypted.
0444: * @param pageNumber the page number of the page
0445: * @param pageData byte array that has the actual page data.
0446: * @param encryptionBuf buffer that is used to store encryted version of the
0447: * page.
0448: * @return byte array of the the page data as it should be on the disk.
0449: */
0450: private byte[] updatePageArray(long pageNumber, byte[] pageData,
0451: byte[] encryptionBuf, boolean encryptWithNewEngine)
0452: throws StandardException, IOException {
0453: if (pageNumber == FIRST_ALLOC_PAGE_NUMBER) {
0454: // write header into the alloc page array regardless of dirty
0455: // bit because the alloc page have zero'ed out the borrowed
0456: // space
0457: writeHeader(pageData);
0458:
0459: if (SanityManager.DEBUG) {
0460: if (FormatIdUtil.readFormatIdInteger(pageData) != AllocPage.FORMAT_NUMBER)
0461: SanityManager.THROWASSERT("expect "
0462: + AllocPage.FORMAT_NUMBER
0463: + "got "
0464: + FormatIdUtil
0465: .readFormatIdInteger(pageData));
0466: }
0467:
0468: return pageData;
0469:
0470: } else {
0471: if (dataFactory.databaseEncrypted() || encryptWithNewEngine) {
0472: return encryptPage(pageData, pageSize, encryptionBuf,
0473: encryptWithNewEngine);
0474: } else {
0475: return pageData;
0476: }
0477: }
0478: }
0479:
0480: /**
0481: Pad the file upto the passed in page offset.
0482: Returns true if the file needed padding.
0483: */
0484:
0485: private boolean padFile(StorageRandomAccessFile file,
0486: long pageOffset) throws IOException, StandardException {
0487:
0488: long currentEOF = file.length();
0489: if (currentEOF >= pageOffset)
0490: return false;
0491:
0492: // all objects in java are by definition initialized
0493: byte zero[] = new byte[pageSize];
0494:
0495: file.seek(currentEOF);
0496:
0497: while (currentEOF < pageOffset) {
0498: dataFactory.writeInProgress();
0499: try {
0500: long len = pageOffset - currentEOF;
0501: if (len > pageSize)
0502: len = pageSize;
0503:
0504: file.write(zero, 0, (int) len);
0505: } finally {
0506: dataFactory.writeFinished();
0507: }
0508: currentEOF += pageSize;
0509: }
0510:
0511: return true;
0512: }
0513:
    /**
     * Clean the container.
     * <p>
     * Write out the container header and sync all dirty pages of this
     * container to disk before returning.
     * <p>
     * checkpoint calls this interface through callbacks by telling
     * the cache manager to clean all containers in the open container
     * cache. This sync of the file happens as part of writing and then
     * syncing the container header in writeRAFHeader().
     * <p>
     *
     * @param forRemove Is clean called because container is being removed?
     *
     * @exception StandardException Standard exception policy.
     **/
    public void clean(boolean forRemove) throws StandardException {
        boolean waited = false;

        synchronized (this) {

            // committed and dropped, do nothing.
            // This file container has already been stubbified
            if (getCommittedDropState()) {
                clearDirty();
                return;
            }

            // The container is about to change, need to wait till it is really
            // changed. We are in the predirty state only for the duration
            // where the log record that changed the container has been sent to
            // the log and before the change actually happened.
            while (preDirty == true) {
                waited = true;
                try {
                    wait();
                } catch (InterruptedException ie) {
                    // surface the interrupt as a standard exception
                    throw StandardException.interrupt(ie);
                }
            }

            if (waited) {
                // someone else may have stubbified this while we waited
                if (getCommittedDropState()) {
                    clearDirty();
                    return;
                }
            }

            if (forRemove) {

                // removeFile()
                // clearDirty();

            } else if (isDirty()) {

                try {

                    // Cannot get the alloc page and write it out
                    // because in order to do so, the alloc page will need to
                    // find this container object. But this container object
                    // is in the middle of being cleaned and may not be
                    // 'found' and we will hang.
                    //
                    // Instead, just clobber the container info, which is
                    // checksum'ed separately from the alloc page
                    //
                    writeRAFHeader(fileData, false, // don't create, container exists
                            true); // syncfile

                    clearDirty();

                } catch (IOException ioe) {

                    // a failed header write leaves the store in an unknown
                    // state: mark the whole database corrupt
                    throw dataFactory.markCorrupt(StandardException
                            .newException(
                                    SQLState.FILE_CONTAINER_EXCEPTION,
                                    ioe, this));
                }
            }
        }
    }
0596:
    /**
     * Reset both dirty indicators: the base container's isDirty flag and
     * this container's pending-sync flag. Called from clean() while
     * synchronized on this, once the header has been written or the
     * container turns out to be a committed-dropped stub.
     */
    private void clearDirty() {
        isDirty = false;
        needsSync = false;
    }
0601:
    /**
        Preallocate some pages if need be.

        @param lastPreallocPagenum the last page number already preallocated
        @param preAllocSize        number of pages to try to preallocate
        @return number of pages actually preallocated; 0 if either the
                allocation or the subsequent file sync failed
    */
    protected int preAllocate(long lastPreallocPagenum, int preAllocSize) {

        /* we had a condition here that looked at the file size before
         * preallocation to handle optimization cases like: we
         * preallocated the space and then crashed. As we don't log the
         * preallocated length, we don't have an updated value until the
         * AllocExtent page gets flushed to the disk; the only way to find
         * out that the pages we want already exist is to look at the file
         * length.
         * Although it was a nice thing to do, we had bug no: 3813 from a
         * customer who, for some unexplainable reason, gets lots of
         * junk at the end of the file. As junk is not initialized with
         * a format-ID, we get into recovery problems.
         * To avoid such unforeseen conditions, the file size check
         * condition was removed, as it is better not to fail in recovery
         * than to lose some special case performance improvement.
         */

        int n = doPreAllocatePages(lastPreallocPagenum, preAllocSize);

        if (n > 0) // sync the file
        {
            synchronized (this) {
                boolean inwrite = false;
                try {
                    dataFactory.writeInProgress();
                    inwrite = true;

                    if (!dataFactory.dataNotSyncedAtAllocation)
                        fileData.sync(false);
                } catch (IOException ioe) {
                    // The disk may have run out of space.
                    // Don't error out in pre-allocation since the user may not
                    // actually need this page.
                    n = 0;
                } catch (StandardException se) {
                    // some problem calling writeInProgress
                    n = 0;
                } finally {
                    // only balance writeInProgress() if it succeeded
                    if (inwrite)
                        dataFactory.writeFinished();
                }
            }
        }

        return n;
    }
0651:
0652: /**
0653: * Truncate pages of a container.
0654: * <p>
0655: * Truncate all pages from lastValidPagenum+1 through the end of the file.
0656: * <p>
0657: *
0658: * @param lastValidPagenum The page number of the last valid page of the
0659: * file. All pages after this one are truncated.
0660: *
0661: * @exception StandardException Standard exception policy.
0662: **/
0663: protected void truncatePages(long lastValidPagenum)
0664: throws StandardException {
0665:
0666: synchronized (this ) {
0667: boolean inwrite = false;
0668: try {
0669: dataFactory.writeInProgress();
0670: inwrite = true;
0671:
0672: fileData.setLength((lastValidPagenum + 1) * pageSize);
0673: } catch (IOException ioe) {
0674: // The disk may have run out of space.
0675: // Don't error out in un-allocation since application can
0676: // still function even if allocation fails.
0677: } catch (StandardException se) {
0678: // some problem calling writeInProgress
0679: } finally {
0680: if (inwrite)
0681: dataFactory.writeFinished();
0682: }
0683: }
0684:
0685: return;
0686: }
0687:
    /**
        Write the header of a random access file and sync it.

        @param file     the open container file to write the header into
        @param create   if true, the container is being created;
                        if false, the container already exists
        @param syncFile if true, sync the file after writing the header
        @exception IOException IO error writing or syncing the header
        @exception StandardException Standard Cloudscape error policy
    */
    private void writeRAFHeader(StorageRandomAccessFile file,
            boolean create, boolean syncFile) throws IOException,
            StandardException {
        byte[] epage;
        if (create) {
            // the file doesn't exist yet, get an embryonic page buffer
            epage = getEmbryonicPage((DataInput) null);
        } else {
            // re-read the current embryonic page from the file
            // (presumably so data outside the header is preserved
            // when it is written back -- TODO confirm in FileContainer)
            file.seek(FIRST_ALLOC_PAGE_OFFSET);
            epage = getEmbryonicPage(file);
        }

        // need to check for frozen state

        file.seek(FIRST_ALLOC_PAGE_OFFSET);
        writeHeader(file, create, epage);

        // leave the end of the file at a page boundry. This
        // is to work around bugs in the EPOC jvm where a seek
        // beyond the end of a file does not throw an exception
        // but just moves the offset to the end of the file. This only
        // occurs when the second page is written after the header has
        // been written, ending up with the page at the incorrect offset.
        if (create) {
            padFile(file, pageSize);
        }

        if (syncFile) {
            dataFactory.writeInProgress();
            try {
                if (!dataFactory.dataNotSyncedAtCheckpoint)
                    file.sync(false);

            } finally {
                dataFactory.writeFinished();
            }
        }

        epage = null;
    }
0734:
    /**
        Flush the cache to ensure all of my pages are written to disk.

        First writes every dirty page of this container held in the page
        cache, then cleans the container itself, which writes and syncs
        the container header (see clean()).

        @exception StandardException Standard Cloudscape error policy
    */
    protected void flushAll() throws StandardException {

        pageCache.clean(identity);

        // now clean myself which will sync all my pages.
        clean(false);
    }
0747:
    /**
        Return the StorageFile for this container (or its stub).

        The parameters travel to the privileged action through the
        action* instance fields, which is why this method is synchronized:
        concurrent callers must not clobber each other's parameters.
        The action is presumably dispatched to privGetFileName() by run()
        (run() is outside this chunk -- confirm).

        @param identity         the container's key
        @param stub             if true, name the container stub instead
        @param errorOK          if true, failures return null rather than throw
        @param tryAlternatePath if true, retry with the alternate
                                (lower-case) container path when the file
                                does not exist
        @exception StandardException Standard Cloudscape error policy
    */
    synchronized StorageFile getFileName(ContainerKey identity,
            boolean stub, boolean errorOK, boolean tryAlternatePath)
            throws StandardException {
        // RESOLVE - READ ONLY

        actionCode = GET_FILE_NAME_ACTION;
        actionIdentity = identity;
        actionStub = stub;
        actionErrorOK = errorOK;
        actionTryAlternatePath = tryAlternatePath;
        try {
            return (StorageFile) AccessController.doPrivileged(this);
        } catch (PrivilegedActionException pae) {
            // unwrap the StandardException thrown inside the action
            throw (StandardException) pae.getException();
        } finally {
            actionIdentity = null;
        }
    }
0766:
0767: protected StorageFile privGetFileName(ContainerKey identity,
0768: boolean stub, boolean errorOK, boolean tryAlternatePath)
0769: throws StandardException {
0770: StorageFile container = dataFactory.getContainerPath(identity,
0771: stub);
0772:
0773: // retry with small case 'c' and 'd'
0774: // bug fix for track 3444
0775: if (!container.exists() && tryAlternatePath) {
0776: container = dataFactory.getAlternateContainerPath(identity,
0777: stub);
0778: }
0779:
0780: if (!container.exists()) {
0781:
0782: StorageFile directory = container.getParentDir();
0783:
0784: if (!directory.exists()) {
0785: // make sure only 1 thread can create a segment at one time
0786: synchronized (dataFactory) {
0787: if (!directory.exists()) {
0788: if (!directory.mkdirs()) {
0789: if (errorOK) {
0790: return null;
0791: } else {
0792: throw StandardException
0793: .newException(
0794: SQLState.FILE_CANNOT_CREATE_SEGMENT,
0795: directory);
0796: }
0797: }
0798: }
0799: }
0800: }
0801: }
0802:
0803: return container;
0804: } // end of privGetFileName
0805:
    /**
        Create the container file on disk.

        The identity travels to the privileged action through
        actionIdentity; the method is synchronized to protect the action*
        fields from concurrent callers. (The action is dispatched by
        run(), which is outside this chunk.)

        @param newIdentity key of the container to create
        @exception StandardException Standard Cloudscape error policy
    */
    synchronized void createContainer(ContainerKey newIdentity)
            throws StandardException {

        if (SanityManager.DEBUG) {
            // spareSpace is a percentage; sanity-check its range
            if ((spareSpace < 0) || (spareSpace > 100))
                SanityManager.THROWASSERT("invalid spare space "
                        + spareSpace);
        }

        actionCode = CREATE_CONTAINER_ACTION;
        actionIdentity = newIdentity;
        try {
            AccessController.doPrivileged(this);
        } catch (PrivilegedActionException pae) {
            // unwrap the StandardException thrown inside the action
            throw (StandardException) pae.getException();
        } finally {
            actionIdentity = null;
        }
    } // end of createContainer
0825:
    /**
        Remove the given file in a privileged block.

        Synchronized to protect the actionFile side-channel field used to
        pass the parameter to the privileged action.

        @param file the file (container or stub) to delete
        @return true if the delete succeeded (a non-null action result;
                the mapping is done in run(), outside this chunk)
        @exception SecurityException if access is denied
        @exception StandardException Standard Cloudscape error policy
    */
    synchronized boolean removeFile(StorageFile file)
            throws SecurityException, StandardException {
        actionCode = REMOVE_FILE_ACTION;
        actionFile = file;
        try {
            return AccessController.doPrivileged(this) != null;
        } catch (PrivilegedActionException pae) {
            // unwrap the StandardException thrown inside the action
            throw (StandardException) pae.getException();
        } finally {
            actionFile = null;
        }
    } // end of removeFile
0838:
0839: private boolean privRemoveFile(StorageFile file)
0840: throws StandardException {
0841: closeContainer();
0842:
0843: dataFactory.writeInProgress();
0844: try {
0845: if (file.exists())
0846: return file.delete();
0847: } finally {
0848: dataFactory.writeFinished();
0849: }
0850:
0851: return true;
0852: } // end of privRemoveFile
0853:
    /**
        Open the container file for this identity.

        Synchronized to protect the actionIdentity side-channel field used
        to pass the parameter to the privileged action (dispatched by
        run(), outside this chunk).

        @param newIdentity key of the container to open
        @return true if the container was opened (non-null action result)
        @exception StandardException Standard Cloudscape error policy
    */
    synchronized boolean openContainer(ContainerKey newIdentity)
            throws StandardException {
        actionCode = OPEN_CONTAINER_ACTION;
        actionIdentity = newIdentity;
        try {
            return AccessController.doPrivileged(this) != null;
        } catch (PrivilegedActionException pae) {
            // unwrap the StandardException thrown inside the action
            throw (StandardException) pae.getException();
        } finally {
            actionIdentity = null;
        }
    }
0866:
    /**
     * Turn this container's file into a committed-drop "stub".
     *
     * Synchronized so that stubbify and clean() are mutually exclusive:
     * the cache may be cleaning this container at the same time. The
     * file-level work runs in a privileged block via the
     * actionCode/actionIdentity/actionInstant fields (dispatched by
     * run(), which is outside this chunk).
     *
     * @param instant log instant of the operation that dropped the container
     * @exception StandardException Standard Cloudscape error policy
     */
    private synchronized void stubbify(LogInstant instant)
            throws StandardException {
        // update header, synchronized this in case the cache is cleaning
        // this container at the same time. Make sure the clean and
        // stubbify is mutually exclusive.
        setDroppedState(true);
        setCommittedDropState(true);

        // The whole container should be shrunk into a 'stub'.
        // If the file system supports truncation, we can just truncate the
        // file after the header. Since it doesn't, we need to write out a
        // separate file (the stub), then reset fileData to point to that,
        // then remove the current file.
        //
        // There may still be dirty pages that belong to this file which are
        // still in the page cache. They need not really
        // be written since they don't really exist anymore
        //
        // there are 3 pieces of information on disk :
        // 1) the log operation that caused this file to be stubbified
        // 2) the stub
        // 3) the file
        //
        // The order of event, as far as persistent store is concerned, is
        // A) stub shows up
        // B) the file disappear
        // C) the log operation got flushed
        // (B and C may swap order)
        //
        // If neither A or B happens (we crashed before the sync call),
        // then nothing happened.
        //
        // if A happened but B and C did not, then when we recover, we will not
        // know the file has been stubbified. Hopefully, it will be stubbified
        // again if the post-commit queue manager is alerted to the fact.
        //
        // if A and B happened but C did not, then the file is stubbified but
        // there is no log record to indicate that. This is undesirable but
        // still safe because the only time we stubbify is on a post commit
        // operation, i.e., either a create container has rolled back or a
        // dropped container has committed. We end up having a container
        // stub which behaves the same as a dropped container - only that all
        // the redo work is unnecessary because we 'know' it will
        // eventually be dropped and committed.
        //
        // If A and C happened and not B, then during redo, this stubbify
        // routine will be called again and the file will be deleted again
        //
        // The reason why A has to be sync'ed out is that we don't want B to
        // happen but A did not and the system crashed. Then we are left
        // with neither the file nor the stub and maybe even no log record.
        // Then the system is not recoverable.

        actionIdentity = (ContainerKey) getIdentity();
        actionInstant = instant;
        actionCode = STUBBIFY_ACTION;
        try {
            AccessController.doPrivileged(this);
        } catch (PrivilegedActionException pae) {
            // unwrap the StandardException thrown inside the action
            throw (StandardException) pae.getException();
        } finally {
            actionIdentity = null;
            actionInstant = null;
        }
    }
0932:
    /**
     * Backup the container.
     *
     * Parameters travel to the privileged action through the
     * actionContainerHandle / actionBackupLocation fields; the copy is
     * presumably performed by privBackupContainer() via run() (outside
     * this block). NOTE(review): unlike the other action dispatchers,
     * this method is not synchronized; the class comments state that only
     * one backup thread runs at a time -- confirm that invariant holds.
     *
     * @param handle the container handle.
     * @param backupLocation location of the backup container.
     * @exception StandardException Standard Derby error policy
     */
    protected void backupContainer(BaseContainerHandle handle,
            String backupLocation) throws StandardException {
        actionContainerHandle = handle;
        actionBackupLocation = backupLocation;
        actionCode = BACKUP_CONTAINER_ACTION;
        try {
            AccessController.doPrivileged(this);
        } catch (PrivilegedActionException pae) {
            // unwrap the StandardException thrown inside the action
            throw (StandardException) pae.getException();
        } finally {
            actionContainerHandle = null;
            actionBackupLocation = null;
        }
    }
0954:
    /**
     * Backup the container.
     *
     * The container is written to the backup by reading the pages
     * through the page cache, and then writing into the backup container.
     * If the container is dropped (committed drop), only the container stub
     * is copied to the backup using a simple file copy.
     *
     * MT -
     * At any given time only one backup thread is allowed, but while a
     * backup is in progress DML/DDL operations can run in parallel. Pages
     * are latched while writing them to the backup to avoid copying partial
     * changes to the pages. Online backup does not acquire any user level
     * locks, so users can drop tables while a backup is in progress. So it
     * is possible that a container removal request can come in while a
     * container backup is in progress. This case is handled by using
     * synchronization on this object's monitor and the inRemove and
     * inBackup flags. Container removal checks whether a backup is in
     * progress and waits for the backup to yield before continuing the
     * removal. The basic idea is to give preference to the remove by
     * pausing the backup of the container temporarily when a remove of the
     * container is requested by another thread. Generally, it takes more
     * time to back up a regular container than the stub, because the stub
     * is just one page. After each page copy, a check is made to find
     * whether a remove has been requested; if it has, the backup of the
     * container is aborted and the backup thread puts itself into the wait
     * state until the removing thread notifies it that the remove is
     * complete. When the remove request completes, the stub is copied into
     * the backup.
     *
     * Compress is blocked while a backup is in progress, so truncation of
     * the container cannot happen during a backup. There is no need to
     * synchronize backup of the container with truncation.
     *
     *
     * @param handle the container handle.
     * @param backupLocation location of the backup container.
     * @exception StandardException Derby Standard error policy
     *
     */
0992: private void privBackupContainer(BaseContainerHandle handle,
0993: String backupLocation) throws StandardException {
0994: boolean backupCompleted = false;
0995: File backupFile = null;
0996: RandomAccessFile backupRaf = null;
0997: boolean isStub = false;
0998: BasePage page = null;
0999:
1000: while (!backupCompleted) {
1001: try {
1002:
1003: synchronized (this ) {
1004: // wait if some one is removing the
1005: // container because of a drop.
1006: while (inRemove) {
1007: try {
1008: wait();
1009: } catch (InterruptedException ie) {
1010: throw StandardException.interrupt(ie);
1011: }
1012: }
1013:
1014: if (getCommittedDropState())
1015: isStub = true;
1016: inBackup = true;
1017: }
1018:
1019: // create container at the backup location.
1020: if (isStub) {
1021: // get the stub ( it is a committted drop table container )
1022: StorageFile file = privGetFileName(
1023: (ContainerKey) getIdentity(), true, false,
1024: true);
1025: backupFile = new File(backupLocation, file
1026: .getName());
1027:
1028: // directly copy the stub to the backup
1029: if (!FileUtil.copyFile(dataFactory
1030: .getStorageFactory(), file, backupFile)) {
1031: throw StandardException.newException(
1032: SQLState.RAWSTORE_ERROR_COPYING_FILE,
1033: file, backupFile);
1034: }
1035: } else {
1036: // regular container file
1037: long lastPageNumber = getLastPageNumber(handle);
1038: if (lastPageNumber == ContainerHandle.INVALID_PAGE_NUMBER) {
1039: // last page number is invalid if there are no pages in
1040: // the container yet. No need to backup this container,
1041: // this container creation is yet to complete.The reason
1042: // backup is getting called on such a container is
1043: // because container handle appears in the cache after
1044: // the file is created on the disk but before it's
1045: // first page is allocated.
1046: return;
1047: }
1048:
1049: StorageFile file = privGetFileName(
1050: (ContainerKey) getIdentity(), false, false,
1051: true);
1052:
1053: backupFile = new File(backupLocation, file
1054: .getName());
1055: backupRaf = new RandomAccessFile(backupFile, "rw");
1056:
1057: byte[] encryptionBuf = null;
1058: if (dataFactory.databaseEncrypted()) {
1059: // Backup uses seperate encryption buffer to encrypt the
1060: // page instead of encryption buffer used by the regular
1061: // conatiner writes. Otherwise writes to the backup
1062: // has to be synchronized with regualar database writes
1063: // because backup can run in parallel to container
1064: // writes.
1065: encryptionBuf = new byte[pageSize];
1066: }
1067:
1068: // copy all the pages of the container from the database
1069: // to the backup location by reading through the page cache.
1070: for (long pageNumber = FIRST_ALLOC_PAGE_NUMBER; pageNumber <= lastPageNumber; pageNumber++) {
1071: page = getLatchedPage(handle, pageNumber);
1072:
1073: // update the page array before writing to the disk
1074: // with container header and encrypt it if the database
1075: // is encrypted.
1076:
1077: byte[] dataToWrite = updatePageArray(
1078: pageNumber, page.getPageArray(),
1079: encryptionBuf, false);
1080: backupRaf.write(dataToWrite, 0, pageSize);
1081:
1082: // unlatch releases page from cache, see
1083: // StoredPage.releaseExclusive()
1084: page.unlatch();
1085: page = null;
1086:
1087: // check if some one wants to commit drop the table while
1088: // conatiner is being written to the backup. If so,
1089: // abort the backup and restart it once the drop
1090: // is complete.
1091:
1092: synchronized (this ) {
1093: if (inRemove) {
1094: break;
1095: }
1096: }
1097: }
1098: }
1099:
1100: // sync and close the backup conatiner. Incase of a stub,
1101: // it is already synced and closed while doing the copy.
1102: if (!isStub) {
1103: backupRaf.getFD().sync();
1104: backupRaf.close();
1105: backupRaf = null;
1106: }
1107:
1108: // backup of the conatiner is complete.
1109: backupCompleted = true;
1110:
1111: } catch (IOException ioe) {
1112: throw StandardException.newException(
1113: SQLState.BACKUP_FILE_IO_ERROR, ioe, backupFile);
1114: } finally {
1115: synchronized (this ) {
1116: inBackup = false;
1117: notifyAll();
1118: }
1119:
1120: if (page != null) {
1121: page.unlatch();
1122: page = null;
1123: }
1124:
1125: // if backup of container is not complete, close the file
1126: // handles and remove the container file from the backup
1127: // if it exists
1128: if (!backupCompleted && backupFile != null) {
1129: if (backupRaf != null) {
1130: try {
1131: backupRaf.close();
1132: backupRaf = null;
1133: } catch (IOException ioe) {
1134: throw StandardException.newException(
1135: SQLState.BACKUP_FILE_IO_ERROR, ioe,
1136: backupFile);
1137: }
1138: }
1139:
1140: if (backupFile.exists()) {
1141: if (!backupFile.delete())
1142: throw StandardException.newException(
1143: SQLState.UNABLE_TO_DELETE_FILE,
1144: backupFile);
1145: }
1146: }
1147: }
1148: }
1149: }
1150:
1151: /**
1152: * Create encrypted version of the container with the
1153: * user specified encryption properties.
1154: *
1155: * Read all the pages of the container from the original container
1156: * through the page cache, encrypt each page data with new encryption
1157: * mechanism and write to the specified container file.
1158: *
1159: * @param handle the container handle.
1160: * @param newFilePath file to store the new encrypted version of
1161: * the container
1162: * @exception StandardException Derby Standard error policy
1163: *
1164: */
1165: protected void encryptContainer(BaseContainerHandle handle,
1166: String newFilePath) throws StandardException {
1167: BasePage page = null;
1168: StorageFile newFile = dataFactory.getStorageFactory()
1169: .newStorageFile(newFilePath);
1170: StorageRandomAccessFile newRaf = null;
1171: try {
1172: long lastPageNumber = getLastPageNumber(handle);
1173:
1174: newRaf = privGetRandomAccessFile(newFile);
1175:
1176: byte[] encryptionBuf = null;
1177: encryptionBuf = new byte[pageSize];
1178:
1179: // copy all the pages from the current container to the
1180: // new container file after encryting the pages.
1181: for (long pageNumber = FIRST_ALLOC_PAGE_NUMBER; pageNumber <= lastPageNumber; pageNumber++) {
1182:
1183: page = getLatchedPage(handle, pageNumber);
1184:
1185: // update the page array before writing to the disk
1186: // with container header and encrypt it.
1187:
1188: byte[] dataToWrite = updatePageArray(pageNumber, page
1189: .getPageArray(), encryptionBuf, true);
1190: newRaf.write(dataToWrite, 0, pageSize);
1191:
1192: // unlatch releases page from cache.
1193: page.unlatch();
1194: page = null;
1195: }
1196:
1197: // sync the new version of the container.
1198: newRaf.sync(true);
1199: newRaf.close();
1200: newRaf = null;
1201:
1202: } catch (IOException ioe) {
1203: throw StandardException.newException(
1204: SQLState.FILE_CONTAINER_EXCEPTION, ioe, newFile);
1205: } finally {
1206:
1207: if (page != null) {
1208: page.unlatch();
1209: page = null;
1210: }
1211:
1212: if (newRaf != null) {
1213: try {
1214: newRaf.close();
1215: } catch (IOException ioe) {
1216: newRaf = null;
1217: throw StandardException.newException(
1218: SQLState.FILE_CONTAINER_EXCEPTION, ioe,
1219: newFile);
1220:
1221: }
1222: }
1223: }
1224: }
1225:
1226: synchronized StorageRandomAccessFile privGetRandomAccessFile(
1227: StorageFile file) throws SecurityException,
1228: StandardException {
1229: actionCode = GET_RANDOM_ACCESS_FILE_ACTION;
1230: actionFile = file;
1231: try {
1232: return (StorageRandomAccessFile) AccessController
1233: .doPrivileged(this );
1234: } catch (PrivilegedActionException pae) {
1235: throw (StandardException) pae.getException();
1236: } finally {
1237: actionFile = null;
1238: }
1239: }
1240:
/**
 * PrivilegedExceptionAction method: executes one file-system action on
 * behalf of the privXxx wrapper methods in this class. Each wrapper sets
 * actionCode (plus the relevant action* argument fields) and then calls
 * AccessController.doPrivileged(this); this method dispatches on the code.
 *
 * @return an action-specific result (a file name, this, or null).
 * @exception StandardException Standard Derby error policy.
 * @exception IOException unexpected I/O error (delivered to the caller
 *            wrapped in a PrivilegedActionException).
 */
public Object run() throws StandardException, IOException {
switch (actionCode) {
case GET_FILE_NAME_ACTION:
// resolve this container's file name (regular file or stub)
return privGetFileName(actionIdentity, actionStub,
actionErrorOK, actionTryAlternatePath);

case CREATE_CONTAINER_ACTION: {
StorageFile file = privGetFileName(actionIdentity, false,
false, false);

try {
if (file.exists()) {
// note I'm left in the no-identity state as fillInIdentity()
// hasn't been called.
throw StandardException.newException(
SQLState.FILE_EXISTS, file);
}
} catch (SecurityException se) {
throw StandardException.newException(
SQLState.FILE_CREATE, se, file);
}

try {

// OK not to force WAL here; in fact, this operation precedes the
// creation of the log record to ensure sufficient space.

dataFactory.writeInProgress();
try {
fileData = file.getRandomAccessFile("rw");
} finally {
dataFactory.writeFinished();
}

// This container format specifies that the first page is an
// allocation page and the container information is stored within
// it. The allocation page needs to be somewhat formatted
// because if the system crashed after the create container log
// operation is written, it needs to be well formed enough to get
// the container information back out of it.
//
// Don't try to go thru the page cache here because the container
// object cannot be found in the container cache at this point
// yet. However, if we use the page cache to store the first
// allocation page, then in order to write itself out, it needs to
// ask the container to do so, which is going to create a
// deadlock. The allocation page cannot write itself out without
// going thru the container because it doesn't know where its
// offset is. Here we effectively hardwired page 0 at offset 0 of
// the container file to be the first allocation page.

// create an embryonic page - if this is not a temporary container,
// synchronously write out the file header.
writeRAFHeader(
fileData,
true,
(actionIdentity.getSegmentId() != ContainerHandle.TEMPORARY_SEGMENT));

} catch (SecurityException se) {

// only thrown by the RandomAccessFile constructor,
// so the file won't exist
throw StandardException.newException(
SQLState.FILE_CREATE, se, file);

} catch (IOException ioe) {

// header write failed: remove the half-created container
// file before reporting the error
boolean fileDeleted;
try {
fileDeleted = privRemoveFile(file);
} catch (SecurityException se) {
throw StandardException.newException(
SQLState.FILE_CREATE_NO_CLEANUP, ioe, file,
se.toString());
}

if (!fileDeleted) {
throw StandardException.newException(
SQLState.FILE_CREATE_NO_CLEANUP, ioe, file,
ioe.toString());
}

throw StandardException.newException(
SQLState.FILE_CREATE, ioe, file);
}

canUpdate = true;
return null;
} // end of case CREATE_CONTAINER_ACTION

case REMOVE_FILE_ACTION:
// non-null (this) signals success, null signals failure
return privRemoveFile(actionFile) ? this : null;

case OPEN_CONTAINER_ACTION: {
boolean isStub = false; // is this a stub?

StorageFile file = privGetFileName(actionIdentity, false,
true, true);
if (file == null)
return null;

try {
if (!file.exists()) {

// file does not exist, maybe it has been stubbified
file = privGetFileName(actionIdentity, true, true,
true);
if (!file.exists())
return null;
isStub = true;
}
} catch (SecurityException se) {
throw StandardException.newException(
SQLState.DATA_UNEXPECTED_EXCEPTION, se);
}

// open read/write only if both the data factory and the file
// itself allow writes
canUpdate = false;
try {
if (!dataFactory.isReadOnly() && file.canWrite())
canUpdate = true;
} catch (SecurityException se) {
// just means we can't write to it.
}

try {

fileData = file.getRandomAccessFile(canUpdate ? "rw"
: "r");
fileData.seek(FIRST_ALLOC_PAGE_OFFSET);
readHeader(fileData);

if (SanityManager.DEBUG) {
if (isStub)
SanityManager.ASSERT(getDroppedState()
&& getCommittedDropState(),
"a stub failed to set drop state");
}

} catch (IOException ioe) {

if (isStub) {
// the stub itself is unreadable - mark the database corrupt
throw dataFactory.markCorrupt(StandardException
.newException(
SQLState.FILE_CONTAINER_EXCEPTION,
ioe, this ));
}

// maybe it is being stubbified... try that
StorageFile stub = privGetFileName(actionIdentity,
true, true, true);

if (stub.exists()) {
try {
// finish the interrupted stubbify: drop the regular
// file and read the header from the stub instead
boolean delete_status = privRemoveFile(file);
if (SanityManager.DEBUG) {
if (!delete_status) {
SanityManager
.THROWASSERT("delete of file ("
+ file + ") failed.");
}
}

fileData = stub
.getRandomAccessFile(canUpdate ? "rw"
: "r");

readHeader(fileData);
} catch (IOException ioe2) {
throw dataFactory
.markCorrupt(StandardException
.newException(
SQLState.FILE_CONTAINER_EXCEPTION,
ioe2, this ));
}

// RESOLVE: this is a temporary hack

} else
throw dataFactory.markCorrupt(StandardException
.newException(
SQLState.FILE_CONTAINER_EXCEPTION,
ioe, this ));
}

return this ;
} // end of case OPEN_CONTAINER_ACTION

case STUBBIFY_ACTION: {
StorageFile file = privGetFileName(actionIdentity, false,
false, true);
StorageFile stub = privGetFileName(actionIdentity, true,
false, false);

StorageRandomAccessFile stubData = null;

try {
// !!!!!
// bumpContainerVersion();
//
// do NOT bump the container version. We WANT the stubbify
// operation to get redone every time. This is because this
// operation first writes out the stub and then removes the
// container file. If we bump the version, then the stub will
// contain the new version. And if the system crashes right then,
// then we will skip the whole operation during redo even though
// the container file may not have been removed. Since we don't
// want to have the remove happen before the stub is written, we
// cannot sync it and therefore cannot be sure the remove
// happened before the system crashed.

if (!stub.exists()) {
// write the header to the stub
stubData = stub.getRandomAccessFile("rw");

writeRAFHeader(stubData, true, /* create */
true); /* sync */

stubData.close();
stubData = null;
}

// Force WAL and check for database corruption before removing file.
// This is one operation where the container is changed on disk
// directly without going thru the container cache, which otherwise
// would have forced WAL. Take care of it here.
dataFactory.flush(actionInstant);

// try to remove the container file
// fileData is not null only if we are redoing a removeContainer
// (stubbify) operation. Then fileData actually is opened against
// the stub and the original container file does not exist.
// Then we need to close it here because this method is called by
// cache.remove and nobody will be able to see fileData after this.
privRemoveFile(file);

} catch (SecurityException se) {
throw StandardException.newException(
SQLState.FILE_CANNOT_REMOVE_FILE, se, file, se
.toString());
} catch (IOException ioe) {
// exception thrown while creating the stub. Remove the
// (half-baked) stub.
// NOTE(review): if this cleanup succeeds, the original
// IOException is swallowed and the action continues to
// stubFileToRemoveAfterCheckPoint below - confirm this
// best-effort behavior is intended.
try {
if (stubData != null) {
stubData.close();
stub.delete();
stubData = null;
}

if (fileData != null) {
fileData.close();
fileData = null;
}
} catch (IOException ioe2) {
throw StandardException.newException(
SQLState.FILE_CANNOT_REMOVE_FILE, ioe2,
file, ioe.toString());
} catch (SecurityException se) {
throw StandardException.newException(
SQLState.FILE_CANNOT_REMOVE_FILE, se, file,
stub);
}
}

// let the data factory know about the stub file; it
// could be removed when the next checkpoint occurs if it's not
// necessary for recovery
dataFactory.stubFileToRemoveAfterCheckPoint(stub,
actionInstant, getIdentity());
return null;
} // end of case STUBBIFY_ACTION

case BACKUP_CONTAINER_ACTION: {
// copy this container (or its stub) to the backup location
privBackupContainer(actionContainerHandle,
actionBackupLocation);
return null;
} // end of case BACKUP_CONTAINER_ACTION

case GET_RANDOM_ACCESS_FILE_ACTION: {
// open the requested file for read/write
return actionFile.getRandomAccessFile("rw");
} // end of case GET_RANDOM_ACCESS_FILE_ACTION

} // end of switch
return null;

} // end of run
1527: }
|