0001: /*
0002:
0003: Derby - Class org.apache.derby.impl.store.raw.data.FileContainer
0004:
0005: Licensed to the Apache Software Foundation (ASF) under one or more
0006: contributor license agreements. See the NOTICE file distributed with
0007: this work for additional information regarding copyright ownership.
0008: The ASF licenses this file to you under the Apache License, Version 2.0
0009: (the "License"); you may not use this file except in compliance with
0010: the License. You may obtain a copy of the License at
0011:
0012: http://www.apache.org/licenses/LICENSE-2.0
0013:
0014: Unless required by applicable law or agreed to in writing, software
0015: distributed under the License is distributed on an "AS IS" BASIS,
0016: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
0017: See the License for the specific language governing permissions and
0018: limitations under the License.
0019:
0020: */
0021:
0022: package org.apache.derby.impl.store.raw.data;
0023:
0024: import org.apache.derby.iapi.reference.Property;
0025:
0026: import org.apache.derby.iapi.reference.Limits;
0027: import org.apache.derby.iapi.reference.SQLState;
0028:
0029: import org.apache.derby.impl.store.raw.data.BaseContainer;
0030: import org.apache.derby.impl.store.raw.data.BaseContainerHandle;
0031: import org.apache.derby.impl.store.raw.data.BasePage;
0032: import org.apache.derby.impl.store.raw.data.PageVersion;
0033:
0034: import org.apache.derby.iapi.services.cache.Cacheable;
0035: import org.apache.derby.iapi.services.cache.CacheManager;
0036: import org.apache.derby.iapi.services.context.ContextService;
0037: import org.apache.derby.iapi.services.daemon.DaemonService;
0038: import org.apache.derby.iapi.services.daemon.Serviceable;
0039: import org.apache.derby.iapi.services.monitor.Monitor;
0040: import org.apache.derby.iapi.services.sanity.SanityManager;
0041: import org.apache.derby.iapi.services.io.FormatIdUtil;
0042: import org.apache.derby.iapi.services.io.FormatIdOutputStream;
0043: import org.apache.derby.iapi.services.io.StoredFormatIds;
0044: import org.apache.derby.iapi.services.io.TypedFormat;
0045:
0046: import org.apache.derby.iapi.error.StandardException;
0047: import org.apache.derby.iapi.store.raw.ContainerHandle;
0048: import org.apache.derby.iapi.store.raw.ContainerKey;
0049: import org.apache.derby.iapi.store.raw.LockingPolicy;
0050: import org.apache.derby.iapi.store.raw.Loggable;
0051: import org.apache.derby.iapi.store.raw.Page;
0052: import org.apache.derby.iapi.store.raw.PageKey;
0053: import org.apache.derby.iapi.store.raw.PageTimeStamp;
0054: import org.apache.derby.iapi.store.raw.RecordHandle;
0055: import org.apache.derby.iapi.store.raw.RawStoreFactory;
0056: import org.apache.derby.iapi.store.raw.Transaction;
0057:
0058: import org.apache.derby.iapi.store.raw.log.LogInstant;
0059: import org.apache.derby.iapi.store.raw.xact.RawTransaction;
0060:
0061: import org.apache.derby.iapi.store.access.TransactionController;
0062: import org.apache.derby.iapi.store.access.AccessFactory;
0063: import org.apache.derby.iapi.store.access.SpaceInfo;
0064:
0065: import org.apache.derby.iapi.services.io.ArrayInputStream;
0066: import org.apache.derby.iapi.services.io.ArrayOutputStream;
0067: import org.apache.derby.iapi.services.property.PropertyUtil;
0068: import org.apache.derby.iapi.util.ByteArray;
0069:
0070: import java.io.IOException;
0071: import java.io.DataInput;
0072: import java.io.DataOutput;
0073:
0074: import java.util.Properties;
0075: import java.util.zip.CRC32;
0076:
0077: /**
0078: FileContainer is an abstract base class for containers
0079: which are based on files.
0080:
0081: This class extends BaseContainer and implements Cacheable and TypedFormat
0082: */
0083:
0084: abstract class FileContainer extends BaseContainer implements
0085: Cacheable, TypedFormat {
0086:
0087: /*
0088: * typed format
0089: */
0090:
0091: protected static final int formatIdInteger = StoredFormatIds.RAW_STORE_SINGLE_CONTAINER_FILE;
0092:
0093: // format Id must fit in 4 bytes
0094:
0095: /**
0096: Return my format identifier.
0097: */
0098: public int getTypeFormatId() {
0099: return StoredFormatIds.RAW_STORE_SINGLE_CONTAINER_FILE;
0100: }
0101:
0102: /*
0103: ** Immutable fields
0104: */
0105:
0106: protected final CacheManager pageCache; // my page's cache
0107: protected final CacheManager containerCache; // cache I am in.
0108: protected final BaseDataFileFactory dataFactory; // creating factory
0109:
0110: /*
0111: ** Fields that are mutable only during identity changes
0112: */
0113:
0114: protected int pageSize; // size of my pages
0115: protected int spareSpace; // % space kept free on page in inserts
0116: protected int minimumRecordSize; // minimum space a record should
0117: // occupy on the page.
0118:
0119: protected short initialPages; // initial number of pages preallocated
0120: // to the container when created
0121:
0122: protected boolean canUpdate; // can I be written to?
0123:
0124: private int PreAllocThreshold; // how many pages before preallocation
0125: // kicks in, only stored in memory
0126: private int PreAllocSize; // how many pages to preallocate at once
0127: // only stored in memory
0128: private boolean bulkIncreaseContainerSize;// if true, the next addPage will
0129: // attempt to preallocate a larger
0130: // than normal number of pages.
0131: //
0132: // preallocation parameters
0133: private static final int PRE_ALLOC_THRESHOLD = 8;
0134: private static final int MIN_PRE_ALLOC_SIZE = 1;
0135: private static final int DEFAULT_PRE_ALLOC_SIZE = 8;
0136: private static final int MAX_PRE_ALLOC_SIZE = 1000;
0137:
0138: /*
0139: ** Mutable fields, only valid when the identity is valid.
0140: */
0141:
0142: // RESOLVE: if we run out of bytes in the container, we can change
0143: // containerVersion from a long to an int because this number is only
0144: // bumped when the container is dropped (and rolled back), so it is almost
0145: // impossible for the containerVersion to get beyond a short, let alone
0146: // an int - someone would have to write an application that attempts to drop
0147: // the container 2 billion times for that to happen.
0148: protected long firstAllocPageNumber; // first alloc page number
0149: protected long firstAllocPageOffset; // first alloc page offset
0150: protected long containerVersion; // the logged version number
0151: protected long estimatedRowCount; // value is changed unlogged
0152: protected LogInstant lastLogInstant; // last time this container
0153: // object was touched.
0154: /**
0155: * The sequence number for reusable recordIds .
0156: * As long as this number does not change, recordIds will be stable within
0157: * the container.
0158: **/
0159: private long reusableRecordIdSequenceNumber;
0160:
0161: /**
0162: The page that was last inserted into. Use this for getPageForInsert.
0163: Remember the last allocated non-overflow page, and remember it in
0164: memory only.
0165: Use the Get/Set methods to access this field except when we know it is
0166: being accessed by only a single thread.
0167: */
0168: private long lastInsertedPage[];
0169: private int lastInsertedPage_index;
0170:
0171: /**
0172: The last unfilled page found. Use this for getPageForInsert.
0173: Remember the last unfilled page found, and remember it in memory only.
0174: Use the Get/Set methods to access this field except when we know it is
0175: being accessed by only a single thread.
0176: */
0177: private long lastUnfilledPage;
0178:
0179: /**
0180: The last allocated page. This global variable is accessed *without*
0181: synchronization. It is used as a hint for page allocation to find the
0182: next reusable page.
0183: */
0184: private long lastAllocatedPage;
0185:
0186: /**
0187: An estimated page count. Use this for getEstimatedPageCount.
0188: Remember it in memory only.
0189: */
0190: private long estimatedPageCount;
0191:
0192: // The isDirty flag indicates if the container has been modified. The
0193: // preDirty flag indicates that the container is about to be modified. The
0194: // reason for these 2 flags instead of just one is to accommodate
0195: // checkpoint. After a clean container sends a log record to the log
0196: // stream but before that container is dirtied by the log operation, a
0197: // checkpoint could be taken. If so, then the redoLWM will be after the
0198: // log record but, without preDirty, the cache cleaning will not have
0199: // waited for the change. So the preDirty bit stops the cache
0200: // cleaning from skipping over this container even though it has not really
0201: // been modified yet.
0202: protected boolean preDirty;
0203: protected boolean isDirty;
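/*
  A minimal sketch (illustration only, not part of this class) of how a
  cache cleaner could honor the preDirty bit before writing the container
  out. writeContainerOut below is a hypothetical stand-in for the real
  clean-up work, but the wait()/notifyAll() handshake matches the
  preDirty()/setDirty() methods in this class:
  <PRE>
      synchronized (this) {
          while (preDirty) {
              try {
                  wait();    // woken by preDirty(false) or setDirty(...)
              } catch (InterruptedException ie) {
                  // retry the wait
              }
          }
          if (!isDirty)
              return;        // nothing to clean after all
      }
      writeContainerOut();   // hypothetical: flush the container to disk
  </PRE>
*/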
0204:
0205: /*
0206: allocation information cached by the container object.
0207:
0208: <P>MT -
0209: Access to the allocation cache MUST be synchronized on the allocCache
0210: object. FileContainer manages all MT issues w.r.t. the AllocationCache.
0211: The AllocationCache object itself is not MT-safe.
0212: <P>
0213: The protocol for accessing both the allocation cache and the alloc page
0214: is: get the alloc cache semaphore, then get the alloc page. Once both
0215: are held, they can be released in any order.
0216: <BR>
0217: It is legal to get one or the other, i.e., it is legal to only get the
0218: alloc cache semaphore without latching the alloc page, and it is legal
0219: to get the alloc page latch without the alloc cache semaphore.
0220: <BR>
0221: It is illegal to hold the alloc page latch and then get the allocation
0222: cache semaphore.
0223: <PRE>
0224: Writer to alloc Page (to invalidate alloc cache)
0225: 1) synchronized(allocCache)
0226: 2) invalidate cache
0227: 3) get latch on alloc Page
0228: 4) release synchronized(allocCache)
0229:
0230: Reader:
0231: 1) synchronized(allocCache)
0232: 2) if valid, read value and release synchronized(allocCache)
0233: 3) if cache is invalid, get latch on alloc page
0234: 4) validate cache
0235: 5) release alloc page latch
0236: 6) read value
0237: 7) release synchronized(allocCache)
0238: </PRE>
0239: */
0240: protected AllocationCache allocCache;
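/*
  A minimal sketch (illustration only, not part of this class) of the
  reader protocol above expressed in Java. getAllocPage and unlatch are
  real methods used elsewhere in this file; the *Cache* helpers are
  hypothetical placeholders:
  <PRE>
      long readFromAllocCache(BaseContainerHandle handle)
          throws StandardException
      {
          synchronized (allocCache) {
              if (cacheIsValid())                 // hypothetical check
                  return readValueFromCache();    // hypothetical read

              // cache is invalid: latch the alloc page while still inside
              // the allocCache monitor, revalidate, then read the value
              AllocPage ap =
                  (AllocPage) handle.getAllocPage(firstAllocPageNumber);
              try {
                  revalidateCache(ap);            // hypothetical revalidation
              } finally {
                  ap.unlatch();                   // latch and monitor may be
              }                                   // released in any order
              return readValueFromCache();
          }
      }
  </PRE>
*/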
0241:
0242: /*
0243: * array to store persistently stored fields
0244: */
0245: byte[] containerInfo;
0246:
0247: private CRC32 checksum; // holder for the checksum
0248:
0249: /*
0250: ** buffer for encryption/decryption
0251: */
0252: private byte[] encryptionBuffer;
0253:
0254: /*
0255: * constants
0256: */
0257:
0258: /** the container format id must fit in this many bytes */
0259: private static final int CONTAINER_FORMAT_ID_SIZE = 4;
0260:
0261: /* the checksum size */
0262: protected static final int CHECKSUM_SIZE = 8;
0263:
0264: /**
0265: The size of the persistently stored container info
0266: ContainerHeader contains the following information:
0267: 4 bytes int FormatId
0268: 4 bytes int status
0269: 4 bytes int pageSize
0270: 4 bytes int spareSpace
0271: 4 bytes int minimumRecordSize
0272: 2 bytes short initialPages
0273: 2 bytes short spare1
0274: 8 bytes long first Allocation page number
0275: 8 bytes long first Allocation page offset
0276: 8 bytes long container version
0277: 8 bytes long estimated number of rows
0278: 8 bytes long reusable recordId sequence number
0279: 8 bytes long spare3
0280: 8 bytes long checksum
0281: container info size is 80 bytes, with 10 bytes of spare space
0282: */
0283: protected static final int CONTAINER_INFO_SIZE = CONTAINER_FORMAT_ID_SIZE
0284: + 4
0285: + 4
0286: + 4
0287: + 4
0288: + 2
0289: + 2
0290: + 8
0291: + 8
0292: + 8
0293: + 8
0294: + CHECKSUM_SIZE
0295: + 8
0296: + 8;
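// Arithmetic check of the layout above: 5 ints (20 bytes) + 2 shorts
// (4 bytes) + 6 longs (48 bytes) + the 8-byte checksum = 80 bytes.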
0297:
0298: /**
0299: the number of arguments we need to pass to alloc page for create
0300: */
0301: protected static final int STORED_PAGE_ARG_NUM = 5;
0302: protected static final int ALLOC_PAGE_ARG_NUM = 6;
0303:
0304: /**
0305: * where the first alloc page is located -
0306: * the logical page number and the physical page offset
0307: * NOTE: if it is not 0, this is not going to work for a stream
0308: * file, which doesn't support seek
0309: */
0310: public static final long FIRST_ALLOC_PAGE_NUMBER = 0L;
0311: public static final long FIRST_ALLOC_PAGE_OFFSET = 0L;
0312:
0313: // file status for persistent storage
0314: private static final int FILE_DROPPED = 0x1;
0315: private static final int FILE_COMMITTED_DROP = 0x2;
0316:
0317: // recordId in this container can be reused when a page is reused.
0318: private static final int FILE_REUSABLE_RECORDID = 0x8;
0319:
0320: protected static final String SPACE_TRACE = (SanityManager.DEBUG ? "SpaceTrace"
0321: : null);
0322:
0323: FileContainer(BaseDataFileFactory factory) {
0324: dataFactory = factory;
0325: pageCache = factory.getPageCache();
0326: containerCache = factory.getContainerCache();
0327:
0328: initContainerHeader(true);
0329: }
0330:
0331: /**
0332: Get information about space used by the container.
0333: **/
0334: public SpaceInfo getSpaceInfo(BaseContainerHandle handle)
0335: throws StandardException {
0336: SpaceInformation spaceInfo;
0337: synchronized (allocCache) {
0338: spaceInfo = allocCache.getAllPageCounts(handle,
0339: firstAllocPageNumber);
0340: }
0341: spaceInfo.setPageSize(pageSize);
0342: return spaceInfo;
0343: }
0344:
0345: /*
0346: ** Methods of Cacheable
0347: **
0348: ** getIdentity() and clearIdentity() are implemented by BaseContainer
0349: */
0350:
0351: /**
0352: Containers
0353: */
0354:
0355: /**
0356: Open the container.
0357:
0358: @return a valid object if the container was successfully opened, null if
0359: it does not exist.
0360:
0361: @exception StandardException Some problem in opening a container.
0362:
0363: @see Cacheable#setIdentity
0364: */
0365: public Cacheable setIdentity(Object key) throws StandardException {
0366: return setIdent((ContainerKey) key);
0367: }
0368:
0369: /**
0370: * Open the container.
0371: * <p>
0372: * Open the container with key "newIdentity".
0373: * <p>
0374: * Should have the same name as setIdentity, but that seems to cause
0375: * method resolution ambiguities.
0376: *
0377: * @exception StandardException Some problem in opening a container.
0378: *
0379: * @see Cacheable#setIdentity
0380: **/
0381: protected Cacheable setIdent(ContainerKey newIdentity)
0382: throws StandardException {
0383: boolean ok = openContainer(newIdentity);
0384:
0385: initializeLastInsertedPage(1);
0386: lastUnfilledPage = ContainerHandle.INVALID_PAGE_NUMBER;
0387: lastAllocatedPage = ContainerHandle.INVALID_PAGE_NUMBER;
0388:
0389: estimatedPageCount = -1;
0390:
0391: if (ok) {
0392: // set up our identity.
0393: // If we raise an exception after this we must clear our identity.
0394: fillInIdentity(newIdentity);
0395: return this ;
0396: } else {
0397: return null;
0398: }
0399: }
0400:
0401: public Cacheable createIdentity(Object key, Object createParameter)
0402: throws StandardException {
0403: if (SanityManager.DEBUG) {
0404: SanityManager.ASSERT(!(key instanceof PageKey),
0405: "PageKey input to create container");
0406: }
0407:
0408: return createIdent((ContainerKey) key, createParameter);
0409: }
0410:
0411: // should have the same name as createIdentity, but that seems to cause
0412: // method resolution ambiguities
0413: protected Cacheable createIdent(ContainerKey newIdentity,
0414: Object createParameter) throws StandardException {
0415: // createParameter will be this object if this method is being called
0416: // from itself to re-initialize the container (only for tempRAF)
0417: // if createParameter == this, do not reinitialize the header, this
0418: // object is not being reused for another container
0419: if (createParameter != this ) {
0420: initContainerHeader(true /* change to different container */);
0421:
0422: if (createParameter != null
0423: && (createParameter instanceof ByteArray)) {
0424: // this is called during load tran, the create container
0425: // Operation has a byte array created by logCreateContainerInfo
0426: // which contains all the information necessary to recreate the
0427: // container. Use that to recreate the container properties.
0428:
0429: createInfoFromLog((ByteArray) createParameter);
0430: } else {
0431: if (SanityManager.DEBUG) {
0432: if (createParameter != null
0433: && !(createParameter instanceof Properties)) {
0434: SanityManager
0435: .THROWASSERT("Expecting a non-null createParameter to a "
0436: + "Properties instead of "
0437: + createParameter.getClass()
0438: .getName());
0439: }
0440: }
0441:
0442: createInfoFromProp((Properties) createParameter);
0443: }
0444: } else {
0445: // we don't need to completely re-initialize the header
0446: // just re-initialize the relevant fields
0447: initContainerHeader(false);
0448: }
0449:
0450: if (initialPages > 1) {
0451: PreAllocThreshold = 0;
0452: PreAllocSize = initialPages;
0453: bulkIncreaseContainerSize = true;
0454: } else {
0455: PreAllocThreshold = PRE_ALLOC_THRESHOLD;
0456: }
0457:
0458: createContainer(newIdentity);
0459:
0460: setDirty(true);
0461:
0462: // set up our identity.
0463: // If we raise an exception after this we must clear our identity.
0464: fillInIdentity(newIdentity);
0465:
0466: return this ;
0467: }
0468:
0469: public void clearIdentity() {
0470:
0471: closeContainer();
0472:
0473: initializeLastInsertedPage(1);
0474: lastUnfilledPage = ContainerHandle.INVALID_PAGE_NUMBER;
0475: lastAllocatedPage = ContainerHandle.INVALID_PAGE_NUMBER;
0476:
0477: canUpdate = false;
0478: super .clearIdentity();
0479: }
0480:
0481: /**
0482: We treat this container as dirty if it has the container file open.
0483: @see Cacheable#isDirty
0484: */
0485: public boolean isDirty() {
0486: synchronized (this ) {
0487: return isDirty;
0488: }
0489: }
0490:
0491: public void preDirty(boolean preDirtyOn) {
0492: synchronized (this ) {
0493: if (preDirtyOn) {
0494: // prevent the cleaner from cleaning this container or skipping
0495: // over it until the operation which preDirtied it got a chance
0496: // to do the change.
0497: preDirty = true;
0498: } else {
0499: preDirty = false;
0500: // if a cleaner is waiting on the dirty bit, wake it up
0501: notifyAll();
0502: }
0503: }
0504: }
0505:
0506: protected void setDirty(boolean dirty) {
0507: synchronized (this ) {
0508: preDirty = false;
0509: isDirty = dirty;
0510:
0511: // if a cleaner is waiting on the dirty bit, wake it up
0512: notifyAll();
0513: }
0514: }
0515:
0516: /*
0517: ** Container creation, opening, and closing
0518: */
0519:
0520: /**
0521: * Create a new container.
0522: * <p>
0523: * Create a new container. All references to identity must be through the
0524: * passed-in identity; this object will have no identity until after this
0525: * method returns.
0526: *
0527: * @exception StandardException Cloudscape Standard error policy
0528: **/
0529: abstract void createContainer(ContainerKey newIdentity)
0530: throws StandardException;
0531:
0532: /**
0533: * Open a container.
0534: * <p>
0535: * Open the file that maps to this container; if the file does not exist
0536: * then we assume the container was never created. If the file exists but
0537: * we have trouble opening it then we throw an exception.
0541: *
0542: * <BR> MT - single thread required - Enforced by cache manager.
0543: *
0544: * @exception StandardException Standard exception policy.
0545: **/
0546: abstract boolean openContainer(ContainerKey newIdentity)
0547: throws StandardException;
0548:
0549: abstract void closeContainer();
0550:
0551: /**
0552: * Drop Container.
0553: * <p>
0554: *
0555: * @see Transaction#dropContainer
0556: *
0557: **/
0558: protected void dropContainer(LogInstant instant, boolean isDropped) {
0559: synchronized (this ) {
0560: setDroppedState(isDropped);
0561: setDirty(true);
0562: bumpContainerVersion(instant);
0563: }
0564: }
0565:
0566: /**
0567: Increment the version by one and record the log instant of the change.
0568:
0569: <BR> MT - caller must synchronize on "this" in the same sync block that
0570: modifies the container header.
0571: */
0572: protected final void bumpContainerVersion(LogInstant instant) {
0573: lastLogInstant = instant;
0574: ++containerVersion;
0575: }
0576:
0577: protected long getContainerVersion() {
0578: // it is not really necessary to synchronize this because the only time the
0579: // container version is looked at is during recovery, which is single
0580: // threaded at the moment. Put it in a sync block anyway just in case
0581: // someone else wants to look at this for some other reason
0582: synchronized (this ) {
0583: return containerVersion;
0584: }
0585: }
0586:
0587: /**
0588: * Request the system properties associated with a container.
0589: * <p>
0590: * Request the value of properties that are associated with a container.
0591: * The following properties can be requested:
0592: * derby.storage.pageSize
0593: * derby.storage.pageReservedSpace
0594: * derby.storage.minimumRecordSize
0595: * derby.storage.reusableRecordId
0596: * derby.storage.initialPages
0597: * <p>
0598: * To get the value of a particular property add it to the property list,
0599: * and on return the value of the property will be set to its current
0600: * value. For example:
0601: *
0602: * get_prop(ConglomerateController cc)
0603: * {
0604: * Properties prop = new Properties();
0605: * prop.put("derby.storage.pageSize", "");
0606: * cc.getContainerProperties(prop);
0607: *
0608: * System.out.println(
0609: * "table's page size = " +
0610: * prop.getProperty("derby.storage.pageSize"));
0611: * }
0612: *
0613: * @param prop Property list to fill in.
0614: *
0615: * @exception StandardException Standard exception policy.
0616: **/
0617: public void getContainerProperties(Properties prop)
0618: throws StandardException {
0619: // derby.storage.pageSize
0620: if (prop.getProperty(Property.PAGE_SIZE_PARAMETER) != null) {
0621: prop.put(Property.PAGE_SIZE_PARAMETER, Integer
0622: .toString(pageSize));
0623: }
0624:
0625: // derby.storage.minimumRecordSize
0626: if (prop
0627: .getProperty(RawStoreFactory.MINIMUM_RECORD_SIZE_PARAMETER) != null) {
0628: prop.put(RawStoreFactory.MINIMUM_RECORD_SIZE_PARAMETER,
0629: Integer.toString(minimumRecordSize));
0630: }
0631:
0632: // derby.storage.pageReservedSpace
0633: if (prop
0634: .getProperty(RawStoreFactory.PAGE_RESERVED_SPACE_PARAMETER) != null) {
0635: prop.put(RawStoreFactory.PAGE_RESERVED_SPACE_PARAMETER,
0636: Integer.toString(spareSpace));
0637: }
0638:
0639: // derby.storage.reusableRecordId
0640: if (prop.getProperty(RawStoreFactory.PAGE_REUSABLE_RECORD_ID) != null) {
0641: Boolean bool = new Boolean(isReusableRecordId());
0642: prop.put(RawStoreFactory.PAGE_REUSABLE_RECORD_ID, bool
0643: .toString());
0644: }
0645:
0646: // derby.storage.initialPages
0647: if (prop.getProperty(RawStoreFactory.CONTAINER_INITIAL_PAGES) != null) {
0648: prop.put(RawStoreFactory.CONTAINER_INITIAL_PAGES, Integer
0649: .toString(initialPages));
0650: }
0651:
0652: }
0653:
0654: /**
0655: Read the container's header. Assumes the input stream (fileData)
0656: is positioned at the beginning of the file.
0657:
0658: The subclass that implements openContainer is expected to manufacture a
0659: DataInput stream, which is used here to read the header.
0660:
0661: <BR> MT - single thread required - Enforced by caller.
0662:
0663: @exception StandardException Cloudscape Standard error policy
0664: @exception IOException error in reading the header from file
0665: */
0666: protected void readHeader(DataInput fileData) throws IOException,
0667: StandardException {
0668: // Always read the header from the input stream even if the alloc page may
0669: // still be in cache. This is because a stubbify operation only writes
0670: // the stub to disk; it does not get rid of any stale pages in the page
0671: // cache. So if it so happens that the stubbified container object is
0672: // aged out of the container cache but the first alloc page hasn't been,
0673: // then when any stale page of this container wants to be written out,
0674: // the container needs to be reopened, which is when this routine is
0675: // called. We must not get the alloc page from the cache because it may be
0676: // a stale page and it may still say the container has not been dropped.
0677:
0678: byte[] epage = getEmbryonicPage(fileData);
0679:
0680: // read persistent container header into containerInfo
0681: AllocPage.ReadContainerInfo(containerInfo, epage);
0682:
0683: // initialize header from information stored in containerInfo
0684: readHeaderFromArray(containerInfo);
0685:
0686: epage = null;
0687: }
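/*
  A minimal sketch (illustration only) of how a file-backed subclass might
  drive readHeader from its openContainer implementation. The
  RandomAccessFile field (fileData) and the surrounding open/locking logic
  are assumptions; only readHeader, dataFactory.markCorrupt and the
  DATA_UNEXPECTED_EXCEPTION state are taken from this file:
  <PRE>
      fileData.seek(0L);            // the header lives at the start of the file
      try {
          readHeader(fileData);     // RandomAccessFile implements DataInput
      } catch (IOException ioe) {
          throw dataFactory.markCorrupt(
              StandardException.newException(
                  SQLState.DATA_UNEXPECTED_EXCEPTION, ioe));
      }
  </PRE>
*/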
0688:
0689: // initialize header information so this container object can be safely
0690: // reused as if this container object has just been new'ed
0691: private void initContainerHeader(boolean changeContainer) {
0692: if (containerInfo == null)
0693: containerInfo = new byte[CONTAINER_INFO_SIZE];
0694:
0695: if (checksum == null)
0696: checksum = new CRC32();
0697: else
0698: checksum.reset();
0699:
0700: if (allocCache == null)
0701: allocCache = new AllocationCache();
0702: else
0703: allocCache.reset();
0704:
0705: if (changeContainer) {
0706: pageSize = 0;
0707: spareSpace = 0;
0708: minimumRecordSize = 0;
0709: }
0710:
0711: initialPages = 1;
0712: firstAllocPageNumber = ContainerHandle.INVALID_PAGE_NUMBER;
0713: firstAllocPageOffset = -1;
0714: containerVersion = 0;
0715: estimatedRowCount = 0;
0716: reusableRecordIdSequenceNumber = 0;
0717:
0718: setDroppedState(false);
0719: setCommittedDropState(false);
0720: setReusableRecordIdState(false);
0721:
0722: // instance variables that are not stored on disk
0723: lastLogInstant = null;
0724:
0725: initializeLastInsertedPage(1);
0726: lastUnfilledPage = ContainerHandle.INVALID_PAGE_NUMBER;
0727: lastAllocatedPage = ContainerHandle.INVALID_PAGE_NUMBER;
0728: estimatedPageCount = -1;
0729:
0730: PreAllocThreshold = PRE_ALLOC_THRESHOLD;
0731: PreAllocSize = DEFAULT_PRE_ALLOC_SIZE;
0732: bulkIncreaseContainerSize = false;
0733: }
0734:
0735: /**
0736: Read containerInfo from a byte array.
0737: The container header array must have been written by, or be of
0738: the same format as that put together by, writeHeaderToArray.
0739:
0740: @exception StandardException Cloudscape Standard error policy
0741: @exception IOException error in reading the header from file
0742: */
0743: private void readHeaderFromArray(byte[] a)
0744: throws StandardException, IOException {
0745: ArrayInputStream inStream = new ArrayInputStream(a);
0746:
0747: inStream.setLimit(0, CONTAINER_INFO_SIZE);
0748: int fid = inStream.readInt();
0749: if (fid != formatIdInteger) {
0750: throw StandardException.newException(
0751: SQLState.DATA_UNKNOWN_CONTAINER_FORMAT,
0752: getIdentity(), new Long(fid));
0753: }
0754:
0755: int status = inStream.readInt();
0756: pageSize = inStream.readInt();
0757: spareSpace = inStream.readInt();
0758: minimumRecordSize = inStream.readInt();
0759: initialPages = inStream.readShort();
0760: PreAllocSize = inStream.readShort();
0761: firstAllocPageNumber = inStream.readLong();
0762: firstAllocPageOffset = inStream.readLong();
0763: containerVersion = inStream.readLong();
0764: estimatedRowCount = inStream.readLong();
0765: reusableRecordIdSequenceNumber = inStream.readLong();
0766: lastLogInstant = null;
0767:
0768: if (PreAllocSize == 0) // pre 2.0, we don't store this.
0769: PreAllocSize = DEFAULT_PRE_ALLOC_SIZE;
0770:
0771: long spare3 = inStream.readLong(); // read spare long
0772:
0773: // upgrade - if this is a container that was created before
0774: // initialPages was stored, it will have a zero value. Set it to the
0775: // default of 1.
0776: if (initialPages == 0)
0777: initialPages = 1;
0778:
0779: // container read in from disk, reset preAllocation values
0780: PreAllocThreshold = PRE_ALLOC_THRESHOLD;
0781:
0782: // validate checksum
0783: long onDiskChecksum = inStream.readLong();
0784: checksum.reset();
0785: checksum.update(a, 0, CONTAINER_INFO_SIZE - CHECKSUM_SIZE);
0786:
0787: if (onDiskChecksum != checksum.getValue()) {
0788: PageKey pk = new PageKey(identity, FIRST_ALLOC_PAGE_NUMBER);
0789:
0790: throw dataFactory.markCorrupt(StandardException
0791: .newException(SQLState.FILE_BAD_CHECKSUM, pk,
0792: new Long(checksum.getValue()), new Long(
0793: onDiskChecksum),
0794: org.apache.derby.iapi.util.StringUtil
0795: .hexDump(a)));
0796: }
0797:
0798: allocCache.reset();
0799:
0800: // set the in memory state
0801: setDroppedState((status & FILE_DROPPED) != 0);
0802: setCommittedDropState((status & FILE_COMMITTED_DROP) != 0);
0803: setReusableRecordIdState((status & FILE_REUSABLE_RECORDID) != 0);
0804: }
0805:
0806: /**
0807: Write the container header to a page array (the first allocation page)
0808:
0809: @exception StandardException Cloudscape Standard error policy
0810: @exception IOException error in writing the header to file
0811: */
0812: protected void writeHeader(byte[] pageData)
0813: throws StandardException, IOException {
0814: // write out the current containerInfo in the borrowed space to byte
0815: // array containerInfo
0816: writeHeaderToArray(containerInfo);
0817:
0818: AllocPage.WriteContainerInfo(containerInfo, pageData, false);
0819: }
0820:
0821: /**
0822: Write the container header directly to output stream (fileData).
0823: Assumes the output stream is positioned at the beginning of the file.
0824:
0825: Subclasses that write the container header are expected to
0826: manufacture a DataOutput stream, which is used here.
0827:
0828: <BR> MT - single thread required - Enforced by caller
0829:
0830: @exception StandardException Cloudscape Standard error policy
0831: @exception IOException error in writing the header to file
0832: */
0833: protected void writeHeader(DataOutput fileData, boolean create,
0834: byte[] epage) throws IOException, StandardException {
0835: // write out the current containerInfo in the borrowed space to byte
0836: // array containerInfo
0837: writeHeaderToArray(containerInfo);
0838:
0839: // RESOLVE: get no wait on the page cache to see if allocation page is
0840: // there, if so, use that instead of making a new array and a static
0841: // function.
0842:
0843: AllocPage.WriteContainerInfo(containerInfo, epage, create);
0844: // now epage has the containerInfo written inside it
0845:
0846: // force WAL - and check to see if database is corrupt or is frozen.
0847: dataFactory.flush(lastLogInstant);
0848: if (lastLogInstant != null)
0849: lastLogInstant = null;
0850:
0851: // write it out
0852: dataFactory.writeInProgress();
0853: try {
0854: fileData.write(epage);
0855: } finally {
0856: dataFactory.writeFinished();
0857: }
0858: }
0859:
0860: /**
0861: Get an embryonic page from the dataInput stream.
0862:
0863: If fileData is not null, then the embryonic page will be read
0864: in from the input stream (fileData), which is assumed to be
0865: positioned at the beginning of the first allocation page.
0866:
0867: If fileData is null, then just manufacture an array which
0868: is the size of an embryonic page.
0869:
0870: @exception IOException error in reading the embryonic page from the file
0871: */
0872: protected byte[] getEmbryonicPage(DataInput fileData)
0873: throws IOException {
0874: byte[] epage = new byte[AllocPage.MAX_BORROWED_SPACE];
0875:
0876: if (fileData != null) {
0877: fileData.readFully(epage);
0878: }
0879: return epage;
0880: }
0881:
0882: /**
0883: Write containerInfo into a byte array
0884: The container Header thus put together can be read by readHeaderFromArray.
0885:
0886: @exception IOException error in writing the header
0887: */
0888: private void writeHeaderToArray(byte[] a) throws IOException {
0889: if (SanityManager.DEBUG)
0890: SanityManager.ASSERT(a.length >= CONTAINER_INFO_SIZE,
0891: "header won't fit in array");
0892:
0893: ArrayOutputStream a_out = new ArrayOutputStream(a);
0894: FormatIdOutputStream outStream = new FormatIdOutputStream(a_out);
0895:
0896: int status = 0;
0897: if (getDroppedState())
0898: status |= FILE_DROPPED;
0899: if (getCommittedDropState())
0900: status |= FILE_COMMITTED_DROP;
0901: if (isReusableRecordId())
0902: status |= FILE_REUSABLE_RECORDID;
0903:
0904: a_out.setPosition(0);
0905: a_out.setLimit(CONTAINER_INFO_SIZE);
0906: outStream.writeInt(formatIdInteger);
0907: outStream.writeInt(status);
0908: outStream.writeInt(pageSize);
0909: outStream.writeInt(spareSpace);
0910: outStream.writeInt(minimumRecordSize);
0911: outStream.writeShort(initialPages);
0912: outStream.writeShort(PreAllocSize); // write spare1
0913: outStream.writeLong(firstAllocPageNumber);
0914: outStream.writeLong(firstAllocPageOffset);
0915: outStream.writeLong(containerVersion);
0916: outStream.writeLong(estimatedRowCount);
0917: outStream.writeLong(reusableRecordIdSequenceNumber);
0918: outStream.writeLong(0); //Write spare3
0919:
0920: checksum.reset();
0921: checksum.update(a, 0, CONTAINER_INFO_SIZE - CHECKSUM_SIZE);
0922:
0923: // write the checksum to the array
0924: outStream.writeLong(checksum.getValue());
0925:
0926: a_out.clearLimit();
0927: }
0928:
0929: /**
0930: Log all information on the container creation necessary to recreate the
0931: container during a load tran.
0932:
0933: @exception StandardException Cloudscape Standard error policy
0934: */
0935: protected ByteArray logCreateContainerInfo()
0936: throws StandardException {
0937: // just write out the whole container header
0938: byte[] array = new byte[CONTAINER_INFO_SIZE];
0939: if (array == null || array.length != CONTAINER_INFO_SIZE) {
0940: throw StandardException.newException(
0941: SQLState.DATA_OBJECT_ALLOCATION_FAILED, "byte[]");
0942: }
0943:
0944: try {
0945: writeHeaderToArray(array);
0946: } catch (IOException ioe) {
0947: throw StandardException.newException(
0948: SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
0949: }
0950:
0951: return new ByteArray(array);
0952: }
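/*
  The byte array produced here is the same CONTAINER_INFO_SIZE image that
  createInfoFromLog consumes during load tran recovery; a rough sketch of
  the round trip (the logging machinery in between is omitted):
  <PRE>
      ByteArray createInfo = logCreateContainerInfo();  // at create time, logged
      ...
      createInfoFromLog(createInfo);                    // at recreate time
  </PRE>
*/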
0953:
0954: /**
0955: Set container properties from the passed in ByteArray, which is created
0956: by logCreateContainerInfo. This information is used to recreate the
0957: container during recovery load tran.
0958:
0959: The following container properties are set:
0960:
0961: pageSize
0962: spareSpace
0963: minimumRecordSize
0964: isReusableRecordId
0965: initialPages
0966:
0967: */
0968: private void createInfoFromLog(ByteArray byteArray)
0969: throws StandardException {
0970: if (SanityManager.DEBUG) {
0971: SanityManager.ASSERT(byteArray != null,
0972: "setCreateContainerInfoFromLog: ByteArray is null");
0973: SanityManager
0974: .ASSERT(
0975: byteArray.getLength() == CONTAINER_INFO_SIZE,
0976: "setCreateContainerInfoFromLog: ByteArrays.length() != CONTAINER_INFO_SIZE");
0977: }
0978:
0979: byte[] array = byteArray.getArray();
0980:
0981: // now extract the relevant information from the array - basically
0982: // duplicate the code in readHeaderFromArray
0983: ArrayInputStream inStream = new ArrayInputStream(array);
0984:
0985: int status = 0;
0986:
0987: try {
0988: inStream.setLimit(0, CONTAINER_INFO_SIZE);
0989:
0990: int fid = inStream.readInt();
0991: if (fid != formatIdInteger) {
0992: // RESOLVE: do something about this when we have > 1 container format
0993: throw StandardException.newException(
0994: SQLState.DATA_UNKNOWN_CONTAINER_FORMAT,
0995: getIdentity(), new Long(fid));
0996: }
0997:
0998: status = inStream.readInt();
0999: pageSize = inStream.readInt();
1000: spareSpace = inStream.readInt();
1001: minimumRecordSize = inStream.readInt();
1002: initialPages = inStream.readShort();
1003:
1004: } catch (IOException ioe) {
1005: throw StandardException.newException(
1006: SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
1007: }
1008:
1009: // set reusable record id property
1010: setReusableRecordIdState((status & FILE_REUSABLE_RECORDID) != 0);
1011:
1012: // sanity check to make sure we are not encountering any
1013: // dropped Container
1014: if (SanityManager.DEBUG) {
1015: SanityManager.ASSERT((status & FILE_DROPPED) == 0
1016: && (status & FILE_COMMITTED_DROP) == 0,
1017: "cannot load a dropped container");
1018: }
1019: }
1020:
1021: /**
1022: Set container properties from the passed in createArgs.
1023: The following container properties are set:
1024:
1025: pageSize
1026: spareSpace
1027: minimumRecordSize
1028: isReusableRecordId
1029: initialPages
1030:
1031: RESOLVE - in the future setting parameters should be overridable
1032: by sub-class, e.g. one implementation of Container may require a
1033: minimum page size of 4k.
1034: */
1035: private void createInfoFromProp(Properties createArgs)
1036: throws StandardException {
1037: // Need a TransactionController to get database/service wide properties.
1038: AccessFactory af = (AccessFactory) Monitor.getServiceModule(
1039: dataFactory, AccessFactory.MODULE);
1040:
1041: // RESOLVE: sku defectid 2014
1042: TransactionController tc = (af == null) ? null : af
1043: .getTransaction(ContextService.getFactory()
1044: .getCurrentContextManager());
1045:
1046: pageSize = PropertyUtil.getServiceInt(tc, createArgs,
1047: Property.PAGE_SIZE_PARAMETER, Limits.DB2_MIN_PAGE_SIZE,
1048: Limits.DB2_MAX_PAGE_SIZE,
1049: RawStoreFactory.PAGE_SIZE_DEFAULT);
1050:
1051: // rather than throw an error, just automatically set the page size to
1052: // the default if a bad value is given.
1053: if ((pageSize != 4096) && (pageSize != 8192)
1054: && (pageSize != 16384) && (pageSize != 32768)) {
1055: pageSize = RawStoreFactory.PAGE_SIZE_DEFAULT;
1056: }
1057:
1058: spareSpace = PropertyUtil.getServiceInt(tc, createArgs,
1059: RawStoreFactory.PAGE_RESERVED_SPACE_PARAMETER, 0, 100,
1060: 20);
1061:
1062: PreAllocSize = PropertyUtil
1063: .getServiceInt(tc, createArgs,
1064: RawStoreFactory.PRE_ALLOCATE_PAGE,
1065: MIN_PRE_ALLOC_SIZE, MAX_PRE_ALLOC_SIZE,
1066: DEFAULT_PRE_ALLOC_SIZE /* default */);
1067:
1068: // RESOLVE - in the future, we will allow user to set minimumRecordSize
1069: // to be larger than pageSize, when long rows are supported.
1070: if (createArgs == null) {
1071: // if the createArgs is null, then the following method call
1072: // will get the system properties from the appropriate places.
1073: // We want to make sure minimumRecordSize is set to at least
1074: // the default value MINIMUM_RECORD_SIZE_DEFAULT (12)
1075: // as set in RawStoreFactory.
1076: minimumRecordSize = PropertyUtil.getServiceInt(
1077: tc,
1078: RawStoreFactory.MINIMUM_RECORD_SIZE_PARAMETER,
1079: RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT, // this is different from the next call
1080: // reserving 100 bytes for record/field headers
1081: (pageSize * (1 - spareSpace / 100) - 100),
1082: RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT);
1083: } else {
1084: // if the createArgs is not null, then it has already been set
1085: // by upper layer or create statement, then, we allow the minimum
1086: // value of this to be MINIMUM_RECORD_SIZE_MINIMUM (1).
1087: minimumRecordSize = PropertyUtil.getServiceInt(
1088: tc,
1089: createArgs,
1090: RawStoreFactory.MINIMUM_RECORD_SIZE_PARAMETER,
1091: RawStoreFactory.MINIMUM_RECORD_SIZE_MINIMUM, // this is different from the last call
1092: // reserving 100 bytes for record/field headers
1093: (pageSize * (1 - spareSpace / 100) - 100),
1094: RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT);
1095: }
1096:
1097: // For the following properties, do not check value set in global
1098: // properties, we only listen to what access has to say about them.
1099: //
1100: // whether or not container's recordIds can be reused
1101: // if container is to be created with a large number of pages
1102: if (createArgs != null) {
1103: String reusableRecordIdParameter = createArgs
1104: .getProperty(RawStoreFactory.PAGE_REUSABLE_RECORD_ID);
1105: if (reusableRecordIdParameter != null) {
1106: Boolean reusableRecordId = new Boolean(
1107: reusableRecordIdParameter);
1108: setReusableRecordIdState(reusableRecordId
1109: .booleanValue());
1110: }
1111:
1112: String containerInitialPageParameter = createArgs
1113: .getProperty(RawStoreFactory.CONTAINER_INITIAL_PAGES);
1114: if (containerInitialPageParameter != null) {
1115: initialPages = Short
1116: .parseShort(containerInitialPageParameter);
1117: if (initialPages > 1) {
1118: if (initialPages > RawStoreFactory.MAX_CONTAINER_INITIAL_PAGES)
1119: initialPages = RawStoreFactory.MAX_CONTAINER_INITIAL_PAGES;
1120: }
1121: }
1122: }
1123: }
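/*
  A minimal sketch (illustration only) of the kind of createArgs Properties
  an upper layer might hand to createIdent/createInfoFromProp; the property
  names are the constants used above, the values are arbitrary examples:
  <PRE>
      Properties createArgs = new Properties();
      createArgs.put(Property.PAGE_SIZE_PARAMETER, "8192");
      createArgs.put(RawStoreFactory.PAGE_RESERVED_SPACE_PARAMETER, "20");
      createArgs.put(RawStoreFactory.MINIMUM_RECORD_SIZE_PARAMETER, "12");
      createArgs.put(RawStoreFactory.PAGE_REUSABLE_RECORD_ID, "true");
      createArgs.put(RawStoreFactory.CONTAINER_INITIAL_PAGES, "10");
  </PRE>
*/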
1124:
1125: /** Return true if the container can be written to. */
1127: protected boolean canUpdate() {
1128: return canUpdate;
1129: }
1130:
1131: /**
1132: Deallocate a page from the container.
1133:
1134: @param handle the container handle doing the deallocation
1135: @param page the page to be deallocated. It is latched upon entry and
1136: will be unlatched by the caller of this function
1137:
1138: @exception StandardException Cloudscape Standard error policy
1139: */
1140: protected void deallocatePage(BaseContainerHandle handle,
1141: BasePage page) throws StandardException {
1142: if (SanityManager.DEBUG) {
1143: SanityManager.ASSERT(page.isLatched(),
1144: "page is not latched");
1145: SanityManager.ASSERT(
1146: page.getPageNumber() != FIRST_ALLOC_PAGE_NUMBER,
1147: "cannot deallocate an alloc page");
1148: }
1149:
1150: long pnum = page.getPageNumber();
1151:
1152: // dealloc the page from the alloc page
1153: deallocatePagenum(handle, pnum);
1154:
1155: // mark the page as deallocated. Page should not be touched after this
1156: // the page latch is released by the BaseContainer upon return of this
1157: // method. Regardless of whether this operation is successful or not,
1158: // the page will be unlatched by BaseContainer.
1159: page.deallocatePage();
1160:
1161: }
1162:
1163: /** deallocate the page from the alloc page */
1164: private void deallocatePagenum(BaseContainerHandle handle, long pnum)
1165: throws StandardException {
1166: synchronized (allocCache) {
1167: long allocPageNum = allocCache.getAllocPageNumber(handle,
1168: pnum, firstAllocPageNumber);
1169:
1170: if (SanityManager.DEBUG) {
1171: if (allocPageNum == ContainerHandle.INVALID_PAGE_NUMBER)
1172: allocCache.dumpAllocationCache();
1173:
1174: if (allocPageNum == ContainerHandle.INVALID_PAGE_NUMBER)
1175: SanityManager
1176: .THROWASSERT("can't find alloc page for page number "
1177: + pnum);
1178: }
1179: // get the alloc page to deallocate this pnum
1180: AllocPage allocPage = (AllocPage) handle
1181: .getAllocPage(allocPageNum);
1182: if (allocPage == null) {
1183: PageKey pkey = new PageKey(identity, allocPageNum);
1184:
1185: throw StandardException.newException(
1186: SQLState.FILE_NO_ALLOC_PAGE, pkey);
1187: }
1188:
1189: try {
1190: allocCache.invalidate(allocPage, allocPageNum);
1191:
1192: // Mark the page as deallocated in the alloc page. The deallocated
1193: // page is protected by the dealloc lock.
1194: allocPage.deallocatePage(handle, pnum);
1195: } finally {
1196: allocPage.unlatch();
1197: }
1198: }
1199: // make sure this page gets looked at when someone needs a new page
1200: if (pnum <= lastAllocatedPage) {
1201: lastAllocatedPage = pnum - 1;
1202: }
1203:
1204: }
1205:
1206: /**
1207: Compress free space from container.
1208:
1209: <BR> MT - thread aware - It is assumed that our caller (our super class)
1210: has already arranged a logical lock on page allocation to only allow a
1211: single thread through here.
1212:
1213: Compressing free space is done in allocation page units, working
1214: its way from the end of the container to the beginning. Each
1215: loop operates on the last allocation page in the container.
1216:
1217: Freeing space in the container involves 2 transactions; it updates an
1218: allocation page and N data pages, and possibly deletes
1219: the allocation page.
1220: The User Transaction (UT) initiated the compress call.
1221: The Nested Top Transaction (NTT) is the transaction started by RawStore
1222: inside the compress call. This NTT is committed before compress returns.
1223: The NTT is used to access high traffic data structures such as the
1224: AllocPage.
1225:
1226: This is an outline of the algorithm used in compressing the container.
1227:
1228: Loop until a non-free page is found; in each iteration return to the OS
1229: all space at the end of the container occupied by free pages, including
1230: the allocation page itself if all of its pages are free.
1231:
1232: 1) Find last 2 allocation pages in container (last if there is only one).
1233: 2) invalidate the allocation information cached by the container.
1234: Without the cache no page can be gotten from the container. Pages
1235: already in the page cache are not affected. Thus by latching the
1236: allocPage and invalidating the allocation cache, this NTT blocks out
1237: all page gets from this container until it commits.
1238: 3) the allocPage determines which pages can be released to the OS,
1239: mark that in its data structure (the alloc extent). Mark the
1240: contiguous block of free pages at the end of the file
1241: as unallocated. This change is associated with the NTT.
1242: 4) The NTT calls the OS to deallocate the space from the file. Note
1243: that the system can handle being booted and asked to get an allocated
1244: page which is past end of file, it just extends the file automatically.
1245: 5) If freeing all space on the alloc page, and there is more than one
1246: alloc page, then free the alloc page - this requires an update to the
1247: previous alloc page which the loop has kept latched also.
1248: 6) if the last alloc page was deleted, restart loop at #1
1249:
1250: All NTT latches are released before this routine returns.
1251: If we use an NTT, the caller has to commit the NTT to release the
1252: allocPage latch. If we don't use an NTT, the allocPage latch is released
1253: as this routine returns.
1254:
1255: @param ntt - the nested top transaction for the purpose of freeing space.
1256: If ntt is null, use the user transaction for allocation.
1257: @param allocHandle - the container handle opened by the ntt,
1258: use this to latch the alloc page
1259:
1260: @exception StandardException Standard Cloudscape error policy
1261: */
1262: protected void compressContainer(RawTransaction ntt,
1263: BaseContainerHandle allocHandle) throws StandardException {
1264: AllocPage alloc_page = null;
1265: AllocPage prev_alloc_page = null;
1266:
1267: if (firstAllocPageNumber == ContainerHandle.INVALID_PAGE_NUMBER) {
1268: // no allocation pages in container, no work to do!
1269: return;
1270: }
1271:
1272: // make sure we don't execute redo recovery on any page
1273: // which is getting truncated. At this point we have an exclusive
1274: // table lock on the table, so after checkpoint no page change
1275: // can happen between checkpoint log record and compress of space.
1276: dataFactory.getRawStoreFactory().checkpoint();
1277:
1278: // block the backup, If backup is already in progress wait
1279: // for the backup to finish. Otherwise restore from the backup
1280: // can start recovery at different checkpoint and possibly
1281: // do redo on pages that are going to get truncated.
1282: ntt.blockBackup(true);
1283:
1284: try {
1285: synchronized (allocCache) {
1286: // loop until last 2 alloc pages are reached.
1287: alloc_page = (AllocPage) allocHandle
1288: .getAllocPage(firstAllocPageNumber);
1289:
1290: while (!alloc_page.isLast()) {
1291: if (prev_alloc_page != null) {
1292: // there are more than 2 alloc pages, unlatch the
1293: // earliest one.
1294: prev_alloc_page.unlatch();
1295: }
1296: prev_alloc_page = alloc_page;
1297: alloc_page = null;
1298:
1299: long nextAllocPageNumber = prev_alloc_page
1300: .getNextAllocPageNumber();
1301: long nextAllocPageOffset = prev_alloc_page
1302: .getNextAllocPageOffset();
1303:
1304: alloc_page = (AllocPage) allocHandle
1305: .getAllocPage(nextAllocPageNumber);
1306: }
1307:
1308: // invalidate cache before compress changes cached information,
1309: // while holding synchronization on cache and latch on
1310: // allocation page. This should guarantee that only new info
1311: // is seen after this operation completes.
1312: allocCache.invalidate();
1313:
1314: // reset, as pages may not exist after compress
1315: lastUnfilledPage = ContainerHandle.INVALID_PAGE_NUMBER;
1316: lastAllocatedPage = ContainerHandle.INVALID_PAGE_NUMBER;
1317:
1318: alloc_page.compress(ntt, this );
1319: }
1320:
1321: } finally {
1322: if (alloc_page != null) {
1323: alloc_page.unlatch();
1324: alloc_page = null;
1325: }
1326: if (prev_alloc_page != null) {
1327: prev_alloc_page.unlatch();
1328: prev_alloc_page = null;
1329: }
1330:
1331: // flush all changes to this file from cache.
1332: flushAll();
1333:
1334: // make sure all truncated pages are removed from the cache,
1335: // as it will get confused in the future if we allocate the same
1336: // page again, but find an existing copy of it in the cache -
1337: // it expects to not find new pages in the cache. Could just
1338: // get rid of truncated pages, the interface allows one page or
1339: // all pages.
1340: pageCache.discard(identity);
1341: }
1342: }
1343:
1344: /**
1345: * Get the reusable RecordId sequence number for the container.
1346: * @see BaseContainer#getReusableRecordIdSequenceNumber
1347: * @return reusable RecordId sequence number for the container.
1348: */
1349: public final long getReusableRecordIdSequenceNumber() {
1350: synchronized (this ) {
1351: return reusableRecordIdSequenceNumber;
1352: }
1353: }
1354:
1355: /**
1356: * Increment the reusable RecordId version sequence number.
1357: */
1358: protected final void incrementReusableRecordIdSequenceNumber() {
1359: final boolean readOnly = dataFactory.isReadOnly();
1360:
1361: synchronized (this ) {
1362: reusableRecordIdSequenceNumber++;
1363: if (!readOnly) {
1364: isDirty = true;
1365: }
1366: }
1367: }
1368:
1369: /**
1370: Create a new page in the container.
1371:
1372: <BR> MT - thread aware - It is assumed that our caller (our super class)
1373: has already arranged a logical lock on page allocation to only allow a
1374: single thread through here.
1375:
1376: Adding a new page involves 2 transactions and 2 pages.
1377: The User Transaction (UT) initiated the addPage call and expects a
1378: latched page (owns by the UT) to be returned.
1379: The Nested Top Transaction (NTT) is the transaction started by RawStore
1380: inside an addPage call. This NTT is committed before the page is
1381: returned. The NTT is used to access high traffic data structures such
1382: as the AllocPage.
1383:
1384: This is an outline of the algorithm used in adding a page:
1385: 1) find or make an allocPage which can handle the adding of a new page.
1386: Latch the allocPage with the NTT.
1387: 2) invalidate the allocation information cached by the container.
1388: Without the cache no page can be gotten from the container. Pages
1389: already in the page cache are not affected. Thus by latching the
1390: allocPage and invalidating the allocation cache, this NTT blocks out
1391: all page gets from this container until it commits.
1392: 3) the allocPage determines which page can be allocated, mark that in its
1393: data structure (the alloc extent) and returns the page number of the
1394: new page. This change is associated with the NTT.
1395: 4) the NTT gets or creates the new page in the page cache (bypassing the
1396: lookup of the allocPage since that is already latched by the NTT and
1397: will deadlock).
1398: 5) the NTT initializes the page (mark it is being a VALID page).
1399: 6) the page latch is transferred to the UT from the NTT.
1400: 7) the new page is returned, latched by UT
1401:
1402: If we use an NTT, the caller has to commit the NTT to release the
1403: allocPage latch. If we don't use an NTT, the allocPage latch is released
1404: as this routine returns.
1405:
1406: @param userHandle - the container handle opened by the user transaction,
1407: use this to latch the new user page
1408: @param ntt - the nested top transaction for the purpose of allocating the new page
1409: If ntt is null, use the user transaction for allocation.
1410: @param allocHandle - the container handle opened by the ntt,
1411: use this to latch the alloc page
1412:
1413: @exception StandardException Standard Cloudscape error policy
1414: */
1415: protected BasePage newPage(BaseContainerHandle userHandle,
1416: RawTransaction ntt, BaseContainerHandle allocHandle,
1417: boolean isOverflow) throws StandardException {
1418: // NOTE: we are single threaded thru this method, see MT comment
1419:
1420: boolean useNTT = (ntt != null);
1421:
1422: // if ntt is null, use user transaction
1423: if (!useNTT)
1424: ntt = userHandle.getTransaction();
1425:
1426: long lastPage; // last allocated page
1427: long lastPreallocPage; // last pre-allocated page
1428: long pageNumber; // the page number of the new page
1429: PageKey pkey; // the identity of the new page
1430: boolean reuse; // if true, we are trying to reuse a page
1431:
1432: /* in case the page recommended by allocPage is not committed yet, we may
1433: * need to retry a couple of times */
1434: boolean retry;
1435: int numtries = 0;
1436: long startSearch = lastAllocatedPage;
1437:
1438: AllocPage allocPage = null; // the alloc page
1439: BasePage page = null; // the new page
1440:
1441: try {
1442: do {
1443: retry = false; // we don't expect we need to retry
1444:
1445: synchronized (allocCache) {
1446: if (SanityManager.DEBUG) {
1447: SanityManager.ASSERT(ntt.getId().equals(
1448: allocHandle.getTransaction().getId()));
1449:
1450: if (useNTT)
1451: SanityManager.ASSERT(!ntt.getId()
1452: .equals(
1453: userHandle.getTransaction()
1454: .getId()));
1455: }
1456:
1457: /* find an allocation page that can handle adding a new
1458: * page.
1459: *
1460: * allocPage is unlatched when the ntt commits. The new
1461: * page is initialized by the ntt but the latch is
1462: * transfered to the user transaction before the allocPage
1463: * is unlatched. The allocPage latch prevents almost any
1464: * other reader or writer from finding the new page until
1465: * the ntt is committed and the new page is latched by the
1466: * user transaction.
1467: *
1468: * (If the page is being reused, it is possible for another
1469: * xact which kept a handle on the reused page to find the
1470: * page during the transfer UT -> NTT. If this unlikely
1471: * event occurs and the transfer fails [see code relating
1472: * to transfer below], we retry from the beginning.)
1473: *
1474: * After the NTT commits a reader (getNextPageNumber) may
1475: * get the page number of the newly allocated page and it
1476: * will wait for the new page and latch it when the user
1477: * transaction commits, aborts or unlatches the new page.
1478: * Whether the user transaction commits or aborts, the new
1479: * page stays allocated.
1480: *
1481: * RESOLVE: before NTT rolls back (or commits) the latch is
1482: * released. To repopulate the allocation cache, need to
1483: * get either the container lock on add page, or get a per
1484: * allocation page lock.
1485: *
1486: * This blocks all page read (getPage) from accessing this
1487: * alloc page in this container until the alloc page is
1488: * unlatched. Those who already have a page handle into
1489: * this container are unaffected.
1490: *
1491: * In other words, allocation blocks out reader (of any
1492: * page that is managed by this alloc page) by the latch
1493: * on the allocation page.
1494: *
1495: * Note that write page can proceed as usual.
1496: */
1497: allocPage = findAllocPageForAdd(allocHandle, ntt,
1498: startSearch);
1499:
1500: allocCache.invalidate(allocPage, allocPage
1501: .getPageNumber());
1502: }
1503:
1504: if (SanityManager.DEBUG) {
1505: if (allocPage == null)
1506: allocCache.dumpAllocationCache();
1507:
1508: SanityManager
1509: .ASSERT(allocPage != null,
1510: "findAllocPageForAdd returned a null alloc page");
1511: }
1512:
1513: //
1514: // get the next free page's number.
1515: // for case 1, page number > lastPreallocPage
1516: // for case 2, page number <= lastPage
1517: // for case 3, lastPage < page number <= lastPreallocPage
1518: //
1519: pageNumber = allocPage.nextFreePageNumber(startSearch);
1520:
1521: // need to distinguish between the following 3 cases:
1522: // 1) the page has not been allocated or initialized.
1523: // Create it in the page cache and sync it to disk.
1524: // 2) the page is being re-allocated.
1525: // We need to read it in to re-initialize it
1526: // 3) the page has been preallocated.
1527: // Create it in the page cache and don't sync it to disk
1528: //
1529: // first find out the current last initialized page and
1530: // preallocated page before the new page is added
1531: lastPage = allocPage.getLastPagenum();
1532: lastPreallocPage = allocPage.getLastPreallocPagenum();
1533:
1534: reuse = pageNumber <= lastPage;
1535:
1536: // no address translation necessary
1537: pkey = new PageKey(identity, pageNumber);
1538:
1539: if (reuse) {
1540: // if re-using a page, make sure the deallocLock on the new
1541: // page is not held. We only need a zero duration lock on
1542: // the new page because the allocPage is latched and this
1543: // is the only thread which can be looking at this
1544: // pageNumber.
1545:
1546: RecordHandle deallocLock = BasePage
1547: .MakeRecordHandle(
1548: pkey,
1549: RecordHandle.DEALLOCATE_PROTECTION_HANDLE);
1550:
1551: if (!getDeallocLock(allocHandle, deallocLock,
1552: false /* nowait */, true /* zeroDuration */)) {
1553:
1554: // The transaction which deallocated this page has not
1555: // committed yet. Try going to some other page. If
1556: // this is the first time we fail to get the dealloc
1557: // lock, try from the beginning of the allocated pages.
1558: // If we already did that and still fail, keep going
1559: // until we get a brand new page.
1560: if (numtries == 0) {
1561: startSearch = ContainerHandle.INVALID_PAGE_NUMBER;
1562: lastAllocatedPage = pageNumber;
1563: } else
1564: // continue from where we were
1565: startSearch = pageNumber;
1566:
1567: numtries++;
1568:
1569: // We have to unlatch the allocPage so that if that
1570: // transaction rolls back, it won't deadlock with this
1571: // transaction.
1572: allocPage.unlatch();
1573: allocPage = null;
1574:
1575: retry = true;
1576: } else {
1577: // we got the lock, next time start from there
1578: lastAllocatedPage = pageNumber;
1579: }
1580: } else {
1581: // we got a new page, next time, start from beginning of
1582: // the bit map again if we suspect there are some
1583: // deallocated pages
1584: if (numtries > 0)
1585: lastAllocatedPage = ContainerHandle.INVALID_PAGE_NUMBER;
1586: else
1587: lastAllocatedPage = pageNumber;
1588: }
1589:
1590: // Retry from the beginning if necessary.
1591: if (retry)
1592: continue;
1593:
1594: // If we get past here we must have (retry == false)
1595: if (SanityManager.DEBUG) {
1596: SanityManager.ASSERT(retry == false);
1597: }
1598:
1599: // Now we have verified that the allocPage is latched and we
1600: // can get the zeroDuration deallocLock nowait. This means the
1601: // transaction which freed the page has committed. Had that
1602: // transaction aborted, we would have retried.
1603:
1604: if (SanityManager.DEBUG) {
1605: // ASSERT lastPage <= lastPreallocPage
1606: if (lastPage > lastPreallocPage) {
1607: SanityManager.THROWASSERT("last page "
1608: + lastPage + " > lastPreallocPage "
1609: + lastPreallocPage);
1610: }
1611: }
1612:
1613: // No I/O at all if this new page is requested as part of a
1614: // create and load statement or this new page is in a temporary
1615: // container.
1616: //
1617: // In the former case, BaseContainer will allow the
1618: // MODE_UNLOGGED bit to go thru to the nested top transaction
1619: // alloc handle. In the latter case, there is no nested top
1620: // transaction and the alloc handle is the user handle, which
1621: // is UNLOGGED.
1622: boolean noIO = (allocHandle.getMode() & ContainerHandle.MODE_UNLOGGED) == ContainerHandle.MODE_UNLOGGED;
1623:
1624: // If we do not need the I/O (either because we are in a
1625: // create_unlogged mode or we are dealing with a temp table),
1626: // don't do any preallocation. Otherwise, see if we should be
1627: // pre-allocating pages by now. We don't call it before
1628: // nextFreePageNumber because finding a reusable page may be
1629: // expensive and we don't want to start preallocation unless
1630: // there are no more reusable pages - unless we are called
1631: // explicitly to bulk increase the container size in a preload
1632: // or in a create container.
1633: if (!noIO
1634: && (bulkIncreaseContainerSize || (pageNumber > lastPreallocPage && pageNumber > PreAllocThreshold))) {
1635: allocPage.preAllocatePage(this , PreAllocThreshold,
1636: PreAllocSize);
1637: }
1638:
1639: // update last preAllocated Page, it may have been changed by
1640: // the preAllocatePage call. We don't want to do the sync if
1641: // preAllocatePage already took care of it.
1642: lastPreallocPage = allocPage.getLastPreallocPagenum();
1643: boolean prealloced = pageNumber <= lastPreallocPage;
1644:
1645: // Argument to the create is an array of ints.
1646: // The array is only used for new page creation or for creating
1647: // a preallocated page, not for reuse.
1648: // 0'th element is the page format
1649: // 1'st element is whether or not to sync the page to disk
1650: // 2'nd element is pagesize
1651: // 3'rd element is spareSpace
1652: // 4'th element is minimumRecordSize
1653: int[] createPageArgs = new int[STORED_PAGE_ARG_NUM];
1654: createPageArgs[0] = StoredPage.FORMAT_NUMBER;
1655: createPageArgs[1] = prealloced ? 0 : (noIO ? 0
1656: : CachedPage.WRITE_SYNC);
1657: createPageArgs[2] = pageSize;
1658: createPageArgs[3] = spareSpace;
1659: createPageArgs[4] = minimumRecordSize;
1660:
1661: // RESOLVE: right now, there is no re-mapping of pages, so
1662: // pageOffset = pageNumber*pageSize
1663: long pageOffset = pageNumber * pageSize;
1664:
1665: // initialize a new user page
1666: // we first use the NTT to initialize the new page - in case the
1667: // allocation fails, it is rolled back with the NTT.
1668: // Later, we transfer the latch to the userHandle so it won't be
1669: // released when the ntt commits
1670:
1671: try {
1672: page = initPage(allocHandle, pkey, createPageArgs,
1673: pageOffset, reuse, isOverflow);
1674: } catch (StandardException se) {
1675: if (SanityManager.DEBUG) {
1676: SanityManager.DEBUG_PRINT("FileContainer",
1677: "got exception from initPage:"
1678: + "\nreuse = " + reuse
1679: + "\ncreatePageArgs[1] = "
1680: + createPageArgs[1]
1681: + "\nallocPage = " + allocPage);
1682: }
1683: allocCache.dumpAllocationCache();
1684:
1685: throw se;
1686: }
1687:
1688: if (SanityManager.DEBUG) {
1689: SanityManager.ASSERT(page != null,
1690: "initPage returns null page");
1691: SanityManager.ASSERT(page.isLatched(),
1692: "initPage returns unlatched page");
1693: }
1694:
1695: // allocate the page in the allocation page bit map
1696: allocPage.addPage(this , pageNumber, ntt, userHandle);
1697:
1698: if (useNTT) {
1699: // transfer the page latch from NTT to UT.
1700: //
1701: // after the page is unlatched by NTT, it is still
1702: // protected from being found by almost everybody else
1703: // because the alloc page is still latched and the alloc
1704: // cache is invalidated.
1705: //
1706: // However (beetle 3942) it is possible for the page to be
1707: // found by threads who specifically ask for this
1708: // pagenumber (e.g. HeapPostCommit).
1709: // We may find that such a thread has latched the page.
1710: // We shouldn't wait for it because we have the alloc page
1711: // latch, and this could cause deadlock (e.g.
1712: // HeapPostCommit might call removePage and this would wait
1713: // on the alloc page).
1714: //
1715: // We may instead find that we can latch the page, but that
1716: // another thread has managed to get hold of it during the
1717: // transfer and either deallocate it or otherwise change it
1718: // (add rows, delete rows etc.)
1719: //
1720: // Since this doesn't happen very often, we retry in these
1721: // 2 cases (we give up the alloc page and page and we start
1722: // this method from scratch).
1723: //
1724: // If the lock manager were changed to allow latches to be
1725: // transferred between transactions, wouldn't need to
1726: // unlatch to do the transfer, and would avoid having to
1727: // retry in these cases (beetle 4011).
1728:
1729: page.unlatch();
1730: page = null;
1731:
1732: // need to find it in the cache again since unlatch also
1733: // un-kept (released) the page from the cache
1734: page = (BasePage) pageCache.find(pkey);
1735: page = latchPage(userHandle, page, false /* don't wait, it might deadlock */);
1736:
1737: if (page == null
1738: ||
1739: // recordCount will only be 0 if there are no
1740: // rows (including deleted rows)
1741: page.recordCount() != 0
1742: || page.getPageStatus() != BasePage.VALID_PAGE) {
1743: retry = true;
1744: if (page != null) {
1745: page.unlatch();
1746: page = null;
1747: }
1748: allocPage.unlatch();
1749: allocPage = null;
1750: }
1751:
1752: }
1753: // if ntt is null, no need to transfer. Page is latched by user
1754: // transaction already. There will be no need to retry.
1755: // the alloc page is unlatched in the finally block.
1756: } while (retry == true);
1757:
1758: // At this point, should have a page suitable for returning
1759: if (SanityManager.DEBUG)
1760: SanityManager.ASSERT(page.isLatched());
1761: } catch (StandardException se) {
1762: if (page != null)
1763: page.unlatch();
1764: page = null;
1765:
1766: throw se; // rethrow error
1767: } finally {
1768: if (!useNTT && allocPage != null) {
1769: allocPage.unlatch();
1770: allocPage = null;
1771: }
1772:
1773: // NTT is committed by the caller
1774: }
1775:
1776: if (SanityManager.DEBUG)
1777: SanityManager.ASSERT(page.isLatched());
1778:
1779: // if bulkIncreaseContainerSize is set, that means this newPage call
1780: // may have greatly expanded the container size due to preallocation.
1781: // Regardless of how many pages it actually created, reset preAllocSize
1782: // to the default so we won't attempt to always preallocate 1000 pages
1783: // at a time in the future.
1784: if (bulkIncreaseContainerSize) {
1785: bulkIncreaseContainerSize = false;
1786: PreAllocSize = DEFAULT_PRE_ALLOC_SIZE;
1787: }
1788:
1789: if (!isOverflow && page != null)
1790: setLastInsertedPage(pageNumber);
1791:
1792: // increase estimated page count - without any synchronization or
1793: // logging, this is an estimate only
1794: if (estimatedPageCount >= 0)
1795: estimatedPageCount++;
1796:
1797: if (!this .identity.equals(page.getPageId().getContainerId())) {
1798:
1799: if (SanityManager.DEBUG) {
1800: SanityManager
1801: .THROWASSERT("just created a new page from a different container"
1802: + "\n this.identity = "
1803: + this .identity
1804: + "\n page.getPageId().getContainerId() = "
1805: + page.getPageId().getContainerId()
1806: + "\n userHandle is: "
1807: + userHandle
1808: + "\n allocHandle is: "
1809: + allocHandle
1810: + "\n this container is: " + this );
1811: }
1812:
1813: throw StandardException.newException(
1814: SQLState.DATA_DIFFERENT_CONTAINER, this .identity,
1815: page.getPageId().getContainerId());
1816: }
1817:
1818: return page; // return the newly added page
1819: }
1820:
1821: protected void clearPreallocThreshold() {
1822: // start life with preallocated page if possible
1823: PreAllocThreshold = 0;
1824: }
1825:
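/**
Prepare for a bulk load by preallocating pages up front.
Clears the preallocation threshold and asks the last allocation page to
preallocate up to numPage pages; this is best effort only, there is no
guarantee that all requested pages are actually preallocated.
*/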
1826: protected void prepareForBulkLoad(BaseContainerHandle handle,
1827: int numPage) {
1828: clearPreallocThreshold();
1829: RawTransaction tran = handle.getTransaction();
1830:
1831: // find the last allocation page - do not invalidate the alloc cache,
1832: // we don't want to prevent other people from reading or writing
1833: // pages.
1834: AllocPage allocPage = findLastAllocPage(handle, tran);
1835:
1836: // preallocate numPages. Do whatever this allocPage can handle, if it
1837: // is full, too bad. We don't guarantee that we will preallocate this
1838: // many pages, we only promise to try.
1839: if (allocPage != null) {
1840: allocPage.preAllocatePage(this , 0, numPage);
1841: allocPage.unlatch();
1842: }
1843: }
1844:
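/**
Check with the allocation cache whether pagenum refers to a page that is
within the container and is currently marked as an allocated (valid) page.
*/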
1845: private boolean pageValid(BaseContainerHandle handle, long pagenum)
1846: throws StandardException {
1847: boolean retval = false;
1848:
1849: synchronized (allocCache) {
1850: if (pagenum <= allocCache.getLastPageNumber(handle,
1851: firstAllocPageNumber)
1852: && allocCache.getPageStatus(handle, pagenum,
1853: firstAllocPageNumber) == AllocExtent.ALLOCATED_PAGE)
1854: retval = true;
1855: }
1856:
1857: return retval;
1858: }
1859:
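/**
Return the page number of the last allocated page in this container, or
ContainerHandle.INVALID_PAGE_NUMBER if no allocation page exists yet.
*/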
1860: protected long getLastPageNumber(BaseContainerHandle handle)
1861: throws StandardException {
1862: long retval;
1863: synchronized (allocCache) {
1864: // check if the first alloc page number is valid; it is invalid
1865: // if someone attempts to access the container info before the
1866: // first alloc page has been created. One such case is online backup.
1867: // If first alloc page itself is invalid, then there are no pages
1868: // on the disk yet for this container, just return
1869: // ContainerHandle.INVALID_PAGE_NUMBER, caller can decide what to
1870: // do.
1871:
1872: if (firstAllocPageNumber == ContainerHandle.INVALID_PAGE_NUMBER) {
1873: retval = ContainerHandle.INVALID_PAGE_NUMBER;
1874: } else {
1875: retval = allocCache.getLastPageNumber(handle,
1876: firstAllocPageNumber);
1877: }
1878: }
1879: return retval;
1880: }
1881:
1882: /*
1883: Find or allocate an allocation page which can handle adding a new page.
1884: Return a latched allocPage.
1885:
1886: <BR> MT - single thread required - called as part of add page
1887: */
1888: private AllocPage findAllocPageForAdd(
1889: BaseContainerHandle allocHandle, RawTransaction ntt,
1890: long lastAllocatedPage) throws StandardException {
1891: AllocPage allocPage = null;
1892: AllocPage oldAllocPage = null; // in case we need to walk the alloc page chain
1893: boolean success = false; // set this for clean up
1894:
1895: try {
1896: if (firstAllocPageNumber == ContainerHandle.INVALID_PAGE_NUMBER) {
1897: // make and return a latched new allocation page
1898: allocPage = makeAllocPage(ntt, allocHandle,
1899: FIRST_ALLOC_PAGE_NUMBER,
1900: FIRST_ALLOC_PAGE_OFFSET, CONTAINER_INFO_SIZE);
1901:
1902: if (SanityManager.DEBUG) {
1903: SanityManager
1904: .ASSERT(
1905: firstAllocPageNumber == FIRST_ALLOC_PAGE_NUMBER,
1906: "first Alloc Page number is still not set");
1907: SanityManager
1908: .ASSERT(
1909: firstAllocPageOffset == FIRST_ALLOC_PAGE_OFFSET,
1910: "first Alloc Page offset is still not set");
1911: }
1912: } else {
1913: // an allocation page already exists, go get it
1914: allocPage = (AllocPage) allocHandle
1915: .getAllocPage(firstAllocPageNumber);
1916: }
1917:
1918: /* allocPage is latched by allocHandle */
1919:
1920: if (!allocPage.canAddFreePage(lastAllocatedPage)) {
1921: // allocPage cannot manage the addition of one more page, walk the
1922: // alloc page chain till we find an allocPage that can
1923: // RESOLVE: always start with the first page for now...
1924:
1925: boolean found = false; // found an alloc page that can handle
1926: // adding a new page
1927:
1928: while (!allocPage.isLast()) {
1929: long nextAllocPageNumber = allocPage
1930: .getNextAllocPageNumber();
1931: long nextAllocPageOffset = allocPage
1932: .getNextAllocPageOffset();
1933:
1934: // RESOLVE (future): chain this info to in memory structure so
1935: // getAllocPage can find this alloc page
1936:
1937: allocPage.unlatch();
1938: allocPage = null;
1939:
1940: // the nextAllocPage is stable once set - even though it is
1941: // safe to get the next page latch before releasing this
1942: // allocPage.
1943: allocPage = (AllocPage) allocHandle
1944: .getAllocPage(nextAllocPageNumber);
1945:
1946: if (allocPage.canAddFreePage(lastAllocatedPage)) {
1947: found = true;
1948: break;
1949: }
1950: }
1951:
1952: if (!found) {
1953: // allocPage is last and it is full
1954: oldAllocPage = allocPage;
1955: allocPage = null;
1956:
1957: if (SanityManager.DEBUG)
1958: SanityManager
1959: .ASSERT(
1960: oldAllocPage.getLastPagenum() == oldAllocPage
1961: .getMaxPagenum(),
1962: "expect allocpage to be full but last pagenum != maxpagenum");
1963:
1964: long newAllocPageNum = oldAllocPage.getMaxPagenum() + 1;
1965: long newAllocPageOffset = newAllocPageNum; // no translation
1966:
1967: allocPage = makeAllocPage(ntt, allocHandle,
1968: newAllocPageNum, newAllocPageOffset, 0 /* no containerInfo */);
1969:
1970: // this writes out the new alloc page and returns a latched page
1971: // nobody can find the new alloc page until oldAllocPage is unlatched.
1972:
1973: // oldAllocPage is no longer the last alloc page,
1974: // it has a pointer to the new last alloc page
1975: oldAllocPage.chainNewAllocPage(allocHandle,
1976: newAllocPageNum, newAllocPageOffset);
1977: oldAllocPage.unlatch();
1978: oldAllocPage = null;
1979: }
1980: }
1981:
1982: /* no error handling necessary */
1983: success = true;
1984: } finally // unlatch allocation page if any error happened
1985: {
1986: if (!success) {
1987: if (oldAllocPage != null)
1988: oldAllocPage.unlatch();
1989:
1990: if (allocPage != null)
1991: allocPage.unlatch();
1992:
1993: allocPage = null;
1994: }
1995:
1996: // if success drop out of finally block
1997: }
1998:
1999: return allocPage;
2000: }
2001:
2002: /**
2003: Find the last alloc page, returns null if no alloc page is found
2004: */
2005: private AllocPage findLastAllocPage(BaseContainerHandle handle,
2006: RawTransaction tran) {
2007: AllocPage allocPage = null;
2008: AllocPage oldAllocPage = null;
2009:
2010: if (firstAllocPageNumber == ContainerHandle.INVALID_PAGE_NUMBER)
2011: return null;
2012:
2013: try {
2014: allocPage = (AllocPage) handle
2015: .getAllocPage(firstAllocPageNumber);
2016: while (!allocPage.isLast()) {
2017: long nextAllocPageNumber = allocPage
2018: .getNextAllocPageNumber();
2019: long nextAllocPageOffset = allocPage
2020: .getNextAllocPageOffset();
2021:
2022: allocPage.unlatch();
2023: allocPage = null;
2024:
2025: allocPage = (AllocPage) handle
2026: .getAllocPage(nextAllocPageNumber);
2027: }
2028: } catch (StandardException se) {
2029: if (allocPage != null)
2030: allocPage.unlatch();
2031: allocPage = null;
2032: }
2033:
2034: return allocPage;
2035:
2036: }
2037:
2038: /*
2039: Make a new alloc page, latch it with the passed in container handle.
2040: */
2041: private AllocPage makeAllocPage(RawTransaction ntt,
2042: BaseContainerHandle handle, long pageNumber,
2043: long pageOffset, int containerInfoSize)
2044: throws StandardException {
2045: if (SanityManager.DEBUG) {
2046: if (containerInfoSize != 0
2047: && containerInfoSize != CONTAINER_INFO_SIZE)
2048: SanityManager.THROWASSERT("expect 0 or "
2049: + CONTAINER_INFO_SIZE + ", got "
2050: + containerInfoSize);
2051:
2052: if (pageNumber != FIRST_ALLOC_PAGE_NUMBER
2053: && containerInfoSize != 0)
2054: SanityManager
2055: .THROWASSERT("Not first alloc page but container info size "
2056: + containerInfoSize);
2057: }
2058:
2059: // argument to the create is an array of ints
2060: // 0'th element is the page format
2061: // 1'st element is whether or not to sync the page to disk
2062: // 2'nd element is the pagesize
2063: // 3'rd element is spareSpace
2064: // 4'th element is number of bytes to reserve for the container header
2065: // 5'th element is the minimumRecordSize
2066: // NOTE: the arg list here must match the one in allocPage
2067:
2068: // No I/O at all if this new page is requested as part of a create
2069: // and load statement or this new alloc page is in a temporary
2070: // container.
2071: // In the former case, BaseContainer will allow the MODE_UNLOGGED
2072: // bit to go thru to the nested top transaction alloc handle.
2073: // In the later case, there is no nested top transaction and the
2074: // alloc handle is the user handle, which is UNLOGGED.
2075:
2076: boolean noIO = (handle.getMode() & ContainerHandle.MODE_UNLOGGED) == ContainerHandle.MODE_UNLOGGED;
2077:
2078: int[] createAllocPageArgs = new int[ALLOC_PAGE_ARG_NUM];
2079: createAllocPageArgs[0] = AllocPage.FORMAT_NUMBER;
2080: createAllocPageArgs[1] = noIO ? 0 : CachedPage.WRITE_SYNC;
2081: createAllocPageArgs[2] = pageSize;
2082: createAllocPageArgs[3] = 0; // allocation page has no need for spare
2083: createAllocPageArgs[4] = containerInfoSize;
2084: createAllocPageArgs[5] = minimumRecordSize;
2085:
2086: if (SanityManager.DEBUG) {
2087: if (SanityManager.DEBUG_ON(SPACE_TRACE)) {
2088: SanityManager.DEBUG(SPACE_TRACE,
2089: "making new allocation page at " + pageNumber);
2090: }
2091: }
2092:
2093: if (pageNumber == FIRST_ALLOC_PAGE_NUMBER) {
2094: // RESOLVE: make sure the following is true
2095: //
2096: // firstAllocPageNumber and Offset can be set and accessed without
2097: // synchronization since the first allocation page is
2098: // created as part of the container create; this value is set
2099: // before any other transaction has a chance to open the container.
2100: // Once set, the first allocation page does not move or change
2101: // position
2102: firstAllocPageNumber = pageNumber;
2103: firstAllocPageOffset = pageOffset;
2104:
2105: }
2106:
2107: PageKey pkey = new PageKey(identity, pageNumber);
2108:
2109: // return a latched new alloc page
2110: return (AllocPage) initPage(handle, pkey, createAllocPageArgs,
2111: pageOffset, false, /* not reuse */
2112: false /* not overflow */);
2113: }
2114:
2115: /**
2116: Initialize a page
2117:
2118: @return a latched page that has been initialized.
2119:
2120: @param allochandle the container handle to initialize the page with - the ntt
2121: @param pkey the page number of the page to be initialized
2122: @param createArgs the int array for page creation
2123: @param reuse is true if we are reusing a page that has
2124: already been initialized once
2125:
2126: @exception StandardException Cloudscape Standard error policy
2127: */
2128: protected BasePage initPage(BaseContainerHandle allochandle,
2129: PageKey pkey, int[] createArgs, long pageOffset,
2130: boolean reuse, boolean overflow) throws StandardException {
2131: BasePage page = null;
2132:
2133: boolean releasePage = true;
2134:
2135: try {
2136: if (reuse) // read the page in first
2137: {
2138: // Cannot go thru the container handle because all read pages are blocked.
2139: // do it underneath the handle and directly to the cache.
2140: // Nobody can get thru because getPage will block at getting the alloc page.
2141:
2142: if (SanityManager.DEBUG) {
2143: if (SanityManager.DEBUG_ON(SPACE_TRACE)) {
2144: SanityManager.DEBUG(SPACE_TRACE,
2145: "reusing page " + pkey);
2146: }
2147: }
2148:
2149: page = (BasePage) pageCache.find(pkey);
2150: if (page == null) // hmmm?
2151: {
2152: throw StandardException.newException(
2153: SQLState.FILE_REUSE_PAGE_NOT_FOUND, pkey);
2154: }
2155: } else {
2156: if (SanityManager.DEBUG) {
2157: if (SanityManager.DEBUG_ON(SPACE_TRACE)) {
2158: SanityManager.DEBUG(SPACE_TRACE,
2159: "allocation new page " + pkey);
2160: }
2161: }
2162:
2163: // a brand new page, initialize a new page in the cache
2164: page = (BasePage) pageCache.create(pkey, createArgs);
2165:
2166: if (SanityManager.DEBUG)
2167: SanityManager.ASSERT(page != null,
2168: "page Cache create return a null page");
2169: }
2170: releasePage = false;
2171: page = latchPage(allochandle, page, true /* may need to wait, track3822 */);
2172:
2173: if (page == null) {
2174: throw StandardException.newException(
2175: SQLState.FILE_NEW_PAGE_NOT_LATCHED, pkey);
2176: }
2177:
2178: // page is either brand new or is read from disk, in either case,
2179: // it knows how to get itself initialized.
2180: int initPageFlag = 0;
2181: if (reuse)
2182: initPageFlag |= BasePage.INIT_PAGE_REUSE;
2183: if (overflow)
2184: initPageFlag |= BasePage.INIT_PAGE_OVERFLOW;
2185: if (reuse && isReusableRecordId())
2186: initPageFlag |= BasePage.INIT_PAGE_REUSE_RECORDID;
2187:
2188: page.initPage(initPageFlag, pageOffset);
2189: page.setContainerRowCount(estimatedRowCount);
2190:
2191: } finally {
2192: if (releasePage && page != null) {
2193: // release the new page from cache if it errors
2194: // out before the exclusive lock is set
2195: pageCache.release((Cacheable) page);
2196: page = null;
2197: }
2198: }
2199:
2200: return page;
2201: }
2202:
2203: /**
2204: Get a page in the container.
2205:
2206: getUserPage is the generic base routine for all user (client to raw
2207: store) getPage calls. This routine coordinates with allocation and
2208: deallocation to ensure that no page can be gotten from the container
2209: while the page is in the middle of being allocated or deallocated.
2210: This routine latches the page.
2211:
2212: @param handle the container handle
2213: @param pageNumber the page number of the page to get
2214: @param overflowOK if true then an overflow page is OK,
2215: if false, then only non-overflow page is OK
2216: @param wait if true then wait for a latch
2217: @return the latched page
2218:
2219: <BR> MT - thread safe
2220:
2221: @exception StandardException Standard Cloudscape error policy
2222: */
2223: private BasePage getUserPage(BaseContainerHandle handle,
2224: long pageNumber, boolean overflowOK, boolean wait)
2225: throws StandardException {
2226:
2227: if (SanityManager.DEBUG) {
2228: SanityManager.ASSERT(pageNumber != FIRST_ALLOC_PAGE_NUMBER,
2229: "getUserPage trying to get an alloc page, pageNumber = "
2230: + pageNumber);
2231:
2232: if (pageNumber < ContainerHandle.FIRST_PAGE_NUMBER)
2233: SanityManager.THROWASSERT("pageNumber = " + pageNumber);
2234: }
2235:
2236: if (pageNumber < ContainerHandle.FIRST_PAGE_NUMBER)
2237: return null;
2238:
2239: if (getCommittedDropState()) // committed and dropped, cannot get a page
2240: return null;
2241:
2242: if (!pageValid(handle, pageNumber)) {
2243: return null;
2244: }
2245:
2246: // RESOLVE: no translation!
2247:
2248: PageKey pageSearch = new PageKey(identity, pageNumber);
2249: BasePage page = (BasePage) pageCache.find(pageSearch);
2250:
2251: if (page == null) {
2252: return page;
2253: }
2254:
2255: // latch the page
2256: if (latchPage(handle, page, wait) == null) {
2257: // page was already released from cache
2258: return null;
2259: }
2260:
2261: // double check for overflow and deallocated page
2262: // a page that was valid before may be invalid by now if it was
2263: // deallocated in the interim.
2264: // a page that is invalid can also become valid in the interim, but
2265: // we do not handle that. The client must supply another locking
2266: // mechanism to prevent that (an allocation happening where there are
2267: // readers) if that is needed
2268: if ((page.isOverflowPage() && !overflowOK)
2269: || (page.getPageStatus() != BasePage.VALID_PAGE)) {
2270: // unlatch releases page from cache, see StoredPage.releaseExclusive()
2271: page.unlatch();
2272: page = null;
2273: }
2274:
2275: return page;
2276: }
2277:
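/**
Tell the allocation cache whether the given page is unfilled, so that it
can be considered (or skipped) when looking for a page to insert into.
No-op for read-only databases.
*/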
2278: protected void trackUnfilledPage(long pagenumber, boolean unfilled) {
2279: if (!dataFactory.isReadOnly())
2280: allocCache.trackUnfilledPage(pagenumber, unfilled);
2281: }
2282:
2283: /**
2284: Get a valid (non-deallocated or free) page in the container.
2285: Overflow page is OK. Resulting page is latched.
2286:
2287: <BR> MT - thread safe
2288:
2289: @exception StandardException Standard Cloudscape error policy
2290: */
2291: protected BasePage getPage(BaseContainerHandle handle,
2292: long pageNumber, boolean wait) throws StandardException {
2293: return getUserPage(handle, pageNumber,
2294: true /* overflow page OK */, wait);
2295: }
2296:
2297: /**
2298: Get any old page - turn off all validation
2299:
2300: @exception StandardException Cloudscape Standard error policy
2301: */
2302: protected BasePage getAnyPage(BaseContainerHandle handle,
2303: long pageNumber) throws StandardException {
2304: // getAnyPage gets a page without any validation (except for a
2305: // committed, dropped container)
2306:
2307: if (getCommittedDropState()) // committed and dropped, cannot get a page
2308: return null;
2309:
2310: // make sure alloc cache has no stale info
2311: synchronized (allocCache) {
2312: allocCache.invalidate();
2313: }
2314:
2315: PageKey pageSearch = new PageKey(identity, pageNumber);
2316: BasePage page = (BasePage) pageCache.find(pageSearch);
2317:
2318: return page;
2319: }
2320:
2321: /**
2322: * ReCreate a page for rollforward recovery.
2323: * <p>
2324: * During redo recovery it is possible for the system to try to redo
2325: * the creation of a page (ie. going from non-existence to version 0).
2326: * It first tries to read the page from disk, but a few different types
2327: * of errors can occur:
2328: * o the page does not exist at all on disk, this can happen during
2329: * rollforward recovery applied to a backup where the file was
2330: * copied and the page was added to the file during the time frame
2331: * of the backup but after the physical file was copied.
2332: * o space in the file exists, but it was never initialized. This
2333: * can happen if you happen to crash at just the right moment during
2334: * the allocation process. Also
2335: * on some OS's it is possible to read from a part of the file that
2336: * was not ever written - resulting in garbage from the store's
2337: * point of view (often the result is all 0's).
2338: *
2339: * All these errors are easy to recover from as the system can easily
2340: * create a version 0 from scratch and write it to disk.
2341: *
2342: * Because the system does not sync allocation of data pages, it is also
2343: * possible at this point that while writing the version 0 to disk to
2344: * create it we may encounter an out of disk space error (caught in this
2345: * routine as a StandardException from the create() call). We can't
2346: * recover from this without help from outside, so the caught exception
2347: * is nested and a new exception thrown which the recovery system will
2348: * output to the user asking them to check their disk for space/errors.
2349: *
2350: * @exception StandardException Standard exception policy.
2351: **/
2352: protected BasePage reCreatePageForRedoRecovery(
2353: BaseContainerHandle handle, int pageFormat,
2354: long pageNumber, long pageOffset) throws StandardException {
2355: // recreating a page should be done only if we are in the middle of
2356: // rollforward recovery or if derby.storage.patchInitPageRecoverError
2357: // is set to true.
2358:
2359: //check if we are in rollforward recovery
2360: boolean rollForwardRecovery = ((RawTransaction) handle
2361: .getTransaction()).inRollForwardRecovery();
2362:
2363: if (!rollForwardRecovery
2364: && !(PropertyUtil
2365: .getSystemBoolean(RawStoreFactory.PATCH_INITPAGE_RECOVER_ERROR))) {
2366: return null;
2367: }
2368:
2369: // RESOLVE: first need to verify that the page is really NOT in the
2370: // container!
2371:
2372: // no address translation necessary
2373: PageKey pkey = new PageKey(identity, pageNumber);
2374:
2375: int[] reCreatePageArgs = null;
2376:
2377: if (pageFormat == StoredPage.FORMAT_NUMBER) {
2378: reCreatePageArgs = new int[STORED_PAGE_ARG_NUM];
2379: reCreatePageArgs[0] = pageFormat;
2380: reCreatePageArgs[1] = CachedPage.WRITE_SYNC;
2381: reCreatePageArgs[2] = pageSize;
2382: reCreatePageArgs[3] = spareSpace;
2383: reCreatePageArgs[4] = minimumRecordSize;
2384: } else if (pageFormat == AllocPage.FORMAT_NUMBER) {
2385: reCreatePageArgs = new int[ALLOC_PAGE_ARG_NUM];
2386:
2387: // only the first allocation page has borrowed space for the
2388: // container info
2389:
2390: int containerInfoSize = 0;
2391: if (pageNumber == FIRST_ALLOC_PAGE_NUMBER) {
2392: containerInfoSize = CONTAINER_INFO_SIZE;
2393: firstAllocPageNumber = pageNumber;
2394: firstAllocPageOffset = pageOffset;
2395: }
2396:
2397: reCreatePageArgs[0] = pageFormat;
2398: reCreatePageArgs[1] = CachedPage.WRITE_SYNC;
2399: reCreatePageArgs[2] = pageSize;
2400: reCreatePageArgs[3] = 0; // allocation page has no need for spare
2401: reCreatePageArgs[4] = containerInfoSize;
2402: reCreatePageArgs[5] = minimumRecordSize;
2403: } else {
2404: throw StandardException.newException(
2405: SQLState.DATA_UNKNOWN_PAGE_FORMAT, pkey);
2406: }
2407:
2408: if (SanityManager.DEBUG) {
2409: if (SanityManager.DEBUG_ON("LoadTran"))
2410: SanityManager.DEBUG_PRINT("Trace", "recreating page "
2411: + pkey + " for load tran");
2412: }
2413:
2414: // Can't just call initPage because that wants to log an initPage
2415: // operation, whereas we are here because of an initPage operation in
2416: // the log already.
2417: BasePage page = null;
2418: boolean releasePage = true;
2419:
2420: try {
2421: try {
2422: // a brand new page, initialize a new page in cache
2423: page = (BasePage) pageCache.create(pkey,
2424: reCreatePageArgs);
2425: } catch (StandardException se) {
2426: throw StandardException.newException(
2427: SQLState.FILE_NEW_PAGE_DURING_RECOVERY, se,
2428: pkey);
2429: }
2430:
2431: if (page != null) {
2432: releasePage = false;
2433: page = latchPage(handle, page, false /* never need to wait */);
2434:
2435: if (page == null) {
2436: throw StandardException.newException(
2437: SQLState.FILE_NEW_PAGE_NOT_LATCHED, pkey);
2438: }
2439: } else {
2440: throw StandardException.newException(
2441: SQLState.FILE_NEW_PAGE_DURING_RECOVERY, pkey);
2442: }
2443:
2444: } finally {
2445: if (releasePage && page != null) {
2446: // release the new page from cache if it errors out before
2447: // the exclusive lock is set. If there is an error during
2448: // roll forward recovery, we are doomed anyway.
2449: pageCache.release((Cacheable) page);
2450: page = null;
2451: }
2452: }
2453:
2454: return page;
2455:
2456: }
2457:
2458: /**
2459: Get an alloc page - only accessible to the raw store
2460: (container and recovery)
2461:
2462: @exception StandardException Cloudscape Standard error policy
2463: */
2464: protected BasePage getAllocPage(long pageNumber)
2465: throws StandardException {
2466: if (getCommittedDropState()) // committed and dropped, cannot get a page
2467: return null;
2468:
2469: PageKey pageSearch = new PageKey(identity, pageNumber);
2470: BasePage page = (BasePage) pageCache.find(pageSearch);
2471:
2472: if (SanityManager.DEBUG) {
2473: if (page == null)
2474: SanityManager
2475: .THROWASSERT("getting a null alloc page page "
2476: + getIdentity() + pageNumber);
2477:
2478: if (!(page instanceof AllocPage))
2479: SanityManager
2480: .THROWASSERT("trying to get a user page as an alloc page "
2481: + getIdentity() + pageNumber);
2482: }
2483:
2484: // assuming that allocation page lives in the page cache...
2485: return page;
2486: }
2487:
2488: /**
2489: Get only a valid, non-overflow page. If the page number is invalid
2490: or refers to an overflow page, returns null.
2491:
2492: @exception StandardException Cloudscape Standard error policy
2493: */
2494: protected BasePage getHeadPage(BaseContainerHandle handle,
2495: long pageNumber, boolean wait) throws StandardException {
2496: return getUserPage(handle, pageNumber,
2497: false /* overflow not ok */, wait);
2498: }
2499:
2500: /**
2501: Get the first valid page in the container
2502:
2503: @exception StandardException Cloudscape Standard error policy
2504: */
2505: protected BasePage getFirstHeadPage(BaseContainerHandle handle,
2506: boolean wait) throws StandardException {
2507: return getNextHeadPage(handle,
2508: ContainerHandle.FIRST_PAGE_NUMBER - 1, wait);
2509: }
2510:
2511: /**
2512: Get the next page in the container.
2513: @exception StandardException Standard Cloudscape error policy
2514: */
2515: protected BasePage getNextHeadPage(BaseContainerHandle handle,
2516: long pageNumber, boolean wait) throws StandardException {
2517: long nextNumber;
2518:
2519: while (true) {
2520: synchronized (allocCache) {
2521: // ask the cache for the next pagenumber
2522: nextNumber = allocCache.getNextValidPage(handle,
2523: pageNumber, firstAllocPageNumber);
2524: }
2525:
2526: if (nextNumber == ContainerHandle.INVALID_PAGE_NUMBER)
2527: return null;
2528:
2529: // optimistically go for the next page
2530: BasePage p = getUserPage(handle, nextNumber,
2531: false /* no overflow page*/, wait);
2532: if (p != null)
2533: return p;
2534:
2535: pageNumber = nextNumber;
2536: }
2537: }
2538:
2539: private BasePage getInsertablePage(BaseContainerHandle handle,
2540: long pageNumber, boolean wait, boolean overflowOK)
2541: throws StandardException {
2542: if (pageNumber == ContainerHandle.INVALID_PAGE_NUMBER)
2543: return null;
2544:
2545: BasePage p = getUserPage(handle, pageNumber, overflowOK, wait);
2546: if (p != null) {
2547: // make sure the page is not too full
2548: if (!p.allowInsert()) {
2549: p.unlatch();
2550: p = null;
2551:
2552: // it is too full, make sure we are tracking it so we won't
2553: // see it again.
2554: allocCache.trackUnfilledPage(pageNumber, false);
2555: }
2556: }
2557: /*
2558: RESOLVE track 3757
2559: Need to check if this fix resolves the bug.
2560: This is commented out because we can't conclude here that this is not
2561: a user page, it may just be that we failed to get a latch on the page.
2562: In a high contention scenario this could cause a lot of relatively empty
2563: pages to not be considered for insert.
2564: TODO
2565: May be a good idea to move the trackUnfilledPage call below to some of
2566: the lines in the getUserPage method.
2567:
2568: else
2569: {
2570: // it is not a user page, make sure we are tracking its fillness so
2571: // we won't consider it as a 1/2 filled page ever
2572: allocCache.trackUnfilledPage(pageNumber, false);
2573: }
2574: */
2575: return p;
2576: }
2577:
2578: /**
2579: * Get candidate page to move a row for compressing the table.
2580: * <p>
2581: * The caller is moving rows from the end of the table toward the beginning,
2582: * with the goal of freeing up a block of empty pages at the end of the
2583: * container which can be returned to the OS.
2584: * <p>
2585: * On entry pageno will be latched by the caller. Only return pages with
2586: * numbers below pageno. Attempting to return pageno will result in a
2587: * latch/latch deadlock on the same thread.
2588: *
2589: * @exception StandardException Standard exception policy.
2590: **/
2591: protected BasePage getPageForCompress(BaseContainerHandle handle,
2592: int flag, long pageno) throws StandardException {
2593: BasePage p = null;
2594: boolean getLastInserted = (flag & ContainerHandle.GET_PAGE_UNFILLED) == 0;
2595:
2596: if (getLastInserted) {
2597: // There is nothing protecting lastInsertedPage from being changed
2598: // by another thread. Make a local copy.
2599: long localLastInsertedPage = getLastInsertedPage();
2600:
2601: if ((localLastInsertedPage < pageno)
2602: && (localLastInsertedPage != ContainerHandle.INVALID_PAGE_NUMBER)) {
2603: // First try getting last inserted page.
2604:
2605: p = getInsertablePage(handle, localLastInsertedPage,
2606: true, /* wait */
2607: false /* no overflow page */);
2608:
2609: // if localLastInsertedPage is not an insertable page,
2610: // don't waste time getting it again.
2611: if (p == null) {
2612: // There is a slight possibility that lastUnfilledPage and
2613: // lastInsertedPage will change between the if and the
2614: // assignment. The worst that will happen is that we lose the
2615: // optimization. Don't want to slow down allocation by
2616: // adding more synchronization.
2617:
2618: if (localLastInsertedPage == getLastUnfilledPage())
2619: setLastUnfilledPage(ContainerHandle.INVALID_PAGE_NUMBER);
2620:
2621: if (localLastInsertedPage == getLastInsertedPage())
2622: setLastInsertedPage(ContainerHandle.INVALID_PAGE_NUMBER);
2623: }
2624: }
2625: } else {
2626: // get a relatively unfilled page that is not the last Inserted page
2627:
2628: long localLastUnfilledPage = getLastUnfilledPage();
2629:
2630: if (localLastUnfilledPage == ContainerHandle.INVALID_PAGE_NUMBER
2631: || localLastUnfilledPage >= pageno
2632: || localLastUnfilledPage == getLastInsertedPage()) {
2633: // get an unfilled page, searching from beginning of container.
2634: localLastUnfilledPage = getUnfilledPageNumber(handle, 0);
2635: }
2636:
2637: if ((localLastUnfilledPage != ContainerHandle.INVALID_PAGE_NUMBER)
2638: && (localLastUnfilledPage < pageno)) {
2639: p = getInsertablePage(handle, localLastUnfilledPage,
2640: true, false);
2641: }
2642:
2643: // return this page for insert
2644: if (p != null) {
2645: setLastUnfilledPage(localLastUnfilledPage);
2646: setLastInsertedPage(localLastUnfilledPage);
2647: }
2648: }
2649:
2650: return p;
2651: }
2652:
2653: /**
2654: Get a potentially suitable page for insert and latch it.
2655: @exception StandardException Standard Cloudscape error policy
2656: */
2657: protected BasePage getPageForInsert(BaseContainerHandle handle,
2658: int flag) throws StandardException {
2659: BasePage p = null;
2660: boolean getLastInserted = (flag & ContainerHandle.GET_PAGE_UNFILLED) == 0;
2661:
2662: if (getLastInserted) {
2663: // There is nothing protecting lastInsertedPage from being changed
2664: // by another thread. Make a local copy.
2665: long localLastInsertedPage = getLastInsertedPage();
2666:
2667: if (localLastInsertedPage != ContainerHandle.INVALID_PAGE_NUMBER) {
2668: // First try getting last allocated page, NOWAIT
2669:
2670: p = getInsertablePage(handle, localLastInsertedPage,
2671: false, /* wait */
2672: false /* no overflow page */);
2673:
2674: if (p == null) {
2675: // most likely we could not get the latch NOWAIT, try again
2676: // with a new page, and tell the system to switch to
2677: // multi-page mode.
2678: /* switchToMultiInsertPageMode(handle); */
2679:
2680: localLastInsertedPage = getLastInsertedPage();
2681:
2682: p = getInsertablePage(handle,
2683: localLastInsertedPage, true, /* wait */
2684: false /* no overflow page */);
2685: }
2686: }
2687:
2688: // if lastInsertedPage is not an insertable page, don't waste time
2689: // getting it again.
2690: if (p == null) {
2691: // There is a slight possibility that lastUnfilledPage and
2692: // lastInsertedPage will change between the if and the
2693: // assignment. The worst that will happen is that we lose the
2694: // optimization. Don't want to slow down allocation by adding
2695: // more synchronization.
2696:
2697: if (localLastInsertedPage == getLastUnfilledPage())
2698: setLastUnfilledPage(ContainerHandle.INVALID_PAGE_NUMBER);
2699:
2700: if (localLastInsertedPage == getLastInsertedPage())
2701: setLastInsertedPage(ContainerHandle.INVALID_PAGE_NUMBER);
2702: }
2703: } else // get a relatively unfilled page that is not
2704: { // the last Inserted page
2705: long localLastUnfilledPage = getLastUnfilledPage();
2706:
2707: if (localLastUnfilledPage == ContainerHandle.INVALID_PAGE_NUMBER
2708: || localLastUnfilledPage == getLastInsertedPage())
2709: localLastUnfilledPage = getUnfilledPageNumber(handle,
2710: localLastUnfilledPage);
2711:
2712: if (localLastUnfilledPage != ContainerHandle.INVALID_PAGE_NUMBER) {
2713: // try the last unfilled page we found - this could be
2714: // different from lastInserted if the last unfilled one we
2715: // found does not have enough space for the insert and the
2716: // client wants to get a brand new page.
2717: p = getInsertablePage(handle, localLastUnfilledPage,
2718: true, false);
2719:
2720: // try again
2721: if (p == null) {
2722: localLastUnfilledPage = getUnfilledPageNumber(
2723: handle, localLastUnfilledPage);
2724: if (localLastUnfilledPage != ContainerHandle.INVALID_PAGE_NUMBER) {
2725: p = getInsertablePage(handle,
2726: localLastUnfilledPage, true, false);
2727: }
2728: }
2729: }
2730:
2731: // return this page for insert
2732: if (p != null) {
2733: setLastUnfilledPage(localLastUnfilledPage);
2734: setLastInsertedPage(localLastUnfilledPage);
2735: }
2736: }
2737:
2738: return p;
2739:
2740: }
2741:
2742: /**
2743: * Get a latched page. In the case of backup, the latch is necessary to
2744: * prevent modification to the page while it is being written to the backup.
2745: * The backup process relies on latches to get a consistent
2746: * snapshot of the page; user-level table/page/row locks are NOT
2747: * acquired by the online backup mechanism.
2748: *
2749: * @param handle the container handle used to latch the page
2750: * @param pageNumber the page number of the page to get
2751: * @return the latched page
2752: * @exception StandardException Standard Derby error policy
2753: */
2754: protected BasePage getLatchedPage(BaseContainerHandle handle,
2755: long pageNumber) throws StandardException {
2756: PageKey pageKey = new PageKey(identity, pageNumber);
2757: BasePage page = (BasePage) pageCache.find(pageKey);
2758:
2759: if (SanityManager.DEBUG) {
2760: SanityManager.ASSERT(page != null, "page is not found :"
2761: + pageKey);
2762: }
2763:
2764: // latch the page
2765: page = latchPage(handle, page, true);
2766:
2767: if (SanityManager.DEBUG) {
2768: SanityManager.ASSERT(page.isLatched(),
2769: "page is not latched:" + pageKey);
2770: }
2771:
2772: return page;
2773: }
2774:
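/**
Ask the allocation cache for the number of a relatively unfilled page,
using pagenum as a hint for where to start the search.
*/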
2775: private long getUnfilledPageNumber(BaseContainerHandle handle,
2776: long pagenum) throws StandardException {
2777: synchronized (allocCache) {
2778: return allocCache.getUnfilledPageNumber(handle,
2779: firstAllocPageNumber, pagenum);
2780: }
2781: }
2782:
2783: /*
2784: Cost estimates
2785: */
2786: /**
2787: <BR>MT - this routine is NOT MT-safe and clients don't need to provide
2788: synchronization.
2789:
2790: @see ContainerHandle#getEstimatedRowCount
2791: */
2792: public long getEstimatedRowCount(int flag) {
2793: return estimatedRowCount;
2794: }
2795:
2796: /**
2797: @see ContainerHandle#setEstimatedRowCount
2798: */
2799: public void setEstimatedRowCount(long count, int flag) {
2800: boolean readOnly = dataFactory.isReadOnly();
2801:
2802: synchronized (this ) {
2803: estimatedRowCount = count;
2804:
2805: if (!readOnly)
2806: isDirty = true;
2807: }
2808: }
2809:
2810: /**
2811: Update estimated row count by page as it leaves the cache.
2812: The estimated row count is updated without logging!
2813: */
2814: protected void updateEstimatedRowCount(int delta) {
2815: boolean readOnly = dataFactory.isReadOnly();
2816:
2817: synchronized (this ) {
2818: estimatedRowCount += delta;
2819: if (estimatedRowCount < 0)
2820: estimatedRowCount = 0;
2821:
2822: // mark the container as dirty without bumping the container
2823: // version because row count changes are not logged.
2824: if (!readOnly)
2825: isDirty = true;
2826: }
2827: }
2828:
2829: /**
2830: @see ContainerHandle#getEstimatedPageCount
2831: @exception StandardException Standard Cloudscape error policy
2832: */
2833: public long getEstimatedPageCount(BaseContainerHandle handle,
2834: int flag) throws StandardException {
2835: // page count is set once per container materialization in cache
2836:
2837: if (estimatedPageCount < 0) {
2838: synchronized (allocCache) {
2839: estimatedPageCount = allocCache.getEstimatedPageCount(
2840: handle, firstAllocPageNumber);
2841: }
2842: }
2843:
2844: if (SanityManager.DEBUG)
2845: SanityManager.ASSERT(estimatedPageCount >= 0,
2846: "AllocCache returns negatie estimatedPageCount");
2847:
2848: return estimatedPageCount;
2849: }
2850:
2851: /*
2852: ** Methods used solely by StoredPage
2853: */
2854:
2855: /**
2856: Read a page into the supplied array.
2857:
2858: <BR> MT - thread safe
2859: @exception IOException error reading page
2860: @exception StandardException standard cloudscape error message
2861: */
2862: protected abstract void readPage(long pageNumber, byte[] pageData)
2863: throws IOException, StandardException;
2864:
2865: /**
2866: Write a page from the supplied array.
2867:
2868: <BR> MT - thread safe
2869: @exception IOException error writing page
2870: @exception StandardException Standard Cloudscape error policy
2871: */
2872: protected abstract void writePage(long pageNumber, byte[] pageData,
2873: boolean syncPage) throws IOException, StandardException;
2874:
2875: /*
2876: * Encryption/decryption
2877: */
2878: /**
2879: Decrypts a page
2880:
2881: <BR>MT - MT safe.
2882:
2883: @exception StandardException Standard Cloudscape error policy
2884: */
2885: protected void decryptPage(byte[] pageData, int pageSize)
2886: throws StandardException {
2887: // because all our page headers look identical, the
2888: // checksum is moved to the front so that it will hopefully
2889: // encrypt differently from page to page
2890: synchronized (this ) {
2891: if (encryptionBuffer == null
2892: || encryptionBuffer.length < pageSize)
2893: encryptionBuffer = new byte[pageSize];
2894:
2895: int len = dataFactory.decrypt(pageData, 0, pageSize,
2896: encryptionBuffer, 0);
2897:
2898: if (SanityManager.DEBUG)
2899: SanityManager.ASSERT(len == pageSize,
2900: "Encrypted page length != page length");
2901:
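// the decrypted image in encryptionBuffer is laid out as
// [ checksum (8 bytes) | rest of page ]; the two copies below move the
// page body back to the front and the checksum back to the end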
2902: // put the checksum where it belongs
2903: System.arraycopy(encryptionBuffer, 8, pageData, 0,
2904: pageSize - 8);
2905: System.arraycopy(encryptionBuffer, 0, pageData,
2906: pageSize - 8, 8);
2907: }
2908: }
2909:
2910: /**
2911: Encrypts a page.
2912:
2913: <BR> MT - not safe, call within synchronized block and only use the
2914: returned byte array withing synchronized block.
2915:
2916: @exception StandardException Standard Cloudscape error policy
2917: */
2918: protected byte[] encryptPage(byte[] pageData, int pageSize,
2919: byte[] encryptionBuffer, boolean newEngine)
2920: throws StandardException {
2921: // because all our page headers look identical, move the
2922: // checksum to the front so that it will hopefully encrypt
2923: // differently from page to page
2924:
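// build the buffer to encrypt as [ checksum (last 8 bytes of the page) |
// first (pageSize - 8) bytes of the page ] so that the varying checksum
// leads the data being encrypted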
2925: System
2926: .arraycopy(pageData, pageSize - 8, encryptionBuffer, 0,
2927: 8);
2928: System
2929: .arraycopy(pageData, 0, encryptionBuffer, 8,
2930: pageSize - 8);
2931:
2932: int len = dataFactory.encrypt(encryptionBuffer, 0, pageSize,
2933: encryptionBuffer, 0, newEngine);
2934:
2935: if (SanityManager.DEBUG)
2936: SanityManager.ASSERT(len == pageSize,
2937: "Encrypted page length != page length");
2938:
2939: return encryptionBuffer;
2940: }
2941:
2942: /**
2943: * Get encryption buffer.
2944: * MT - not safe, call within a synchronized block and only use the
2945: * returned byte array within the synchronized block.
2946: * @return byte array to be used for encrypting a page.
2947: */
2948: protected byte[] getEncryptionBuffer() {
2949:
2950: if (encryptionBuffer == null
2951: || encryptionBuffer.length < pageSize)
2952: encryptionBuffer = new byte[pageSize];
2953: return encryptionBuffer;
2954: }
2955:
2956: /*
2957: * page preallocation
2958: */
2959:
2960: /**
2961: preAllocate writes out the preallocated pages to disk if necessary.
2962:
2963: <BR>Make sure the container is large enough and the
2964: pages are well formatted. The only reason to do this is to save some
2965: I/O during page initialization. Once the initPage log record is
2966: written, it is expected that the page really does exist and is well
2967: formed or recovery will fail. However, we can gain some performance by
2968: writing a bunch of pages at a time rather than one at a time.
2969:
2970: <BR>If it doesn't make sense for the the implementation to have
2971: pre-allocation, just return 0.
2972:
2973: <BR>If the container is not being logged, don't actually do anything,
2974: just return 0.
2975:
2976: @return number of successfully preallocated pages, or 0 if
2977: no page has been preallocated
2978:
2979: @param lastPreallocPagenum the last preallocated page number as known
2980: by the allocation page
2981: @param preAllocSize try to preallocate this number of pages.
2982: Since only the container knows how many pages are actually on
2983: disk, it may determine that some of the pages the
2984: allocation page thinks need to be preallocated are already
2985: allocated; in those cases, act as if the preallocation is
2986: successful.
2987: */
2988: protected abstract int preAllocate(long lastPreallocPagenum,
2989: int preAllocSize);
2990:
2991: /**
2992: Preallocate the pages - actually doing it, called by subclass only
2993: */
2994: protected int doPreAllocatePages(long lastPreallocPagenum,
2995: int preAllocSize) {
2996: if (SanityManager.DEBUG)
2997: SanityManager
2998: .ASSERT(!dataFactory.isReadOnly(),
2999: "how can we be Preallocating pages in a read only database?");
3000:
3001: // initialize a new page in the cache
3002: int[] createArgs = new int[5];
3003: createArgs[0] = StoredPage.FORMAT_NUMBER; // default is a stored page
3004: createArgs[1] = CachedPage.WRITE_NO_SYNC; // write it but no sync
3005: createArgs[2] = pageSize;
3006: createArgs[3] = spareSpace;
3007: createArgs[4] = minimumRecordSize;
3008:
3009: StoredPage page = new StoredPage();
3010: page.setFactory(dataFactory);
3011:
3012: boolean error = false;
3013: int count = 0;
3014:
3015: while (count < preAllocSize) {
3016: PageKey pkey = new PageKey(identity, lastPreallocPagenum
3017: + count + 1);
3018: try {
3019: // create Identity will do a writePage
3020: page.createIdentity(pkey, createArgs);
3021:
3022: // if create identity somehow failed to do a write page
3023: if (SanityManager.DEBUG)
3024: SanityManager
3025: .ASSERT(!page.isDirty(),
3026: "create identity failed to do a write page");
3027:
3028: page.clearIdentity(); // ready the page for the next loop
3029:
3030: } catch (StandardException se) {
3031: // if something went wrong, stop and return how many we did
3032: // successfully
3033: error = true;
3034: }
3035:
3036: if (error)
3037: break;
3038:
3039: count++;
3040: }
3041:
3042: return count;
3043: }
3044:
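/*
Simple accessors for the container's page geometry: page size, spare
space setting, and minimum record size.
*/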
3045: protected int getPageSize() {
3046: return pageSize;
3047: }
3048:
3049: protected int getSpareSpace() {
3050: return spareSpace;
3051: }
3052:
3053: protected int getMinimumRecordSize() {
3054: return minimumRecordSize;
3055: }
3056:
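/**
Switch to multi-insert-page mode: when contention is detected on the
single last-inserted page, grow the lastInsertedPage array to 4 slots and
add 3 more pages so that concurrent inserters are spread across several
last-inserted pages.
*/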
3057: private synchronized void switchToMultiInsertPageMode(
3058: BaseContainerHandle handle) throws StandardException {
3059: if (lastInsertedPage.length == 1) {
3060: long last = lastInsertedPage[0];
3061:
3062: lastInsertedPage = new long[4];
3063: lastInsertedPage[0] = last;
3064:
3065: for (int i = 3; i > 0; i--) {
3066: Page page = addPage(handle, false);
3067: lastInsertedPage[i] = page.getPageNumber();
3068: page.unlatch();
3069: }
3070: }
3071: }
3072:
3073: /*
3074: * Setting and getting lastInsertedPage and lastUnfilledPage in a thread
3075: * safe manner.
3076: */
3077: private synchronized long getLastInsertedPage() {
3078: if (lastInsertedPage.length == 1) {
3079: if (SanityManager.DEBUG)
3080: SanityManager.ASSERT(lastInsertedPage_index == 0);
3081:
3082: // optimize the usual case where no concurrent insert has kicked us
3083: // into multi-page mode - ie. only ONE last page.
3084: return (lastInsertedPage[0]);
3085: } else {
3086: long ret = lastInsertedPage[lastInsertedPage_index++];
3087:
3088: if (lastInsertedPage_index > (lastInsertedPage.length - 1)) {
3089: lastInsertedPage_index = 0;
3090: }
3091:
3092: return (ret);
3093: }
3094: }
3095:
3096: private synchronized long getLastUnfilledPage() {
3097: return lastUnfilledPage;
3098: }
3099:
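/*
Reset the last-inserted-page tracking to 'size' slots, all set to
INVALID_PAGE_NUMBER, and start handing out slot 0 first.
*/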
3100: private synchronized void initializeLastInsertedPage(int size) {
3101: lastInsertedPage = new long[size];
3102:
3103: for (int i = lastInsertedPage.length - 1; i >= 0; i--)
3104: lastInsertedPage[i] = ContainerHandle.INVALID_PAGE_NUMBER;
3105:
3106: lastInsertedPage_index = 0;
3107: }
3108:
3109: private synchronized void setLastInsertedPage(long val) {
3110: lastInsertedPage[lastInsertedPage_index] = val;
3111: }
3112:
3113: private synchronized void setLastUnfilledPage(long val) {
3114: lastUnfilledPage = val;
3115: }
3116:
3117: /*
3118: ** Hide our super-class's methods to ensure that cache management
3119: ** is correct when the container is obtained and released.
3120: */
3121:
3122: /**
3123: The container is kept by the find() in File.openContainer.
3124: */
3125: protected void letGo(BaseContainerHandle handle) {
3126: super .letGo(handle);
3127:
3128: containerCache.release(this );
3129: }
3130:
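/**
Latch a page found in the page cache. If the latch cannot be obtained
(super.latchPage returns null), release the page back to the page cache
so the caller does not leak a kept cache entry, and return null.
*/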
3131: protected BasePage latchPage(BaseContainerHandle handle,
3132: BasePage foundPage, boolean wait) throws StandardException {
3133:
3134: if (foundPage == null)
3135: return null;
3136:
3137: BasePage ret = super .latchPage(handle, foundPage, wait);
3138: if (ret == null) {
3139: // page is still cached
3140: pageCache.release((Cacheable) foundPage);
3141: }
3142: return ret;
3143: }
3144:
3145: /**
3146: * backup the container.
3147: *
3148: * @param handle the container handle.
3149: * @param backupLocation location of the backup container.
3150: * @exception StandardException Standard Derby error policy
3151: */
3152: protected abstract void backupContainer(BaseContainerHandle handle,
3153: String backupLocation) throws StandardException;
3154: }
|