0001: /*
0002:
0003: Derby - Class org.apache.derby.impl.store.raw.data.StreamFileContainer
0004:
0005: Licensed to the Apache Software Foundation (ASF) under one or more
0006: contributor license agreements. See the NOTICE file distributed with
0007: this work for additional information regarding copyright ownership.
0008: The ASF licenses this file to you under the Apache License, Version 2.0
0009: (the "License"); you may not use this file except in compliance with
0010: the License. You may obtain a copy of the License at
0011:
0012: http://www.apache.org/licenses/LICENSE-2.0
0013:
0014: Unless required by applicable law or agreed to in writing, software
0015: distributed under the License is distributed on an "AS IS" BASIS,
0016: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
0017: See the License for the specific language governing permissions and
0018: limitations under the License.
0019:
0020: */
0021:
0022: package org.apache.derby.impl.store.raw.data;
0023:
0024: import org.apache.derby.iapi.reference.SQLState;
0025:
0026: import org.apache.derby.iapi.services.context.ContextService;
0027:
0028: import org.apache.derby.iapi.services.sanity.SanityManager;
0029: import org.apache.derby.iapi.services.io.Storable;
0030: import org.apache.derby.iapi.services.io.StreamStorable;
0031: import org.apache.derby.iapi.services.io.FormatIdInputStream;
0032: import org.apache.derby.iapi.services.io.FormatIdOutputStream;
0033: import org.apache.derby.iapi.services.io.FormatIdUtil;
0034: import org.apache.derby.iapi.services.io.StoredFormatIds;
0035: import org.apache.derby.iapi.services.io.TypedFormat;
0036: import org.apache.derby.iapi.services.monitor.Monitor;
0037:
0038: import org.apache.derby.iapi.error.StandardException;
0039: import org.apache.derby.iapi.store.access.AccessFactory;
0040: import org.apache.derby.iapi.store.access.RowSource;
0041: import org.apache.derby.iapi.store.access.RowUtil;
0042: import org.apache.derby.iapi.store.access.TransactionController;
0043: import org.apache.derby.iapi.store.raw.ContainerKey;
0044: import org.apache.derby.iapi.store.raw.RawStoreFactory;
0045: import org.apache.derby.iapi.store.raw.StreamContainerHandle;
0046:
0047: import org.apache.derby.io.StorageFactory;
0048: import org.apache.derby.io.WritableStorageFactory;
0049: import org.apache.derby.io.StorageFile;
0050:
0051: import org.apache.derby.impl.store.raw.data.DecryptInputStream;
0052: import org.apache.derby.impl.store.raw.data.StoredFieldHeader;
0053: import org.apache.derby.impl.store.raw.data.StoredRecordHeader;
0054:
0055: import org.apache.derby.iapi.services.io.ArrayInputStream;
0056: import org.apache.derby.iapi.services.io.FormatableBitSet;
0057: import org.apache.derby.iapi.services.io.CompressedNumber;
0058: import org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream;
0059: import org.apache.derby.iapi.services.io.LimitInputStream;
0060: import org.apache.derby.iapi.services.property.PropertyUtil;
0061: import org.apache.derby.iapi.util.ReuseFactory;
0062:
0063: import java.util.Properties;
0064: import java.io.InputStream;
0065: import java.io.BufferedInputStream;
0066: import java.io.OutputStream;
0067: import java.io.IOException;
0068: import java.io.EOFException;
0069: import java.io.InvalidClassException;
0070: import java.io.Externalizable;
0071: import java.security.AccessController;
0072: import java.security.PrivilegedExceptionAction;
0073: import java.security.PrivilegedActionException;
0074: import java.io.FileNotFoundException;
0075:
0076: /**
0077:
The format of this stream file is:
(RH) (FH) (field data) (FH) (field data) ........ (FH) (field data)

The record header (RH) is stored once, at the beginning of the file,
and applies to all the rows stored in this file; it indicates how many
fields are in each row. After it, all the columns of each row are stored
in sequence. Each field header (FH) stored in this file is fixed size,
with its fieldDataLength slot set to LARGE_SLOT_SIZE (4) bytes.

NOTE: No locks are used in this container, and transactions are not logged.
0089:
0090: **/
0091:
0092: public class StreamFileContainer implements TypedFormat,
0093: PrivilegedExceptionAction {
0094:
0095: /**************************************************************************
0096: * Constant Fields of the class
0097: **************************************************************************
0098: */
0099:
0100: /*
0101: * typed format
0102: * format Id must fit in 4 bytes
0103: */
0104: protected static int formatIdInteger = StoredFormatIds.RAW_STORE_SINGLE_CONTAINER_STREAM_FILE;
0105:
0106: // 4 bytes for field data length
0107: protected static final int LARGE_SLOT_SIZE = 4;
0108:
0109: protected static final int MIN_BUFFER_SIZE = RawStoreFactory.STREAM_FILE_BUFFER_SIZE_MINIMUM;
0110: protected static final int FIELD_STATUS = StoredFieldHeader
0111: .setFixed(StoredFieldHeader.setInitial(), true);
0112: protected static final int FIELD_HEADER_SIZE = StoredFieldHeader
0113: .size(FIELD_STATUS, 0, LARGE_SLOT_SIZE);
0114:
0115: /**************************************************************************
0116: * Fields of the class
0117: **************************************************************************
0118: */
0119: protected ContainerKey identity;
0120: private BaseDataFileFactory dataFactory; // the factory that made me
0121:
0122: private int bufferSize;
0123:
0124: private StorageFile file;
0125:
0126: private OutputStream fileOut;
0127: private DynamicByteArrayOutputStream out;
0128: private FormatIdOutputStream logicalDataOut;
0129:
0130: private InputStream fileIn;
0131: private BufferedInputStream bufferedIn;
0132: private DecryptInputStream decryptIn;
0133: private LimitInputStream limitIn;
0134: private FormatIdInputStream logicalDataIn;
0135:
0136: private StoredRecordHeader recordHeader;
0137:
0138: private byte[] ciphertext;
0139: private byte[] zeroBytes; // in case encryption
0140: // stream needs pad.
0141:
0142: /* privileged actions */
0143: private static final int STORAGE_FILE_EXISTS_ACTION = 1;
0144: private static final int STORAGE_FILE_DELETE_ACTION = 2;
0145: private static final int STORAGE_FILE_MKDIRS_ACTION = 3;
0146: private static final int STORAGE_FILE_GET_OUTPUT_STREAM_ACTION = 4;
0147: private static final int STORAGE_FILE_GET_INPUT_STREAM_ACTION = 5;
0148: private int actionCode;
0149: private StorageFile actionStorageFile;
0150:
0151: /**************************************************************************
0152: * Constructors for This class:
0153: **************************************************************************
0154: */
0155:
0156: /**
0157: * Constructor.
0158: *
0159: * @exception StandardException Standard exception policy.
0160: **/
0161: StreamFileContainer(ContainerKey identity,
0162: BaseDataFileFactory dataFactory) throws StandardException {
0163: this .identity = identity;
0164: this .dataFactory = dataFactory;
0165: }
0166:
0167: /**
0168: * Constructor
0169: * <p>
0170: * when rowSource is passed to the constructor, it will be loaded into the
0171: * container after the container has been created.
0172: * <p>
0173: *
0174: * @exception StandardException Standard exception policy.
0175: **/
0176: StreamFileContainer(ContainerKey identity,
0177: BaseDataFileFactory dataFactory, Properties prop)
0178: throws StandardException {
0179: this .identity = identity;
0180: this .dataFactory = dataFactory;
0181:
0182: try {
0183: file = getFileName(identity, true, false);
0184:
0185: if (privExists(file)) {
0186: // note I'm left in the no-identity state as fillInIdentity()
0187: // hasn't been called.
0188: throw StandardException.newException(
0189: SQLState.FILE_EXISTS, file);
0190: }
0191:
0192: // get the properties to set buffer size
0193: // derby.storage.streamFileBufferSize
0194: getContainerProperties(prop);
0195:
0196: } catch (SecurityException se) {
0197: throw StandardException.newException(SQLState.FILE_CREATE,
0198: se, file);
0199: }
0200: }
0201:
0202: /**************************************************************************
0203: * Private/Protected methods of This class:
0204: **************************************************************************
0205: */
0206:
0207: /**
0208: * Open a stream file container.
0209: * <p>
0210: * Open a container. Open the file that maps to this container, if the
0211: * file does not exist then we assume the container was never created
0212: * and return.
0213: * If the file exists but we have trouble opening it then we throw some
0214: * exception.
0215: * <p>
0216: *
0217: * @return The opened StreamFileContainer.
0218: *
0219: * @param forUpdate Currently only accepts false, updating and existing
0220: * stream file container is not currently supported.
0221: *
0222: * @exception StandardException Standard exception policy.
0223: **/
0224: protected StreamFileContainer open(boolean forUpdate)
0225: throws StandardException {
0226:
0227: file = getFileName(this .identity, false, true);
0228: if (!privExists(file))
0229: return null;
0230:
0231: try {
0232: if (!forUpdate) {
0233: fileIn = privGetInputStream(file);
0234:
0235: if (dataFactory.databaseEncrypted()) {
0236: // if the database is encrypted, when reading the data back
0237: // from the file stream, we need to used the decrypt stream
0238: // to buffer up the bytes for reading. DecryptInputStream
0239: // also decrypts the data.
0240:
0241: MemByteHolder byteHolder = new MemByteHolder(
0242: RawStoreFactory.STREAM_FILE_BUFFER_SIZE_DEFAULT);
0243:
0244: decryptIn = new DecryptInputStream(fileIn,
0245: byteHolder, dataFactory);
0246:
0247: limitIn = new LimitInputStream(decryptIn);
0248: } else {
0249: bufferedIn = new BufferedInputStream(
0250: fileIn,
0251: RawStoreFactory.STREAM_FILE_BUFFER_SIZE_DEFAULT);
0252:
0253: limitIn = new LimitInputStream(bufferedIn);
0254: }
0255:
0256: // the logicalDataIn input stream is on top of a limit Input
0257: // stream, use a limit stream to make sure we don't read off
0258: // more then what each column says it contains
0259:
0260: logicalDataIn = new FormatIdInputStream(limitIn);
0261:
0262: // get the record header
0263: recordHeader = new StoredRecordHeader();
0264: recordHeader.read(logicalDataIn);
0265:
0266: } else {
0267: if (SanityManager.DEBUG)
0268: SanityManager
0269: .THROWASSERT("updating existing stream container not supported yet");
0270:
0271: return null;
0272: }
0273: } catch (IOException ioe) {
0274: throw StandardException.newException(SQLState.FILE_CREATE,
0275: ioe, file);
0276: }
0277:
0278: return this ;
0279: }
0280:
0281: /**
0282: * Close the stream file.
0283: * <p>
0284: * Close this stream file, and all streams associated with it.
0285: * <p>
0286: *
0287: * @exception StandardException Standard exception policy.
0288: **/
0289: protected void close() {
0290: try {
0291:
0292: if (fileIn != null) {
0293: fileIn.close();
0294: fileIn = null;
0295: if (dataFactory.databaseEncrypted()) {
0296: decryptIn.close();
0297: decryptIn = null;
0298: } else {
0299: bufferedIn.close();
0300: bufferedIn = null;
0301: }
0302: logicalDataIn.close();
0303: logicalDataIn = null;
0304: }
0305:
0306: if (fileOut != null) {
0307: fileOut.close();
0308: logicalDataOut.close();
0309: fileOut = null;
0310: logicalDataOut = null;
0311: out = null;
0312: }
0313:
0314: } catch (IOException ioe) {
0315: // ignore close errors from fileOut.close() and fileIn.close() -
0316: // there isn't much we can do about them anyway - and some of the
0317: // interfaces don't want to deal with exceptions from close().
0318:
0319: /*
0320: throw StandardException.newException(
0321: SQLState.FILE_CREATE, ioe, file);
0322: */
0323: }
0324: }
0325:
0326: /**************************************************************************
0327: * Public Methods of This class:
0328: **************************************************************************
0329: */
0330:
0331: /**
0332: * Return my format identifier.
0333: **/
0334: public int getTypeFormatId() {
0335: return StoredFormatIds.RAW_STORE_SINGLE_CONTAINER_STREAM_FILE;
0336: }
0337:
0338: /**
0339: * Request the system properties associated with a stream container.
0340: * <p>
0341: * Request the value of properties associated with a stream container.
0342: * The following properties can be requested:
0343: * derby.storage.streamFileBufferSize
0344: *
0345: * <p>
0346: * To get the value of a particular property add it to the property list,
0347: * and on return the value of the property will be set to it's current
0348: * value. For example:
0349: *
0350: * get_prop(ConglomerateController cc)
0351: * {
0352: * Properties prop = new Properties();
0353: * prop.put("derby.storage.streamFileBufferSize", "");
0354: * cc.getContainerProperties(prop);
0355: *
0356: * System.out.println(
0357: * "stream table's buffer size = " +
0358: * prop.getProperty("derby.storage.streamFileBufferSize");
0359: * }
0360: *
0361: * @param prop Property list to fill in.
0362: *
0363: * @exception StandardException Standard exception policy.
0364: **/
0365: public void getContainerProperties(Properties prop)
0366: throws StandardException {
0367:
0368: AccessFactory af = (AccessFactory) Monitor.getServiceModule(
0369: dataFactory, AccessFactory.MODULE);
0370:
0371: TransactionController tc = (af == null) ? null : af
0372: .getTransaction(ContextService.getFactory()
0373: .getCurrentContextManager());
0374:
0375: bufferSize = PropertyUtil.getServiceInt(tc, prop,
0376: RawStoreFactory.STREAM_FILE_BUFFER_SIZE_PARAMETER,
0377: RawStoreFactory.STREAM_FILE_BUFFER_SIZE_MINIMUM,
0378: RawStoreFactory.STREAM_FILE_BUFFER_SIZE_MAXIMUM,
0379: RawStoreFactory.STREAM_FILE_BUFFER_SIZE_DEFAULT);
0380: }
0381:
0382: /**
0383: * Request the container key associated with the stream container.
0384: **/
0385: public ContainerKey getIdentity() {
0386: return this .identity;
0387: }
0388:
0389: /**
0390: * Can I use this container?
0391: * <p>
0392: * This method always return true right now.
0393: * In the future when there are different uses for this container,
0394: * we may need to add qualifications for this.
0395: *
0396: * @exception StandardException Standard exception policy.
0397: **/
0398: protected boolean use(StreamContainerHandle handle)
0399: throws StandardException {
0400: return true;
0401: }
0402:
0403: /**
0404: * load data into this container.
0405: * <p>
0406: * populate the stream container with data in the rowSource
0407: * <p>
0408: *
0409: * @param rowSource The row source to get rows to load into this container.
0410: *
0411: * @exception StandardException Standard exception policy.
0412: **/
0413: public void load(RowSource rowSource) throws StandardException {
0414: // use this output stream to buffer rows before inserting into file.
0415: out = new DynamicByteArrayOutputStream(bufferSize);
0416: logicalDataOut = new FormatIdOutputStream(out);
0417: boolean encrypted = dataFactory.databaseEncrypted();
0418:
0419: // reserve the first dataFactory.getEncryptionBlockSize() - 1 bytes, if the database is
0420: // encrypted These reserved bytes will be used to pad the byte array if
0421: // it is not dataFactory.getEncryptionBlockSize() aligned.
0422: if (encrypted) {
0423: if (zeroBytes == null)
0424: zeroBytes = new byte[dataFactory
0425: .getEncryptionBlockSize() - 1];
0426:
0427: out.write(zeroBytes, 0, dataFactory
0428: .getEncryptionBlockSize() - 1);
0429: }
0430:
0431: try {
0432: fileOut = privGetOutputStream(file);
0433:
0434: FormatableBitSet validColumns = rowSource.getValidColumns();
0435:
0436: Object[] row = rowSource.getNextRowFromRowSource();
0437:
0438: int numberFields = 0;
0439: if (validColumns != null) {
0440: for (int i = validColumns.getLength() - 1; i >= 0; i--) {
0441: if (validColumns.isSet(i)) {
0442: numberFields = i + 1;
0443: break;
0444: }
0445: }
0446: } else {
0447: numberFields = row.length;
0448: }
0449:
0450: // make the record header to have 0 record id
0451: recordHeader = new StoredRecordHeader(0, numberFields);
0452:
0453: // write the record header once for all the rows, directly to the
0454: // beginning of the file.
0455: int rhLen = recordHeader.write(out);
0456:
0457: int validColumnsSize = validColumns == null ? 0
0458: : validColumns.getLength();
0459:
0460: while (row != null) {
0461:
0462: int arrayPosition = -1;
0463:
0464: for (int i = 0; i < numberFields; i++) {
0465:
0466: // write each column out
0467: if (validColumns == null) {
0468: arrayPosition++;
0469: Object column = row[arrayPosition];
0470: writeColumn(column);
0471: } else {
0472:
0473: if (validColumnsSize > i
0474: && validColumns.isSet(i)) {
0475: arrayPosition++;
0476: Object column = row[arrayPosition];
0477: writeColumn(column);
0478: } else {
0479: // it is a non-existent column
0480: writeColumn(null);
0481: }
0482: }
0483:
0484: // put the buffer onto the page, only if it exceeded the
0485: // original buffer size or it has less than 100 bytes left
0486: // in the buffer
0487: if ((out.getUsed() >= bufferSize)
0488: || ((bufferSize - out.getUsed()) < MIN_BUFFER_SIZE)) {
0489: writeToFile();
0490: }
0491: }
0492:
0493: // get the next row and its valid columns from the rowSource
0494: row = rowSource.getNextRowFromRowSource();
0495: }
0496:
0497: // Write the buffer to the file if there is something in the output
0498: // buffer. Remember we pad the output buffer with
0499: // dataFactory.getEncryptionBlockSize() - 1 if this is an encypted database
0500: if (encrypted) {
0501: if (out.getUsed() > (dataFactory
0502: .getEncryptionBlockSize() - 1))
0503: writeToFile();
0504: } else if (out.getUsed() > 0) {
0505: writeToFile();
0506: }
0507:
0508: } catch (IOException ioe) {
0509: // handle IO error...
0510: throw StandardException.newException(
0511: SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
0512:
0513: } finally {
0514: close();
0515: }
0516: }
0517:
0518: /*
0519:
0520: */
0521: /**
0522: * Write the buffer to the file.
0523: * <p>
0524: * If the database is encrypted, the dataFactory.getEncryptionBlockSize() - 1 reserved bytes will
0525: * be used to pad the byte array to be dataFactory.getEncryptionBlockSize()
0526: * aligned. Before the bytes are encrypted and written to the file stream,
0527: * the actual length of the byte array is written out as a compressed
0528: * integer. This number will be used when decrypting the data.
0529: *
0530: * If the database is not encrypted, then, we don't reserve the bytes
0531: * upfront, and we simple just write the bytes out to the file stream.
0532: *
0533: * @exception StandardException Standard exception policy.
0534: **/
0535: private void writeToFile() throws StandardException {
0536:
0537: try {
0538: if (dataFactory.databaseEncrypted()) {
0539: // if db is encrypted,
0540: // use the first ENCRYPTION_ALIGN bytes for padding.
0541: //
0542: int realLen = out.getUsed()
0543: - (dataFactory.getEncryptionBlockSize() - 1);
0544: int tail = realLen
0545: % dataFactory.getEncryptionBlockSize();
0546: int padding = (tail == 0) ? 0 : (dataFactory
0547: .getEncryptionBlockSize() - tail);
0548:
0549: int startByte = (tail == 0) ? (dataFactory
0550: .getEncryptionBlockSize() - 1) : (tail - 1);
0551: int encryptedLen = realLen + padding;
0552:
0553: // there is nothing to write, just the encryption padding
0554: if (realLen <= 0)
0555: return;
0556:
0557: if (ciphertext == null) {
0558: ciphertext = new byte[encryptedLen];
0559: } else {
0560: if (ciphertext.length < encryptedLen)
0561: ciphertext = new byte[encryptedLen];
0562: }
0563:
0564: dataFactory.encrypt(out.getByteArray(), startByte,
0565: encryptedLen, ciphertext, 0, false);
0566:
0567: // write out the actual length, then the encrypted bytes.
0568: CompressedNumber.writeInt(fileOut, realLen);
0569: dataFactory.writeInProgress();
0570: try {
0571: fileOut.write(ciphertext, 0, encryptedLen);
0572: } finally {
0573: dataFactory.writeFinished();
0574: }
0575:
0576: // reset the dynamic buffer
0577: out.reset();
0578:
0579: // reserve bytes if database is encrypted.
0580: if (dataFactory.databaseEncrypted()) {
0581: if (zeroBytes == null)
0582: zeroBytes = new byte[dataFactory
0583: .getEncryptionBlockSize() - 1];
0584:
0585: out.write(zeroBytes, 0, dataFactory
0586: .getEncryptionBlockSize() - 1);
0587: }
0588:
0589: } else {
0590: // nothing to write
0591: if (out.getUsed() == 0)
0592: return;
0593:
0594: dataFactory.writeInProgress();
0595: try {
0596: fileOut.write(out.getByteArray(), 0, out.getUsed());
0597: } finally {
0598: dataFactory.writeFinished();
0599: }
0600:
0601: // reset the dynamic buffer
0602: out.reset();
0603: }
0604: } catch (IOException ioe) {
0605: throw StandardException.newException(
0606: SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
0607: }
0608: }
0609:
0610: private void writeColumn(Object column) throws StandardException,
0611: IOException {
0612:
0613: int fieldStatus = FIELD_STATUS;
0614: if (column == null) {
0615: // just write a non-existent header.
0616: fieldStatus = StoredFieldHeader.setNonexistent(fieldStatus);
0617: StoredFieldHeader.write(out, fieldStatus, 0,
0618: LARGE_SLOT_SIZE);
0619: return;
0620: }
0621:
0622: // if the column is a null column, write the field header now.
0623: if (column instanceof Storable) {
0624: Storable sColumn = (Storable) column;
0625: if (sColumn.isNull()) {
0626: fieldStatus = StoredFieldHeader.setNull(fieldStatus,
0627: true);
0628: StoredFieldHeader.write(out, fieldStatus, 0,
0629: LARGE_SLOT_SIZE);
0630: return;
0631: }
0632: }
0633:
0634: int beginPosition = out.getPosition();
0635: int fieldDataLength = 0;
0636:
0637: // write out the header, mostly to reserve the space
0638: StoredFieldHeader.write(out, fieldStatus, fieldDataLength,
0639: LARGE_SLOT_SIZE);
0640:
0641: if (column instanceof StreamStorable) {
0642: if (((StreamStorable) column).returnStream() != null) {
0643: column = (InputStream) ((StreamStorable) column)
0644: .returnStream();
0645: }
0646: }
0647:
0648: if (column instanceof InputStream) {
0649: InputStream inColumn = (InputStream) column;
0650: int bufferLen = inColumn.available();
0651: byte[] bufData = new byte[bufferLen];
0652:
0653: do {
0654: int lenRead = inColumn.read(bufData, bufferLen, 0);
0655: if (lenRead != -1) {
0656: fieldDataLength += lenRead;
0657: out.write(bufData, lenRead, 0);
0658: } else {
0659: break;
0660: }
0661: } while (true);
0662:
0663: } else if (column instanceof Storable) {
0664:
0665: Storable sColumn = (Storable) column;
0666: // write field data to the stream, we already handled the null case
0667:
0668: sColumn.writeExternal(logicalDataOut);
0669: fieldDataLength = out.getPosition() - beginPosition
0670: - FIELD_HEADER_SIZE;
0671:
0672: } else {
0673: // Serializable/Externalizable/Formattable
0674: // all look the same at this point.
0675: logicalDataOut.writeObject(column);
0676: fieldDataLength = out.getPosition() - beginPosition
0677: - FIELD_HEADER_SIZE;
0678: }
0679:
0680: // Now we go back to update the fieldDataLength in the field header
0681: int endPosition = out.getPosition();
0682: out.setPosition(beginPosition);
0683:
0684: StoredFieldHeader.write(out, fieldStatus, fieldDataLength,
0685: LARGE_SLOT_SIZE);
0686:
0687: // set position to the end of the field
0688: if (!StoredFieldHeader.isNull(fieldStatus))
0689: out.setPosition(endPosition);
0690: }
0691:
/**
 * Fetch the next row from the container into the supplied row array.
 * <p>
 * At most row.length columns of the stored row are read. Each element
 * of row is expected to already hold an object of the stored column's
 * type (Storable columns are restored in place; Externalizable columns
 * are filled in; anything else is replaced via readObject()).
 *
 * @param row destination array for the fetched columns
 *
 * @return true if a row was fetched, false if end-of-file was reached
 *         before the first column (the container is closed in that case)
 *
 * @exception StandardException Standard exception policy.
 **/
public boolean fetchNext(Object[] row) throws StandardException {

    // true only while control is inside user-supplied restore code;
    // lets the catch blocks classify where a failure happened
    boolean inUserCode = false;
    int columnId = 0;

    try {

        // Get the number of columns in the row.
        int numberFields = recordHeader.getNumberFields();

        int arrayPosition = 0;
        for (columnId = 0; columnId < numberFields; columnId++) {

            // the caller asked for fewer columns than are stored
            if (arrayPosition >= row.length)
                break;

            limitIn.clearLimit();

            // read the field header
            int fieldStatus = StoredFieldHeader
                    .readStatus(logicalDataIn);
            int fieldDataLength = StoredFieldHeader
                    .readFieldDataLength(logicalDataIn,
                            fieldStatus, LARGE_SLOT_SIZE);

            // don't let the column read past its own field data
            limitIn.setLimit(fieldDataLength);

            if (SanityManager.DEBUG) {

                if (StoredFieldHeader.isExtensible(fieldStatus)) {
                    SanityManager
                            .THROWASSERT("extensible fields not supported yet. columnId = "
                                    + columnId);
                }

                SanityManager.ASSERT(!StoredFieldHeader
                        .isOverflow(fieldStatus),
                        "overflow field is not supported yet");
            }

            Object column = row[arrayPosition];

            // Deal with Storable columns
            if (StoredFieldHeader.isNullable(fieldStatus)) {

                if (column == null) {
                    throw StandardException.newException(
                            SQLState.DATA_NULL_STORABLE_COLUMN,
                            Integer.toString(columnId));
                }

                // SRW-DJD RESOLVE: - fix error message
                if (!(column instanceof Storable)) {
                    throw StandardException.newException(
                            SQLState.DATA_NULL_STORABLE_COLUMN,
                            column.getClass().getName());
                }

                Storable sColumn = (Storable) column;

                // is the column null ?
                if (StoredFieldHeader.isNull(fieldStatus)) {

                    sColumn.restoreToNull();
                    arrayPosition++;
                    continue;
                }

                inUserCode = true;
                sColumn.readExternal(logicalDataIn);
                inUserCode = false;
                arrayPosition++;
                continue;
            }

            // Only Storables can be null ... SRW-DJD RESOLVE: - fix error message
            if (StoredFieldHeader.isNull(fieldStatus)) {
                throw StandardException.newException(
                        SQLState.DATA_NULL_STORABLE_COLUMN, Integer
                                .toString(columnId));
            }

            // This is a non-extensible field, which means the caller must
            // know the correct type and thus the element in row is the
            // correct type or null. If the element implements
            // Externalizable then we can just fill it in, otherwise it
            // must be Serializable and we have to throw it away.

            Object neColumn = row[arrayPosition];

            if (neColumn instanceof Externalizable) {

                Externalizable exColumn = (Externalizable) neColumn;

                inUserCode = true;
                exColumn.readExternal(logicalDataIn);
                inUserCode = false;

                arrayPosition++;
                continue;
            }

            // neColumn will be ignored
            neColumn = null;
            inUserCode = true;
            row[arrayPosition] = logicalDataIn.readObject();
            inUserCode = false;

            arrayPosition++;
            continue;
        }

    } catch (IOException ioe) {

        // an exception during the restore of a user column, this doesn't
        // make the database corrupt, just that this field is inaccessible
        if (inUserCode) {

            if (ioe instanceof EOFException) {
                throw StandardException.newException(
                        SQLState.DATA_STORABLE_READ_MISMATCH, ioe,
                        logicalDataIn.getErrorInfo());
            }

            throw StandardException.newException(
                    SQLState.DATA_STORABLE_READ_EXCEPTION, ioe,
                    logicalDataIn.getErrorInfo());
        }

        if (ioe instanceof InvalidClassException) {
            throw StandardException.newException(
                    SQLState.DATA_STORABLE_READ_EXCEPTION, ioe,
                    logicalDataIn.getErrorInfo());
        }

        // If we are at the end of the file, trying to fetch the first
        // column, then we know there are no more rows to fetch
        if ((ioe instanceof EOFException) && (columnId == 0)) {
            close();
            return false;
        }

        // any other IO failure outside user code means the container
        // itself is corrupt
        throw dataFactory.markCorrupt(StandardException
                .newException(
                        SQLState.DATA_CORRUPT_STREAM_CONTAINER,
                        ioe, identity));

    } catch (ClassNotFoundException cnfe) {

        if (SanityManager.DEBUG) {
            SanityManager.ASSERT(inUserCode);
        }

        // an exception during the restore of a user column, this doesn't
        // make the database corrupt, just that this field is inaccessible
        throw StandardException.newException(
                SQLState.DATA_STORABLE_READ_MISSING_CLASS, cnfe,
                logicalDataIn.getErrorInfo());

    } catch (LinkageError le) {
        if (inUserCode) {
            throw StandardException.newException(
                    SQLState.DATA_STORABLE_READ_EXCEPTION, le,
                    logicalDataIn.getErrorInfo());
        }
        throw le;
    }

    return true;

}
0863:
0864: /**
0865: * Close the stream file and remove the file.
0866: *
0867: * @exception StandardException Segment directory cannot be created
0868: **/
0869: public boolean removeContainer() throws StandardException {
0870: close();
0871:
0872: if (privExists(file)) {
0873: return privDelete(file);
0874: } else {
0875: return true;
0876: }
0877:
0878: }
0879:
0880: /**
0881: * Return a file name for the identity.
0882: * <p>
0883: * Return a valid file name for the identity, or null if the data
0884: * directory for this segment cannot be created
0885: *
0886: * @exception StandardException Segment directory cannot be created
0887: **/
0888: protected StorageFile getFileName(ContainerKey identity,
0889: boolean forCreate, boolean errorOK)
0890: throws StandardException {
0891: if (identity.getSegmentId() == StreamContainerHandle.TEMPORARY_SEGMENT) {
0892: return (dataFactory.storageFactory.newStorageFile(
0893: dataFactory.storageFactory.getTempDir(), "T"
0894: + identity.getContainerId() + ".tmp"));
0895: } else {
0896: if (SanityManager.DEBUG)
0897: SanityManager
0898: .THROWASSERT("cannot create stream container in non-temp segments yet.");
0899:
0900: StorageFile container = dataFactory.getContainerPath(
0901: identity, false);
0902:
0903: if (!privExists(container)) {
0904:
0905: if (!forCreate)
0906: return null;
0907:
0908: StorageFile directory = container.getParentDir();
0909:
0910: if (!privExists(directory)) {
0911: // make sure only 1 thread can create a segment at one time
0912: synchronized (dataFactory) {
0913: if (!privExists(directory)) {
0914: if (!privMkdirs(directory)) {
0915: if (errorOK)
0916: return null;
0917: else
0918: throw StandardException
0919: .newException(
0920: SQLState.FILE_CANNOT_CREATE_SEGMENT,
0921: directory);
0922: }
0923: }
0924: }
0925: }
0926: }
0927: return container;
0928: }
0929: }
0930:
0931: private synchronized boolean privExists(StorageFile file) {
0932: actionCode = STORAGE_FILE_EXISTS_ACTION;
0933: actionStorageFile = file;
0934:
0935: try {
0936: Object ret = AccessController.doPrivileged(this );
0937: return ((Boolean) ret).booleanValue();
0938: } catch (PrivilegedActionException pae) {
0939: // method executed under this priveleged block
0940: // does not throw an exception
0941: return false;
0942: } finally {
0943: actionStorageFile = null;
0944: }
0945: }
0946:
0947: private synchronized boolean privMkdirs(StorageFile file) {
0948: actionCode = STORAGE_FILE_MKDIRS_ACTION;
0949: actionStorageFile = file;
0950:
0951: try {
0952: Object ret = AccessController.doPrivileged(this );
0953: return ((Boolean) ret).booleanValue();
0954: } catch (PrivilegedActionException pae) {
0955: // method executed under this priveleged block
0956: // does not throw an exception
0957: return false;
0958: } finally {
0959: actionStorageFile = null;
0960: }
0961: }
0962:
0963: private synchronized boolean privDelete(StorageFile file) {
0964: actionCode = STORAGE_FILE_DELETE_ACTION;
0965: actionStorageFile = file;
0966:
0967: try {
0968: Object ret = AccessController.doPrivileged(this );
0969: return ((Boolean) ret).booleanValue();
0970: } catch (PrivilegedActionException pae) {
0971: // method executed under this priveleged block
0972: // does not throw an exception
0973: return false;
0974: } finally {
0975: actionStorageFile = null;
0976: }
0977: }
0978:
0979: private synchronized OutputStream privGetOutputStream(
0980: StorageFile file) throws FileNotFoundException {
0981: actionCode = STORAGE_FILE_GET_OUTPUT_STREAM_ACTION;
0982: actionStorageFile = file;
0983:
0984: try {
0985: return (OutputStream) AccessController.doPrivileged(this );
0986: } catch (PrivilegedActionException pae) {
0987: throw (FileNotFoundException) pae.getException();
0988: } finally {
0989: actionStorageFile = null;
0990: }
0991: }
0992:
0993: private synchronized InputStream privGetInputStream(StorageFile file)
0994: throws FileNotFoundException {
0995: actionCode = STORAGE_FILE_GET_INPUT_STREAM_ACTION;
0996: actionStorageFile = file;
0997:
0998: try {
0999: return (InputStream) AccessController.doPrivileged(this );
1000: } catch (PrivilegedActionException pae) {
1001: throw (FileNotFoundException) pae.getException();
1002: } finally {
1003: actionStorageFile = null;
1004: }
1005: }
1006:
1007: // PrivilegedAction method
1008: public Object run() throws FileNotFoundException {
1009: switch (actionCode) {
1010: case STORAGE_FILE_EXISTS_ACTION:
1011: return ReuseFactory.getBoolean(actionStorageFile.exists());
1012: case STORAGE_FILE_DELETE_ACTION:
1013: return ReuseFactory.getBoolean(actionStorageFile.delete());
1014: case STORAGE_FILE_MKDIRS_ACTION:
1015: return ReuseFactory.getBoolean(actionStorageFile.mkdirs());
1016: case STORAGE_FILE_GET_OUTPUT_STREAM_ACTION:
1017: return actionStorageFile.getOutputStream();
1018: case STORAGE_FILE_GET_INPUT_STREAM_ACTION:
1019: return actionStorageFile.getInputStream();
1020: }
1021:
1022: return null;
1023: }
1024:
1025: }
|