0001: /*
0002:
0003: Derby - Class org.apache.derby.client.net.Request
0004:
0005: Licensed to the Apache Software Foundation (ASF) under one or more
0006: contributor license agreements. See the NOTICE file distributed with
0007: this work for additional information regarding copyright ownership.
0008: The ASF licenses this file to You under the Apache License, Version 2.0
0009: (the "License"); you may not use this file except in compliance with
0010: the License. You may obtain a copy of the License at
0011:
0012: http://www.apache.org/licenses/LICENSE-2.0
0013:
0014: Unless required by applicable law or agreed to in writing, software
0015: distributed under the License is distributed on an "AS IS" BASIS,
0016: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
0017: See the License for the specific language governing permissions and
0018: limitations under the License.
0019:
0020: */
0021: package org.apache.derby.client.net;
0022:
0023: import org.apache.derby.client.am.DisconnectException;
0024: import org.apache.derby.client.am.EncryptionManager;
0025: import org.apache.derby.client.am.ClientMessageId;
0026: import org.apache.derby.client.am.SqlException;
0027: import org.apache.derby.client.am.Utils;
0028: import org.apache.derby.shared.common.reference.SQLState;
0029:
0030: import java.io.UnsupportedEncodingException;
0031:
public class Request {

    // byte array buffer used for constructing requests.
    // currently requests are built starting at the beginning of the buffer.
    protected byte[] bytes_;

    // keeps track of the next position to place a byte in the buffer.
    // so the last valid byte in the message is at bytes_[offset_ - 1]
    protected int offset_;

    // a stack is used to keep track of offsets into the buffer where 2 byte
    // ddm length values are located. these length bytes will be automatically updated
    // by this object when construction of a particular object has completed
    // (see markLengthBytes / updateLengthBytes).
    // right now the max size of the stack is 10. this is an arbitrary number which
    // should be sufficiently large enough to handle all situations.
    private final static int MAX_MARKS_NESTING = 10;
    // stack storage for marked length-byte offsets; top_ is the next free slot.
    private int[] markStack_ = new int[MAX_MARKS_NESTING];
    private int top_ = 0;

    // the ccsid manager for the connection is stored in this object. it will
    // be used when constructing character ddm data. it will NOT be used for
    // building any FDOCA data.
    protected CcsidManager ccsidManager_;

    // This tracks the location of the current
    // Dss header length bytes. This is done so
    // the length bytes can be automatically
    // updated as information is added to this stream.
    private int dssLengthLocation_ = 0;

    // tracks the request correlation ID to use for commands and command objects.
    // this is automatically updated as commands are built and sent to the server.
    private int correlationID_ = 0;

    // when true, the next buildDss call may finalize the current DSS with
    // finalizeDssLength alone (no chaining-bit updates are needed).
    private boolean simpleDssFinalize = false;

    // Used to mask out password when trace is on.
    protected boolean passwordIncluded_ = false;
    protected int passwordStart_ = 0;
    protected int passwordLength_ = 0;

    // agent that owns this request; supplies the output stream and
    // exception accumulation services.
    protected NetAgent netAgent_;
0074:
// Construct a request object with a buffer of at least minSize bytes
// for accumulating built requests, using the given ccsid manager
// when building ddm character data.
Request(NetAgent netAgent, int minSize, CcsidManager ccsidManager) {
    this.netAgent_ = netAgent;
    this.ccsidManager_ = ccsidManager;
    this.bytes_ = new byte[minSize];
    clearBuffer();
}
0084:
// Construct a request object with a buffer of the given size, using the
// specified ccsid manager instance when building ddm character data.
// (Note: this is just a parameter-order variant of the primary
// constructor; the stale reference to a DEFAULT_BUFFER_SIZE constant
// and the dead commented-out delegation have been removed.)
Request(NetAgent netAgent, CcsidManager ccsidManager, int bufferSize) {
    this(netAgent, bufferSize, ccsidManager);
}
0092:
0093: protected final void clearBuffer() {
0094: offset_ = 0;
0095: top_ = 0;
0096: for (int i = 0; i < markStack_.length; i++) {
0097: if (markStack_[i] != 0) {
0098: markStack_[i] = 0;
0099: } else {
0100: break;
0101: }
0102: }
0103: dssLengthLocation_ = 0;
0104: }
0105:
// Reset this request object for reuse: clear the buffer bookkeeping
// and restart request correlation ids from zero.
final void initialize() {
    clearBuffer();
    correlationID_ = 0;
}
0110:
// set the ccsid manager value. this method allows the ccsid manager to be
// changed so a request object can be reused by different connections with
// different ccsid managers.
final void setCcsidMgr(CcsidManager ccsidManager) {
    ccsidManager_ = ccsidManager;
}
0117:
// Ensure the buffer can hold 'length' total bytes. Note that 'length'
// is the required total capacity (callers pass offset_ + bytesNeeded),
// not an increment. If the buffer is too small it is replaced by one
// sized to the larger of (2 * current size) and 'length', and the
// valid data (bytes_[0 .. offset_-1]) is copied into the new buffer.
protected final void ensureLength(int length) {
    if (length > bytes_.length) {
        byte newBytes[] = new byte[Math.max(bytes_.length << 1,
                length)];
        System.arraycopy(bytes_, 0, newBytes, 0, offset_);
        bytes_ = newBytes;
    }
}
0130:
// creates a request dss in the buffer to contain a ddm command
// object. calling this method means any previous dss objects in
// the buffer are complete and their length and chaining bytes can
// be updated appropriately. each command gets a fresh (incremented)
// correlation id.
protected final void createCommand() {
    buildDss(false, false, false, DssConstants.GDSFMT_RQSDSS,
            ++correlationID_, false);
}
0139:
// creates a request dss in the buffer to contain a ddm command
// object for which no reply is expected (GDSFMT_RQSDSS_NOREPLY).
// calling this method means any previous dss objects in
// the buffer are complete and their length and chaining bytes can
// be updated appropriately.
protected void createXACommand() {
    buildDss(false, false, false,
            DssConstants.GDSFMT_RQSDSS_NOREPLY, ++correlationID_,
            false);
}
0149:
// creates an object dss in the buffer to contain a ddm command
// data object, reusing the current command's correlation id.
// calling this method means any previous dss objects in
// the buffer are complete and their length and chaining bytes can
// be updated appropriately.
final void createCommandData() {
    buildDss(true, false, false, DssConstants.GDSFMT_OBJDSS,
            correlationID_, false);
}
0158:
0159: final void createEncryptedCommandData() {
0160: if (netAgent_.netConnection_.getSecurityMechanism() == NetConfiguration.SECMEC_EUSRIDDTA
0161: || netAgent_.netConnection_.getSecurityMechanism() == NetConfiguration.SECMEC_EUSRPWDDTA) {
0162: buildDss(true, false, false, DssConstants.GDSFMT_ENCOBJDSS,
0163: correlationID_, false);
0164: } else {
0165: buildDss(true, false, false, DssConstants.GDSFMT_OBJDSS,
0166: correlationID_, false);
0167: }
0168: }
0169:
0170: // experimental lob section
0171:
// Builds a new 6 byte DSS header at the current offset, first finalizing
// any DSS already in the buffer (updating its length and chaining bytes).
//
// @param dssHasSameCorrelator true if the previous chained dss carries the
//        same correlator as this one (sets its same-correlator chain bit)
// @param chainedToNextStructure true if this dss is chained to the next
//        structure written into the buffer
// @param nextHasSameCorrelator true if the next chained structure uses the
//        same correlation id
// @param dssType one of the DssConstants.GDSFMT_* format identifiers
// @param corrId request correlation id to write into the header
// @param simpleFinalizeBuildingNextDss true if the next buildDss call may
//        finalize this dss with finalizeDssLength alone (no chain bits)
private final void buildDss(boolean dssHasSameCorrelator,
        boolean chainedToNextStructure,
        boolean nextHasSameCorrelator, int dssType, int corrId,
        boolean simpleFinalizeBuildingNextDss) {
    if (doesRequestContainData()) {
        if (simpleDssFinalize) {
            finalizeDssLength();
        } else {
            finalizePreviousChainedDss(dssHasSameCorrelator);
        }
    }

    // reserve room for the 6 byte dss header
    ensureLength(offset_ + 6);

    // save the length position and skip
    // note: the length position is saved so it can be updated
    // with a different value later.
    dssLengthLocation_ = offset_;
    // always turn on chaining flags... this is helpful for lobs...
    // these bytes will get reset when dss lengths are finalized.
    bytes_[offset_++] = (byte) 0xFF;
    bytes_[offset_++] = (byte) 0xFF;

    // insert the mandatory 0xD0 byte and the dssType
    bytes_[offset_++] = (byte) 0xD0;
    if (chainedToNextStructure) {
        dssType |= DssConstants.GDSCHAIN;
        if (nextHasSameCorrelator) {
            dssType |= DssConstants.GDSCHAIN_SAME_ID;
        }
    }
    bytes_[offset_++] = (byte) (dssType & 0xff);

    // write the request correlation id as a big-endian 2 byte value
    bytes_[offset_++] = (byte) ((corrId >>> 8) & 0xff);
    bytes_[offset_++] = (byte) (corrId & 0xff);

    // remember how the next buildDss call should finalize this dss
    simpleDssFinalize = simpleFinalizeBuildingNextDss;
}
0212:
0213: final void writeScalarStream(boolean chained,
0214: boolean chainedWithSameCorrelator, int codePoint,
0215: int length, java.io.InputStream in, boolean writeNullByte,
0216: int parameterIndex) throws DisconnectException,
0217: SqlException {
0218:
0219: if (netAgent_.netConnection_.getSecurityMechanism() == NetConfiguration.SECMEC_EUSRIDDTA
0220: || netAgent_.netConnection_.getSecurityMechanism() == NetConfiguration.SECMEC_EUSRPWDDTA) {
0221:
0222: writeEncryptedScalarStream(chained,
0223: chainedWithSameCorrelator, codePoint, length, in,
0224: writeNullByte, parameterIndex);
0225:
0226: } else {
0227:
0228: writePlainScalarStream(chained, chainedWithSameCorrelator,
0229: codePoint, length, in, writeNullByte,
0230: parameterIndex);
0231:
0232: }
0233:
0234: }
0235:
// We need to reuse the agent's sql exception accumulation mechanism
// for this write exception, pad if the length is too big, and truncate if the length is too small.
//
// Fully materializes 'length' bytes from the stream into memory,
// prepends the DDM length/codepoint bytes, encrypts the whole value via
// the connection's EncryptionManager, then writes the ciphertext out in
// 32K DSS continuation segments, flushing the send buffer as each fills.
final void writeEncryptedScalarStream(boolean chained,
        boolean chainedWithSameCorrelator, int codePoint,
        int length, java.io.InputStream in, boolean writeNullByte,
        int parameterIndex) throws DisconnectException,
        SqlException {

    int leftToRead = length;
    int extendedLengthByteCount = prepScalarStream(chained,
            chainedWithSameCorrelator, writeNullByte, leftToRead);
    int bytesToRead;

    // first-segment capacity: MAX_DSS_LEN minus the 6 byte dss header,
    // the 4 byte llcp, the optional 1 byte null indicator, and any
    // extended length bytes.
    if (writeNullByte) {
        bytesToRead = Utils.min(leftToRead,
                DssConstants.MAX_DSS_LEN - 6 - 4 - 1
                - extendedLengthByteCount);
    } else {
        bytesToRead = Utils.min(leftToRead,
                DssConstants.MAX_DSS_LEN - 6 - 4
                - extendedLengthByteCount);
    }

    byte[] lengthAndCodepoint;
    lengthAndCodepoint = buildLengthAndCodePointForEncryptedLob(
            codePoint, leftToRead, writeNullByte,
            extendedLengthByteCount);

    // ideally we would stream the input rather than fully materialize
    // it, but the data must all be in hand before it can be encrypted.

    byte[] clearedBytes = new byte[leftToRead];
    int bytesRead = 0;
    // NOTE(review): totalBytesRead is accumulated but never read afterwards.
    int totalBytesRead = 0;
    int pos = 0;
    do {
        try {
            bytesRead = in.read(clearedBytes, pos, leftToRead);
            totalBytesRead += bytesRead;
        } catch (java.io.IOException e) {
            padScalarStreamForError(leftToRead, bytesToRead);
            // set with SQLSTATE 01004: The value of a string was truncated when assigned to a host variable.
            netAgent_
                    .accumulateReadException(new SqlException(
                            netAgent_.logWriter_,
                            new ClientMessageId(
                                    SQLState.NET_IOEXCEPTION_ON_READ),
                            new Integer(parameterIndex), e
                                    .getMessage(), e));
            return;
        }
        if (bytesRead == -1) {
            // Premature end of stream. A chain-breaking disconnect is
            // accumulated here rather than padding with 0x0 (encrypting
            // the padded value takes too much time) or throwing a plain
            // SqlException: DDM objects such as PRPSQLSTT have already
            // been sent, and the server would hang waiting for EXTDTA.
            netAgent_
                    .accumulateChainBreakingReadExceptionAndThrow(new DisconnectException(
                            netAgent_,
                            new ClientMessageId(
                                    SQLState.NET_PREMATURE_EOS_DISCONNECT),
                            new Integer(parameterIndex)));
            return;
        } else {
            pos += bytesRead;
            //offset_ += bytesRead; //comment this out for data stream encryption.
            leftToRead -= bytesRead;
        }

    } while (leftToRead > 0);

    // check to make sure that the specified length wasn't too small
    try {
        if (in.read() != -1) {
            // set with SQLSTATE 01004: The value of a string was truncated when assigned to a host variable.
            netAgent_
                    .accumulateReadException(new SqlException(
                            netAgent_.logWriter_,
                            new ClientMessageId(
                                    SQLState.NET_INPUTSTREAM_LENGTH_TOO_SMALL),
                            new Integer(parameterIndex)));
        }
    } catch (java.io.IOException e) {
        netAgent_
                .accumulateReadException(new SqlException(
                        netAgent_.logWriter_,
                        new ClientMessageId(
                                SQLState.NET_IOEXCEPTION_ON_STREAMLEN_VERIFICATION),
                        new Integer(parameterIndex),
                        e.getMessage(), e));
    }

    // prepend the DDM length/codepoint bytes to the cleartext so the
    // whole value is encrypted together.
    byte[] newClearedBytes = new byte[clearedBytes.length
            + lengthAndCodepoint.length];
    System.arraycopy(lengthAndCodepoint, 0, newClearedBytes, 0,
            lengthAndCodepoint.length);
    System.arraycopy(clearedBytes, 0, newClearedBytes,
            lengthAndCodepoint.length, clearedBytes.length);
    //it's wrong here, need to add in the real length after the codepoint 146c
    byte[] encryptedBytes;
    encryptedBytes = netAgent_.netConnection_
            .getEncryptionManager().encryptData(newClearedBytes,
                    NetConfiguration.SECMEC_EUSRIDPWD,
                    netAgent_.netConnection_.getTargetPublicKey(),
                    netAgent_.netConnection_.getTargetPublicKey());

    int encryptedBytesLength = encryptedBytes.length;
    int sendingLength = bytes_.length - offset_;
    if (encryptedBytesLength > (bytes_.length - offset_)) {
        // ciphertext overflows the current send buffer: fill the
        // remainder of the buffer and flush it.
        System.arraycopy(encryptedBytes, 0, bytes_, offset_,
                (bytes_.length - offset_));
        // NOTE(review): assumes the send buffer is exactly 32767 bytes - confirm
        offset_ = 32767;
        try {
            sendBytes(netAgent_.getOutputStream());
        } catch (java.io.IOException ioe) {
            netAgent_.throwCommunicationsFailure(ioe);
        }
    } else {
        System.arraycopy(encryptedBytes, 0, bytes_, offset_,
                encryptedBytesLength);
        offset_ = offset_ + encryptedBytes.length;
    }

    // write any remaining ciphertext as DSS continuation segments.
    encryptedBytesLength = encryptedBytesLength - sendingLength;
    while (encryptedBytesLength > 0) {
        //dssLengthLocation_ = offset_;
        offset_ = 0;

        if ((encryptedBytesLength - 32765) > 0) {
            // full continuation segment: 2 byte header (0xFFFF = max
            // length with continuation bit on) + 32765 data bytes.
            bytes_[offset_++] = (byte) (0xff);
            bytes_[offset_++] = (byte) (0xff);
            System.arraycopy(encryptedBytes, sendingLength, bytes_,
                    offset_, 32765);
            encryptedBytesLength -= 32765;
            sendingLength += 32765;
            offset_ = 32767;
            try {
                sendBytes(netAgent_.getOutputStream());
            } catch (java.io.IOException ioe) {
                netAgent_.throwCommunicationsFailure(ioe);
            }
        } else {
            // final partial segment: the 2 byte header carries the real
            // length (data plus the header itself).
            int leftlength = encryptedBytesLength + 2;
            bytes_[offset_++] = (byte) ((leftlength >>> 8) & 0xff);
            bytes_[offset_++] = (byte) (leftlength & 0xff);

            System.arraycopy(encryptedBytes, sendingLength, bytes_,
                    offset_, encryptedBytesLength);

            offset_ += encryptedBytesLength;
            dssLengthLocation_ = offset_;
            encryptedBytesLength = 0;
        }

    }
}
0406:
// We need to reuse the agent's sql exception accumulation mechanism
// for this write exception, pad if the length is too big, and truncate if the length is too small.
//
// Streams 'length' bytes from the input stream directly into the send
// buffer as an unencrypted lob value, writing DSS continuation headers
// and flushing the buffer as each 32K segment fills.
final void writePlainScalarStream(boolean chained,
        boolean chainedWithSameCorrelator, int codePoint,
        int length, java.io.InputStream in, boolean writeNullByte,
        int parameterIndex) throws DisconnectException,
        SqlException {
    int leftToRead = length;
    int extendedLengthByteCount = prepScalarStream(chained,
            chainedWithSameCorrelator, writeNullByte, leftToRead);
    int bytesToRead;

    // first-segment capacity: MAX_DSS_LEN minus the 6 byte dss header,
    // the 4 byte llcp, the optional 1 byte null indicator, and any
    // extended length bytes.
    if (writeNullByte) {
        bytesToRead = Utils.min(leftToRead,
                DssConstants.MAX_DSS_LEN - 6 - 4 - 1
                - extendedLengthByteCount);
    } else {
        bytesToRead = Utils.min(leftToRead,
                DssConstants.MAX_DSS_LEN - 6 - 4
                - extendedLengthByteCount);
    }

    buildLengthAndCodePointForLob(codePoint, leftToRead,
            writeNullByte, extendedLengthByteCount);

    int bytesRead = 0;
    int totalBytesRead = 0;
    do {
        // inner loop fills the current DSS segment directly from the stream
        do {
            try {
                bytesRead = in.read(bytes_, offset_, bytesToRead);
                totalBytesRead += bytesRead;
            } catch (java.io.IOException e) {
                padScalarStreamForError(leftToRead, bytesToRead);
                // set with SQLSTATE 01004: The value of a string was truncated when assigned to a host variable.
                netAgent_.accumulateReadException(new SqlException(
                        netAgent_.logWriter_, new ClientMessageId(
                                SQLState.NET_IOEXCEPTION_ON_READ),
                        new Integer(parameterIndex),
                        e.getMessage(), e));

                return;
            }
            if (bytesRead == -1) {
                // premature end of stream: pad the promised length with
                // 0x0 and accumulate a truncation warning.
                padScalarStreamForError(leftToRead, bytesToRead);
                // set with SQLSTATE 01004: The value of a string was truncated when assigned to a host variable.
                netAgent_.accumulateReadException(new SqlException(
                        netAgent_.logWriter_, new ClientMessageId(
                                SQLState.NET_PREMATURE_EOS),
                        new Integer(parameterIndex)));
                return;
            } else {
                bytesToRead -= bytesRead;
                offset_ += bytesRead;
                leftToRead -= bytesRead;
            }
        } while (bytesToRead > 0);

        // segment is full; flush if needed and start a continuation header
        bytesToRead = flushScalarStreamSegment(leftToRead,
                bytesToRead);
    } while (leftToRead > 0);

    // check to make sure that the specified length wasn't too small
    try {
        if (in.read() != -1) {
            // set with SQLSTATE 01004: The value of a string was truncated when assigned to a host variable.
            netAgent_
                    .accumulateReadException(new SqlException(
                            netAgent_.logWriter_,
                            new ClientMessageId(
                                    SQLState.NET_INPUTSTREAM_LENGTH_TOO_SMALL),
                            new Integer(parameterIndex)));
        }
    } catch (java.io.IOException e) {
        netAgent_
                .accumulateReadException(new SqlException(
                        netAgent_.logWriter_,
                        new ClientMessageId(
                                SQLState.NET_IOEXCEPTION_ON_STREAMLEN_VERIFICATION),
                        new Integer(parameterIndex),
                        e.getMessage(), e));
    }
}
0490:
0491: // Throw DataTruncation, instead of closing connection if input size mismatches
0492: // An implication of this, is that we need to extend the chaining model
0493: // for writes to accomodate chained write exceptoins
0494: final void writeScalarStream(boolean chained,
0495: boolean chainedWithSameCorrelator, int codePoint,
0496: int length, java.io.Reader r, boolean writeNullByte,
0497: int parameterIndex) throws DisconnectException,
0498: SqlException {
0499:
0500: writeScalarStream(chained, chainedWithSameCorrelator,
0501: codePoint, length * 2, EncodedInputStream
0502: .createUTF16BEStream(r), writeNullByte,
0503: parameterIndex);
0504: }
0505:
0506: // prepScalarStream does the following prep for writing stream data:
0507: // 1. Flushes an existing DSS segment, if necessary
0508: // 2. Determines if extended length bytes are needed
0509: // 3. Creates a new DSS/DDM header and a null byte indicator, if applicable
0510: protected final int prepScalarStream(boolean chained,
0511: boolean chainedWithSameCorrelator, boolean writeNullByte,
0512: int leftToRead) throws DisconnectException {
0513: int extendedLengthByteCount;
0514:
0515: int nullIndicatorSize = 0;
0516: if (writeNullByte) {
0517: // leftToRead is cast to (long) on the off chance that +4+1 pushes it outside the range of int
0518: extendedLengthByteCount = calculateExtendedLengthByteCount((long) leftToRead + 4 + 1);
0519: nullIndicatorSize = 1;
0520: } else {
0521: extendedLengthByteCount = calculateExtendedLengthByteCount(leftToRead + 4);
0522: }
0523:
0524: // flush the existing DSS segment if this stream will not fit in the send buffer
0525: // leftToRead is cast to (long) on the off chance that +4+1 pushes it outside the range of int
0526: if (10 + extendedLengthByteCount + nullIndicatorSize
0527: + (long) leftToRead + offset_ > DssConstants.MAX_DSS_LEN) {
0528: try {
0529: if (simpleDssFinalize) {
0530: finalizeDssLength();
0531: } else {
0532: finalizePreviousChainedDss(true);
0533: }
0534: sendBytes(netAgent_.getOutputStream());
0535: } catch (java.io.IOException e) {
0536: netAgent_.throwCommunicationsFailure(e);
0537: }
0538: }
0539:
0540: if (netAgent_.netConnection_.getSecurityMechanism() == NetConfiguration.SECMEC_EUSRIDDTA
0541: || netAgent_.netConnection_.getSecurityMechanism() == NetConfiguration.SECMEC_EUSRPWDDTA) {
0542: buildDss(true, chained, chainedWithSameCorrelator,
0543: DssConstants.GDSFMT_ENCOBJDSS, correlationID_, true);
0544: } else
0545: // buildDss should not call ensure length.
0546: {
0547: buildDss(true, chained, chainedWithSameCorrelator,
0548: DssConstants.GDSFMT_OBJDSS, correlationID_, true);
0549: }
0550:
0551: return extendedLengthByteCount;
0552: }
0553:
0554: // Writes out a scalar stream DSS segment, along with DSS continuation headers,
0555: // if necessary.
0556: protected final int flushScalarStreamSegment(int leftToRead,
0557: int bytesToRead) throws DisconnectException {
0558: int newBytesToRead = bytesToRead;
0559:
0560: // either at end of data, end of dss segment, or both.
0561: if (leftToRead != 0) {
0562: // 32k segment filled and not at end of data.
0563: if ((Utils.min(2 + leftToRead, 32767)) > (bytes_.length - offset_)) {
0564: try {
0565: sendBytes(netAgent_.getOutputStream());
0566: } catch (java.io.IOException ioe) {
0567: netAgent_.throwCommunicationsFailure(ioe);
0568: }
0569: }
0570: dssLengthLocation_ = offset_;
0571: bytes_[offset_++] = (byte) (0xff);
0572: bytes_[offset_++] = (byte) (0xff);
0573: newBytesToRead = Utils.min(leftToRead, 32765);
0574: }
0575:
0576: return newBytesToRead;
0577: }
0578:
0579: // the offset_ must not be updated when an error is encountered
0580: // note valid data may be overwritten
0581: protected final void padScalarStreamForError(int leftToRead,
0582: int bytesToRead) throws DisconnectException {
0583: do {
0584: do {
0585: bytes_[offset_++] = (byte) (0x0); // use 0x0 as the padding byte
0586: bytesToRead--;
0587: leftToRead--;
0588: } while (bytesToRead > 0);
0589:
0590: bytesToRead = flushScalarStreamSegment(leftToRead,
0591: bytesToRead);
0592: } while (leftToRead > 0);
0593: }
0594:
0595: private final void writeExtendedLengthBytes(
0596: int extendedLengthByteCount, long length) {
0597: int shiftSize = (extendedLengthByteCount - 1) * 8;
0598: for (int i = 0; i < extendedLengthByteCount; i++) {
0599: bytes_[offset_++] = (byte) ((length >>> shiftSize) & 0xff);
0600: shiftSize -= 8;
0601: }
0602: }
0603:
0604: private final byte[] writeExtendedLengthBytesForEncryption(
0605: int extendedLengthByteCount, long length) {
0606: int shiftSize = (extendedLengthByteCount - 1) * 8;
0607: byte[] extendedLengthBytes = new byte[extendedLengthByteCount];
0608: for (int i = 0; i < extendedLengthByteCount; i++) {
0609: extendedLengthBytes[i] = (byte) ((length >>> shiftSize) & 0xff);
0610: shiftSize -= 8;
0611: }
0612: return extendedLengthBytes;
0613: }
0614:
0615: // experimental lob section - end
0616:
0617: // used to finialize a dss which is already in the buffer
0618: // before another dss is built. this includes updating length
0619: // bytes and chaining bits.
0620: protected final void finalizePreviousChainedDss(
0621: boolean dssHasSameCorrelator) {
0622: finalizeDssLength();
0623: bytes_[dssLengthLocation_ + 3] |= 0x40;
0624: if (dssHasSameCorrelator) // for blobs
0625: {
0626: bytes_[dssLengthLocation_ + 3] |= 0x10;
0627: }
0628: }
0629:
// method to determine if any data is in the request.
// this indicates there is a dss object already in the buffer.
protected final boolean doesRequestContainData() {
    return offset_ != 0;
}
0635:
// signal the completion of a Dss Layer A object. The length of the
// dss object will be calculated based on the difference between the
// start of the dss, saved on the beginDss call, and the current
// offset into the buffer which marks the end of the data. In the event
// the length requires the use of continuation Dss headers, one for each 32k
// chunk of data, the data will be shifted and the continuation headers
// will be inserted with the correct values as needed.
// Note: In the future, we may try to optimize this approach
// in an attempt to avoid these shifts.
protected final void finalizeDssLength() {
    // calculate the total size of the dss and the number of bytes which would
    // require continuation dss headers. The total length already includes the
    // 6 byte dss header located at the beginning of the dss. It does not
    // include the length of any continuation headers.
    int totalSize = offset_ - dssLengthLocation_;
    int bytesRequiringContDssHeader = totalSize - 32767;

    // determine if continuation headers are needed
    if (bytesRequiringContDssHeader > 0) {

        // the continuation headers are needed, so calculate how many.
        // after the first 32767 worth of data, a continuation header is
        // needed for every 32765 bytes (32765 bytes of data + 2 bytes of
        // continuation header = 32767 Dss Max Size).
        int contDssHeaderCount = bytesRequiringContDssHeader / 32765;
        if (bytesRequiringContDssHeader % 32765 != 0) {
            contDssHeaderCount++;
        }

        // right now the code will shift to the right. In the future we may want
        // to try something fancier to help reduce the copying (maybe keep
        // space in the beginning of the buffer??).
        // the offset points to the next available offset in the buffer to place
        // a piece of data, so the last dataByte is at offset - 1.
        // various bytes will need to be shifted by different amounts
        // depending on how many dss headers to insert so the amount to shift
        // will be calculated and adjusted as needed. ensure there is enough room
        // for all the continuation headers and adjust the offset to point to the
        // new end of the data.
        int dataByte = offset_ - 1;
        int shiftOffset = contDssHeaderCount * 2;
        ensureLength(offset_ + shiftOffset);
        offset_ += shiftOffset;

        // mark passOne to help with calculating the length of the final (first or
        // rightmost) continuation header.
        boolean passOne = true;
        do {
            // calculate chunk of data to shift; the rightmost chunk is the
            // remainder (or a full 32765 if it divides evenly).
            int dataToShift = bytesRequiringContDssHeader % 32765;
            if (dataToShift == 0) {
                dataToShift = 32765;
            }

            // perform the shift
            dataByte -= dataToShift;
            System.arraycopy(bytes_, dataByte + 1, bytes_, dataByte
                    + shiftOffset + 1, dataToShift);

            // calculate the value of the 2 byte continuation dss header which
            // includes the length of itself. On the first pass, if the length is 32767
            // we do not want to set the continuation dss header flag.
            int twoByteContDssHeader = dataToShift + 2;
            if (passOne) {
                passOne = false;
            } else {
                if (twoByteContDssHeader == 32767) {
                    twoByteContDssHeader = 0xFFFF;
                }
            }

            // insert the header's length bytes
            bytes_[dataByte + shiftOffset - 1] = (byte) ((twoByteContDssHeader >>> 8) & 0xff);
            bytes_[dataByte + shiftOffset] = (byte) (twoByteContDssHeader & 0xff);

            // adjust the bytesRequiringContDssHeader and the amount to shift for
            // data in upstream headers.
            bytesRequiringContDssHeader -= dataToShift;
            shiftOffset -= 2;

            // shift and insert another header for more data.
        } while (bytesRequiringContDssHeader > 0);

        // set the continuation dss header flag on for the first header
        totalSize = 0xFFFF;

    }

    // insert the length bytes in the 6 byte dss header.
    bytes_[dssLengthLocation_] = (byte) ((totalSize >>> 8) & 0xff);
    bytes_[dssLengthLocation_ + 1] = (byte) (totalSize & 0xff);
}
0728:
0729: // mark the location of a two byte ddm length field in the buffer,
0730: // skip the length bytes for later update, and insert a ddm codepoint
0731: // into the buffer. The value of the codepoint is not checked.
0732: // this length will be automatically updated when construction of
0733: // the ddm object is complete (see updateLengthBytes method).
0734: // Note: this mechanism handles extended length ddms.
0735: protected final void markLengthBytes(int codePoint) {
0736: ensureLength(offset_ + 4);
0737:
0738: // save the location of length bytes in the mark stack.
0739: mark();
0740:
0741: // skip the length bytes and insert the codepoint
0742: offset_ += 2;
0743: bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff);
0744: bytes_[offset_++] = (byte) (codePoint & 0xff);
0745: }
0746:
// mark an offset into the buffer by placing the current offset value on
// a stack. no overflow check is made; the caller must not exceed
// MAX_MARKS_NESTING outstanding marks.
private final void mark() {
    markStack_[top_++] = offset_;
}
0752:
// remove and return the top offset value from the mark stack. the caller
// must ensure a mark exists (an empty stack would index markStack_[-1]).
private final int popMark() {
    return markStack_[--top_];
}
0757:
// record the current offset so the PKGNAMCSN bytes written after this
// point can later be located for caching (see popMarkForCachingPKGNAMCSN).
protected final void markForCachingPKGNAMCSN() {
    mark();
}
0761:
// return the offset recorded by markForCachingPKGNAMCSN, removing it
// from the mark stack.
protected final int popMarkForCachingPKGNAMCSN() {
    return popMark();
}
0765:
// Called to update the last ddm length bytes marked (lengths are updated
// in the reverse order that they are marked). It is up to the caller
// to make sure length bytes were marked before calling this method.
// If the length requires ddm extended length bytes, the data will be
// shifted as needed and the extended length bytes will be automatically
// inserted.
protected final void updateLengthBytes() throws SqlException {
    // remove the top length location offset from the mark stack and
    // calculate the length based on the marked location and end of data.
    int lengthLocation = popMark();
    int length = offset_ - lengthLocation;

    // determine if any extended length bytes are needed. the value returned
    // from calculateExtendedLengthByteCount is the number of extended length
    // bytes required. 0 indicates no extended length.
    int extendedLengthByteCount = calculateExtendedLengthByteCount(length);
    if (extendedLengthByteCount != 0) {

        // ensure there is enough room in the buffer for the extended length bytes.
        ensureLength(offset_ + extendedLengthByteCount);

        // calculate the length to be placed in the extended length bytes.
        // this length does not include the 4 byte llcp.
        int extendedLength = length - 4;

        // shift the data to the right by the number of extended length bytes needed.
        int extendedLengthLocation = lengthLocation + 4;
        System.arraycopy(bytes_, extendedLengthLocation, bytes_,
                extendedLengthLocation + extendedLengthByteCount,
                extendedLength);

        // write the extended length (big-endian, most significant byte first)
        int shiftSize = (extendedLengthByteCount - 1) * 8;
        for (int i = 0; i < extendedLengthByteCount; i++) {
            bytes_[extendedLengthLocation++] = (byte) ((extendedLength >>> shiftSize) & 0xff);
            shiftSize -= 8;
        }
        // adjust the offset to account for the shift and insert
        offset_ += extendedLengthByteCount;

        // the two byte length field before the codepoint contains the length
        // of itself, the length of the codepoint, and the number of bytes used
        // to hold the extended length. the 2 byte length field also has the first
        // bit on to indicate extended length bytes were used.
        length = extendedLengthByteCount + 4;
        length |= 0x8000;
    }

    // write the 2 byte length field (2 bytes before codepoint).
    bytes_[lengthLocation] = (byte) ((length >>> 8) & 0xff);
    bytes_[lengthLocation + 1] = (byte) (length & 0xff);
}
0818:
0819: // helper method to calculate the minimum number of extended length bytes needed
0820: // for a ddm. a return value of 0 indicates no extended length needed.
0821: private final int calculateExtendedLengthByteCount(long ddmSize) //throws SqlException
0822: {
0823: // according to Jim and some tests perfomred on Lob data,
0824: // the extended length bytes are signed. Assume that
0825: // if this is the case for Lobs, it is the case for
0826: // all extended length scenarios.
0827: if (ddmSize <= 0x7FFF) {
0828: return 0;
0829: } else if (ddmSize <= 0x7FFFFFFFL) {
0830: return 4;
0831: } else if (ddmSize <= 0x7FFFFFFFFFFFL) {
0832: return 6;
0833: } else {
0834: return 8;
0835: }
0836: }
0837:
0838: // insert the padByte into the buffer by length number of times.
0839: final void padBytes(byte padByte, int length) {
0840: ensureLength(offset_ + length);
0841: for (int i = 0; i < length; i++) {
0842: bytes_[offset_++] = padByte;
0843: }
0844: }
0845:
0846: // insert an unsigned single byte value into the buffer.
0847: final void write1Byte(int value) {
0848: ensureLength(offset_ + 1);
0849: bytes_[offset_++] = (byte) (value & 0xff);
0850: }
0851:
0852: // insert 3 unsigned bytes into the buffer. this was
0853: // moved up from NetStatementRequest for performance
0854: final void buildTripletHeader(int tripletLength, int tripletType,
0855: int tripletId) {
0856: ensureLength(offset_ + 3);
0857: bytes_[offset_++] = (byte) (tripletLength & 0xff);
0858: bytes_[offset_++] = (byte) (tripletType & 0xff);
0859: bytes_[offset_++] = (byte) (tripletId & 0xff);
0860: }
0861:
0862: final void writeLidAndLengths(int[][] lidAndLengthOverrides,
0863: int count, int offset) {
0864: ensureLength(offset_ + (count * 3));
0865: for (int i = 0; i < count; i++, offset++) {
0866: bytes_[offset_++] = (byte) (lidAndLengthOverrides[offset][0] & 0xff);
0867: bytes_[offset_++] = (byte) ((lidAndLengthOverrides[offset][1] >>> 8) & 0xff);
0868: bytes_[offset_++] = (byte) (lidAndLengthOverrides[offset][1] & 0xff);
0869: }
0870: }
0871:
0872: // if mdd overrides are not required, lids and lengths are copied straight into the
0873: // buffer.
0874: // otherwise, lookup the protocolType in the map. if an entry exists, substitute the
0875: // protocolType with the corresponding override lid.
0876: final void writeLidAndLengths(int[][] lidAndLengthOverrides,
0877: int count, int offset, boolean mddRequired,
0878: java.util.Hashtable map) {
0879: if (!mddRequired) {
0880: writeLidAndLengths(lidAndLengthOverrides, count, offset);
0881: }
0882: // if mdd overrides are required, lookup the protocolType in the map, and substitute
0883: // the protocolType with the override lid.
0884: else {
0885: ensureLength(offset_ + (count * 3));
0886: int protocolType, overrideLid;
0887: Object entry;
0888: for (int i = 0; i < count; i++, offset++) {
0889: protocolType = lidAndLengthOverrides[offset][0];
0890: // lookup the protocolType in the protocolType->overrideLid map
0891: // if an entry exists, replace the protocolType with the overrideLid
0892: entry = map.get(new Integer(protocolType));
0893: overrideLid = (entry == null) ? protocolType
0894: : ((Integer) entry).intValue();
0895: bytes_[offset_++] = (byte) (overrideLid & 0xff);
0896: bytes_[offset_++] = (byte) ((lidAndLengthOverrides[offset][1] >>> 8) & 0xff);
0897: bytes_[offset_++] = (byte) (lidAndLengthOverrides[offset][1] & 0xff);
0898: }
0899: }
0900: }
0901:
0902: // perf end
0903:
0904: // insert a big endian unsigned 2 byte value into the buffer.
0905: final void write2Bytes(int value) {
0906: ensureLength(offset_ + 2);
0907: bytes_[offset_++] = (byte) ((value >>> 8) & 0xff);
0908: bytes_[offset_++] = (byte) (value & 0xff);
0909: }
0910:
0911: // insert a big endian unsigned 4 byte value into the buffer.
0912: final void write4Bytes(long value) {
0913: ensureLength(offset_ + 4);
0914: bytes_[offset_++] = (byte) ((value >>> 24) & 0xff);
0915: bytes_[offset_++] = (byte) ((value >>> 16) & 0xff);
0916: bytes_[offset_++] = (byte) ((value >>> 8) & 0xff);
0917: bytes_[offset_++] = (byte) (value & 0xff);
0918: }
0919:
0920: // copy length number of bytes starting at offset 0 of the byte array, buf,
0921: // into the buffer. it is up to the caller to make sure buf has at least length
0922: // number of elements. no checking will be done by this method.
0923: final void writeBytes(byte[] buf, int length) {
0924: ensureLength(offset_ + length);
0925: System.arraycopy(buf, 0, bytes_, offset_, length);
0926: offset_ += length;
0927: }
0928:
0929: final void writeBytes(byte[] buf) {
0930: ensureLength(offset_ + buf.length);
0931: System.arraycopy(buf, 0, bytes_, offset_, buf.length);
0932: offset_ += buf.length;
0933: }
0934:
0935: // insert a pair of unsigned 2 byte values into the buffer.
0936: final void writeCodePoint4Bytes(int codePoint, int value) { // should this be writeCodePoint2Bytes
0937: ensureLength(offset_ + 4);
0938: bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff);
0939: bytes_[offset_++] = (byte) (codePoint & 0xff);
0940: bytes_[offset_++] = (byte) ((value >>> 8) & 0xff);
0941: bytes_[offset_++] = (byte) (value & 0xff);
0942: }
0943:
0944: // insert a 4 byte length/codepoint pair and a 1 byte unsigned value into the buffer.
0945: // total of 5 bytes inserted in buffer.
0946: protected final void writeScalar1Byte(int codePoint, int value) {
0947: ensureLength(offset_ + 5);
0948: bytes_[offset_++] = 0x00;
0949: bytes_[offset_++] = 0x05;
0950: bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff);
0951: bytes_[offset_++] = (byte) (codePoint & 0xff);
0952: bytes_[offset_++] = (byte) (value & 0xff);
0953: }
0954:
0955: // insert a 4 byte length/codepoint pair and a 2 byte unsigned value into the buffer.
0956: // total of 6 bytes inserted in buffer.
0957: final void writeScalar2Bytes(int codePoint, int value) {
0958: ensureLength(offset_ + 6);
0959: bytes_[offset_++] = 0x00;
0960: bytes_[offset_++] = 0x06;
0961: bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff);
0962: bytes_[offset_++] = (byte) (codePoint & 0xff);
0963: bytes_[offset_++] = (byte) ((value >>> 8) & 0xff);
0964: bytes_[offset_++] = (byte) (value & 0xff);
0965: }
0966:
0967: // insert a 4 byte length/codepoint pair and a 4 byte unsigned value into the
0968: // buffer. total of 8 bytes inserted in the buffer.
0969: protected final void writeScalar4Bytes(int codePoint, long value) {
0970: ensureLength(offset_ + 8);
0971: bytes_[offset_++] = 0x00;
0972: bytes_[offset_++] = 0x08;
0973: bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff);
0974: bytes_[offset_++] = (byte) (codePoint & 0xff);
0975: bytes_[offset_++] = (byte) ((value >>> 24) & 0xff);
0976: bytes_[offset_++] = (byte) ((value >>> 16) & 0xff);
0977: bytes_[offset_++] = (byte) ((value >>> 8) & 0xff);
0978: bytes_[offset_++] = (byte) (value & 0xff);
0979: }
0980:
0981: // insert a 4 byte length/codepoint pair and a 8 byte unsigned value into the
0982: // buffer. total of 12 bytes inserted in the buffer.
0983: final void writeScalar8Bytes(int codePoint, long value) {
0984: ensureLength(offset_ + 12);
0985: bytes_[offset_++] = 0x00;
0986: bytes_[offset_++] = 0x0C;
0987: bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff);
0988: bytes_[offset_++] = (byte) (codePoint & 0xff);
0989: bytes_[offset_++] = (byte) ((value >>> 56) & 0xff);
0990: bytes_[offset_++] = (byte) ((value >>> 48) & 0xff);
0991: bytes_[offset_++] = (byte) ((value >>> 40) & 0xff);
0992: bytes_[offset_++] = (byte) ((value >>> 32) & 0xff);
0993: bytes_[offset_++] = (byte) ((value >>> 24) & 0xff);
0994: bytes_[offset_++] = (byte) ((value >>> 16) & 0xff);
0995: bytes_[offset_++] = (byte) ((value >>> 8) & 0xff);
0996: bytes_[offset_++] = (byte) (value & 0xff);
0997: }
0998:
0999: // insert a 4 byte length/codepoint pair into the buffer.
1000: // total of 4 bytes inserted in buffer.
1001: // Note: the length value inserted in the buffer is the same as the value
1002: // passed in as an argument (this value is NOT incremented by 4 before being
1003: // inserted).
1004: final void writeLengthCodePoint(int length, int codePoint) {
1005: ensureLength(offset_ + 4);
1006: bytes_[offset_++] = (byte) ((length >>> 8) & 0xff);
1007: bytes_[offset_++] = (byte) (length & 0xff);
1008: bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff);
1009: bytes_[offset_++] = (byte) (codePoint & 0xff);
1010: }
1011:
1012: final byte[] writeEXTDTALengthCodePointForEncryption(int length,
1013: int codePoint) {
1014: //how to encure length and offset later?
1015: byte[] clearedBytes = new byte[4];
1016: clearedBytes[0] = (byte) ((length >>> 8) & 0xff);
1017: clearedBytes[1] = (byte) (length & 0xff);
1018: clearedBytes[2] = (byte) ((codePoint >>> 8) & 0xff);
1019: clearedBytes[3] = (byte) (codePoint & 0xff);
1020: return clearedBytes;
1021: }
1022:
1023: // insert a 4 byte length/codepoint pair into the buffer followed
1024: // by length number of bytes copied from array buf starting at offset 0.
1025: // the length of this scalar must not exceed the max for the two byte length
1026: // field. This method does not support extended length. The length
1027: // value inserted in the buffer includes the number of bytes to copy plus
1028: // the size of the llcp (or length + 4). It is up to the caller to make sure
1029: // the array, buf, contains at least length number of bytes.
1030: final void writeScalarBytes(int codePoint, byte[] buf, int length) {
1031: ensureLength(offset_ + length + 4);
1032: bytes_[offset_++] = (byte) (((length + 4) >>> 8) & 0xff);
1033: bytes_[offset_++] = (byte) ((length + 4) & 0xff);
1034: bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff);
1035: bytes_[offset_++] = (byte) (codePoint & 0xff);
1036: for (int i = 0; i < length; i++) {
1037: bytes_[offset_++] = buf[i];
1038: }
1039: }
1040:
1041: // insert a 4 byte length/codepoint pair into the buffer.
1042: // total of 4 bytes inserted in buffer.
1043: // Note: datalength will be incremented by the size of the llcp, 4,
1044: // before being inserted.
1045: final void writeScalarHeader(int codePoint, int dataLength) {
1046: ensureLength(offset_ + dataLength + 4);
1047: bytes_[offset_++] = (byte) (((dataLength + 4) >>> 8) & 0xff);
1048: bytes_[offset_++] = (byte) ((dataLength + 4) & 0xff);
1049: bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff);
1050: bytes_[offset_++] = (byte) (codePoint & 0xff);
1051: }
1052:
1053: // insert a 4 byte length/codepoint pair plus ddm character data into
1054: // the buffer. This method assumes that the String argument can be
1055: // converted by the ccsid manager. This should be fine because usually
1056: // there are restrictions on the characters which can be used for ddm
1057: // character data. This method also assumes that the string.length() will
1058: // be the number of bytes following the conversion.
1059: // The two byte length field will contain the length of the character data
1060: // and the length of the 4 byte llcp. This method does not handle
1061: // scenarios which require extended length bytes.
1062: final void writeScalarString(int codePoint, String string)
1063: throws SqlException {
1064: int stringLength = string.length();
1065: ensureLength(offset_ + stringLength + 4);
1066: bytes_[offset_++] = (byte) (((stringLength + 4) >>> 8) & 0xff);
1067: bytes_[offset_++] = (byte) ((stringLength + 4) & 0xff);
1068: bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff);
1069: bytes_[offset_++] = (byte) (codePoint & 0xff);
1070: offset_ = ccsidManager_.convertFromUCS2(string, bytes_,
1071: offset_, netAgent_);
1072: }
1073:
1074: // insert a 4 byte length/codepoint pair plus ddm character data into the
1075: // buffer. The ddm character data is padded if needed with the ccsid manager's
1076: // space character if the length of the character data is less than paddedLength.
1077: // Note: this method is not to be used for String truncation and the string length
1078: // must be <= paddedLength.
1079: // This method assumes that the String argument can be
1080: // converted by the ccsid manager. This should be fine because usually
1081: // there are restrictions on the characters which can be used for ddm
1082: // character data. This method also assumes that the string.length() will
1083: // be the number of bytes following the conversion. The two byte length field
1084: // of the llcp will contain the length of the character data including the pad
1085: // and the length of the llcp or 4. This method will not handle extended length
1086: // scenarios.
1087: final void writeScalarPaddedString(int codePoint, String string,
1088: int paddedLength) throws SqlException {
1089: int stringLength = string.length();
1090: ensureLength(offset_ + paddedLength + 4);
1091: bytes_[offset_++] = (byte) (((paddedLength + 4) >>> 8) & 0xff);
1092: bytes_[offset_++] = (byte) ((paddedLength + 4) & 0xff);
1093: bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff);
1094: bytes_[offset_++] = (byte) (codePoint & 0xff);
1095: offset_ = ccsidManager_.convertFromUCS2(string, bytes_,
1096: offset_, netAgent_);
1097: for (int i = 0; i < paddedLength - stringLength; i++) {
1098: bytes_[offset_++] = ccsidManager_.space_;
1099: }
1100: }
1101:
1102: // this method inserts ddm character data into the buffer and pad's the
1103: // data with the ccsid manager's space character if the character data length
1104: // is less than paddedLength.
1105: // Not: this method is not to be used for String truncation and the string length
1106: // must be <= paddedLength.
1107: // This method assumes that the String argument can be
1108: // converted by the ccsid manager. This should be fine because usually
1109: // there are restrictions on the characters which can be used for ddm
1110: // character data. This method also assumes that the string.length() will
1111: // be the number of bytes following the conversion.
1112: final void writeScalarPaddedString(String string, int paddedLength)
1113: throws SqlException {
1114: int stringLength = string.length();
1115: ensureLength(offset_ + paddedLength);
1116: offset_ = ccsidManager_.convertFromUCS2(string, bytes_,
1117: offset_, netAgent_);
1118: for (int i = 0; i < paddedLength - stringLength; i++) {
1119: bytes_[offset_++] = ccsidManager_.space_;
1120: }
1121: }
1122:
1123: // this method writes a 4 byte length/codepoint pair plus the bytes contained
1124: // in array buff to the buffer.
1125: // the 2 length bytes in the llcp will contain the length of the data plus
1126: // the length of the llcp. This method does not handle scenarios which
1127: // require extended length bytes.
1128: final void writeScalarBytes(int codePoint, byte[] buff) {
1129: int buffLength = buff.length;
1130: ensureLength(offset_ + buffLength + 4);
1131: bytes_[offset_++] = (byte) (((buffLength + 4) >>> 8) & 0xff);
1132: bytes_[offset_++] = (byte) ((buffLength + 4) & 0xff);
1133: bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff);
1134: bytes_[offset_++] = (byte) (codePoint & 0xff);
1135: System.arraycopy(buff, 0, bytes_, offset_, buffLength);
1136: offset_ += buffLength;
1137: }
1138:
1139: // this method inserts a 4 byte length/codepoint pair plus length number of bytes
1140: // from array buff starting at offset start.
1141: // Note: no checking will be done on the values of start and length with respect
1142: // the actual length of the byte array. The caller must provide the correct
1143: // values so an array index out of bounds exception does not occur.
1144: // the length will contain the length of the data plus the length of the llcp.
1145: // This method does not handle scenarios which require extended length bytes.
1146: final void writeScalarBytes(int codePoint, byte[] buff, int start,
1147: int length) {
1148: ensureLength(offset_ + length + 4);
1149: bytes_[offset_++] = (byte) (((length + 4) >>> 8) & 0xff);
1150: bytes_[offset_++] = (byte) ((length + 4) & 0xff);
1151: bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff);
1152: bytes_[offset_++] = (byte) (codePoint & 0xff);
1153: System.arraycopy(buff, start, bytes_, offset_, length);
1154: offset_ += length;
1155: }
1156:
1157: // insert a 4 byte length/codepoint pair plus ddm binary data into the
1158: // buffer. The binary data is padded if needed with the padByte
1159: // if the data is less than paddedLength.
1160: // Note: this method is not to be used for truncation and buff.length
1161: // must be <= paddedLength.
1162: // The llcp length bytes will contain the length of the data plus
1163: // the length of the llcp or 4.
1164: // This method does not handle scenarios which require extended length bytes.
1165: final void writeScalarPaddedBytes(int codePoint, byte[] buff,
1166: int paddedLength, byte padByte) {
1167: int buffLength = buff.length;
1168: ensureLength(offset_ + paddedLength + 4);
1169: bytes_[offset_++] = (byte) (((paddedLength + 4) >>> 8) & 0xff);
1170: bytes_[offset_++] = (byte) ((paddedLength + 4) & 0xff);
1171: bytes_[offset_++] = (byte) ((codePoint >>> 8) & 0xff);
1172: bytes_[offset_++] = (byte) (codePoint & 0xff);
1173: System.arraycopy(buff, 0, bytes_, offset_, buffLength);
1174: offset_ += buffLength;
1175:
1176: for (int i = 0; i < paddedLength - buffLength; i++) {
1177: bytes_[offset_++] = padByte;
1178: }
1179: }
1180:
1181: // this method inserts binary data into the buffer and pads the
1182: // data with the padByte if the data length is less than the paddedLength.
1183: // Not: this method is not to be used for truncation and buff.length
1184: // must be <= paddedLength.
1185: final void writeScalarPaddedBytes(byte[] buff, int paddedLength,
1186: byte padByte) {
1187: int buffLength = buff.length;
1188: ensureLength(offset_ + paddedLength);
1189: System.arraycopy(buff, 0, bytes_, offset_, buffLength);
1190: offset_ += buffLength;
1191:
1192: for (int i = 0; i < paddedLength - buffLength; i++) {
1193: bytes_[offset_++] = padByte;
1194: }
1195: }
1196:
// write the request to the OutputStream and flush the OutputStream.
// trace the send if PROTOCOL trace is on. A request containing no data
// is silently skipped (nothing is written or flushed).
protected void flush(java.io.OutputStream socketOutputStream)
        throws java.io.IOException {
if (doesRequestContainData()) {
// fix up the dss length field(s) before the bytes go on the wire
finalizeDssLength();
sendBytes(socketOutputStream);
}
}
1206:
// write the accumulated request bytes [0, offset_) to the stream and
// flush. The finally block runs whether or not the write succeeds:
// first any password in the buffer is masked (so it can never appear in
// the trace), then the send is traced if logging is enabled, and finally
// the buffer is cleared for the next request.
protected void sendBytes(java.io.OutputStream socketOutputStream)
        throws java.io.IOException {
try {
socketOutputStream.write(bytes_, 0, offset_);
socketOutputStream.flush();
} finally {
if (netAgent_.logWriter_ != null && passwordIncluded_) {
// if password is in the buffer, need to mask it out
// before the bytes are traced below.
maskOutPassword();
passwordIncluded_ = false;
}
if (netAgent_.loggingEnabled()) {
((NetLogWriter) netAgent_.logWriter_)
                .traceProtocolFlow(bytes_, 0, offset_,
                        NetLogWriter.TYPE_TRACE_SEND,
                        "Request", "flush", 1); // tracepoint
}
// reset the buffer state regardless of the write outcome
clearBuffer();
}
}
1227:
1228: final void maskOutPassword() {
1229: try {
1230: String maskChar = "*";
1231: // construct a mask using the maskChar.
1232: StringBuffer mask = new StringBuffer();
1233: for (int i = 0; i < passwordLength_; i++) {
1234: mask.append(maskChar);
1235: }
1236: // try to write mask over password.
1237: ccsidManager_.convertFromUCS2(mask.toString(), bytes_,
1238: passwordStart_, netAgent_);
1239: } catch (SqlException sqle) {
1240: // failed to convert mask,
1241: // them simply replace with 0xFF.
1242: for (int i = 0; i < passwordLength_; i++) {
1243: bytes_[passwordStart_ + i] = (byte) 0xFF;
1244: }
1245: }
1246: }
1247:
1248: // insert a java byte into the buffer.
1249: final void writeByte(byte v) {
1250: ensureLength(offset_ + 1);
1251: bytes_[offset_++] = v;
1252: }
1253:
1254: // insert a java short into the buffer.
1255: final void writeShort(short v) {
1256: ensureLength(offset_ + 2);
1257: org.apache.derby.client.am.SignedBinary.shortToBigEndianBytes(
1258: bytes_, offset_, v);
1259: offset_ += 2;
1260: }
1261:
1262: // insert a java int into the buffer.
1263: void writeInt(int v) {
1264: ensureLength(offset_ + 4);
1265: org.apache.derby.client.am.SignedBinary.intToBigEndianBytes(
1266: bytes_, offset_, v);
1267: offset_ += 4;
1268: }
1269:
1270: // insert a java long into the buffer.
1271: final void writeLong(long v) {
1272: ensureLength(offset_ + 8);
1273: org.apache.derby.client.am.SignedBinary.longToBigEndianBytes(
1274: bytes_, offset_, v);
1275: offset_ += 8;
1276: }
1277:
1278: //-- The following are the write short/int/long in bigEndian byte ordering --
1279:
1280: // when writing Fdoca data.
1281: protected void writeShortFdocaData(short v) {
1282: ensureLength(offset_ + 2);
1283: org.apache.derby.client.am.SignedBinary.shortToBigEndianBytes(
1284: bytes_, offset_, v);
1285: offset_ += 2;
1286: }
1287:
1288: // when writing Fdoca data.
1289: protected void writeIntFdocaData(int v) {
1290: ensureLength(offset_ + 4);
1291: org.apache.derby.client.am.SignedBinary.intToBigEndianBytes(
1292: bytes_, offset_, v);
1293: offset_ += 4;
1294: }
1295:
1296: // when writing Fdoca data.
1297: protected void writeLongFdocaData(long v) {
1298: ensureLength(offset_ + 8);
1299: org.apache.derby.client.am.SignedBinary.longToBigEndianBytes(
1300: bytes_, offset_, v);
1301: offset_ += 8;
1302: }
1303:
1304: // insert a java float into the buffer.
1305: protected void writeFloat(float v) {
1306: ensureLength(offset_ + 4);
1307: org.apache.derby.client.am.FloatingPoint.floatToIeee754Bytes(
1308: bytes_, offset_, v);
1309: offset_ += 4;
1310: }
1311:
1312: // insert a java double into the buffer.
1313: protected void writeDouble(double v) {
1314: ensureLength(offset_ + 8);
1315: org.apache.derby.client.am.FloatingPoint.doubleToIeee754Bytes(
1316: bytes_, offset_, v);
1317: offset_ += 8;
1318: }
1319:
// insert a java.math.BigDecimal into the buffer as packed decimal bytes,
// converted according to the declared precision and scale. The converter
// returns the number of bytes it actually wrote, which advances offset_.
// NOTE(review): ensureLength reserves 16 bytes, so the packed form is
// presumably at most 16 bytes — confirm against the Decimal converter.
final void writeBigDecimal(java.math.BigDecimal v,
        int declaredPrecision, int declaredScale)
        throws SqlException {
ensureLength(offset_ + 16);
int length = org.apache.derby.client.am.Decimal
        .bigDecimalToPackedDecimalBytes(bytes_, offset_, v,
                declaredPrecision, declaredScale);
offset_ += length;
}
1330:
// insert a java.sql.Date into the buffer as a fixed 10 byte encoding.
// An UnsupportedEncodingException from the converter is wrapped in a
// SqlException with SQLState UNSUPPORTED_ENCODING.
final void writeDate(java.sql.Date date) throws SqlException {
try {
ensureLength(offset_ + 10);
org.apache.derby.client.am.DateTime.dateToDateBytes(bytes_,
                offset_, date);
offset_ += 10;
} catch (java.io.UnsupportedEncodingException e) {
throw new SqlException(netAgent_.logWriter_,
                new ClientMessageId(SQLState.UNSUPPORTED_ENCODING),
                "java.sql.Date", "DATE", e);
}
}
1343:
// insert a java.sql.Time into the buffer as a fixed 8 byte encoding.
// An UnsupportedEncodingException from the converter is wrapped in a
// SqlException with SQLState UNSUPPORTED_ENCODING.
final void writeTime(java.sql.Time time) throws SqlException {
try {
ensureLength(offset_ + 8);
org.apache.derby.client.am.DateTime.timeToTimeBytes(bytes_,
                offset_, time);
offset_ += 8;
} catch (UnsupportedEncodingException e) {
throw new SqlException(netAgent_.logWriter_,
                new ClientMessageId(SQLState.UNSUPPORTED_ENCODING),
                "java.sql.Time", "TIME", e);
}
}
1356:
// insert a java.sql.Timestamp into the buffer as a fixed 26 byte
// encoding. An UnsupportedEncodingException from the converter is
// wrapped in a SqlException with SQLState UNSUPPORTED_ENCODING.
final void writeTimestamp(java.sql.Timestamp timestamp)
        throws SqlException {
try {
ensureLength(offset_ + 26);
org.apache.derby.client.am.DateTime
                .timestampToTimestampBytes(bytes_, offset_,
                        timestamp);
offset_ += 26;
} catch (UnsupportedEncodingException e) {
throw new SqlException(netAgent_.logWriter_,
                new ClientMessageId(SQLState.UNSUPPORTED_ENCODING),
                "java.sql.Timestamp", "TIMESTAMP", e);
}
}
1371:
1372: // insert a java boolean into the buffer. the boolean is written
1373: // as a signed byte having the value 0 or 1.
1374: final void writeBoolean(boolean v) {
1375: ensureLength(offset_ + 1);
1376: bytes_[offset_++] = (byte) ((v ? 1 : 0) & 0xff);
1377: }
1378:
1379: // follows the TYPDEF rules (note: don't think ddm char data is ever length
1380: // delimited)
1381: // should this throw SqlException
1382: // Will write a varchar mixed or single
1383: // this was writeLDString
1384: final void writeSingleorMixedCcsidLDString(String s, String encoding)
1385: throws SqlException {
1386: byte[] b;
1387: try {
1388: b = s.getBytes(encoding);
1389: } catch (UnsupportedEncodingException e) {
1390: throw new SqlException(netAgent_.logWriter_,
1391: new ClientMessageId(SQLState.UNSUPPORTED_ENCODING),
1392: "String", "byte", e);
1393: }
1394: if (b.length > 0x7FFF) {
1395: throw new SqlException(netAgent_.logWriter_,
1396: new ClientMessageId(SQLState.LANG_STRING_TOO_LONG),
1397: "32767");
1398: }
1399: ensureLength(offset_ + b.length + 2);
1400: writeLDBytesX(b.length, b);
1401: }
1402:
1403: final void writeLDBytes(byte[] bytes) {
1404: ensureLength(offset_ + bytes.length + 2);
1405: writeLDBytesX(bytes.length, bytes);
1406: }
1407:
1408: // private helper method which should only be called by a Request method.
1409: // must call ensureLength before calling this method.
1410: // added for code reuse and helps perf by reducing ensureLength calls.
1411: // ldSize and bytes.length may not be the same. this is true
1412: // when writing graphic ld strings.
1413: private final void writeLDBytesX(int ldSize, byte[] bytes) {
1414: bytes_[offset_++] = (byte) ((ldSize >>> 8) & 0xff);
1415: bytes_[offset_++] = (byte) (ldSize & 0xff);
1416: System.arraycopy(bytes, 0, bytes_, offset_, bytes.length);
1417: offset_ += bytes.length;
1418: }
1419:
1420: // does it follows
1421: // ccsid manager or typdef rules. should this method write ddm character
1422: // data or fodca data right now it is coded for ddm char data only
1423: final void writeDDMString(String s) throws SqlException {
1424: ensureLength(offset_ + s.length());
1425: offset_ = ccsidManager_.convertFromUCS2(s, bytes_, offset_,
1426: netAgent_);
1427: }
1428:
// build, in a detached byte array (NOT the request buffer), the llcp for
// an encrypted lob EXTDTA object: the 2 byte length + 2 byte codepoint,
// then any extended length bytes, then an optional null indicator byte.
// The caller assembles/encrypts these bytes itself; this method must
// never touch the request buffer or call ensureLength.
private byte[] buildLengthAndCodePointForEncryptedLob(
        int codePoint, int leftToRead, boolean writeNullByte,
        int extendedLengthByteCount) throws DisconnectException {
byte[] lengthAndCodepoint = new byte[4];
byte[] extendedLengthBytes = new byte[extendedLengthByteCount];

if (extendedLengthByteCount > 0) {
// extended length case: the 2 byte length field holds the llcp size
// plus the extended byte count, with the high bit (0x8000) set to
// flag that extended length bytes follow.
lengthAndCodepoint = writeEXTDTALengthCodePointForEncryption(
        0x8004 + extendedLengthByteCount, codePoint);

if (writeNullByte) {

// the extended length counts the null indicator byte too
extendedLengthBytes = writeExtendedLengthBytesForEncryption(
        extendedLengthByteCount, leftToRead + 1);
} else {
extendedLengthBytes = writeExtendedLengthBytesForEncryption(
        extendedLengthByteCount, leftToRead);
}
} else {
// non-extended case: the length field is data + 4 byte llcp
// (+ 1 for the null indicator byte when present).
if (writeNullByte) {
lengthAndCodepoint = writeEXTDTALengthCodePointForEncryption(
        leftToRead + 4 + 1, codePoint);
} else {
lengthAndCodepoint = writeEXTDTALengthCodePointForEncryption(
        leftToRead + 4, codePoint);
}
}

if (extendedLengthByteCount > 0) {
// append the extended length bytes after the 4 byte llcp
byte[] newLengthAndCodepoint = new byte[4 + extendedLengthBytes.length];
System
        .arraycopy(lengthAndCodepoint, 0,
                newLengthAndCodepoint, 0,
                lengthAndCodepoint.length);
System.arraycopy(extendedLengthBytes, 0,
        newLengthAndCodepoint, lengthAndCodepoint.length,
        extendedLengthBytes.length);
lengthAndCodepoint = newLengthAndCodepoint;
}

if (writeNullByte) {
// append a single 0x00 null indicator byte at the end
byte[] nullByte = new byte[1 + lengthAndCodepoint.length];
System.arraycopy(lengthAndCodepoint, 0, nullByte, 0,
        lengthAndCodepoint.length);
nullByte[lengthAndCodepoint.length] = 0;
lengthAndCodepoint = nullByte;
}
return lengthAndCodepoint;
}
1479:
// write the llcp for a (non-encrypted) lob EXTDTA object directly into
// the request buffer: the 2 byte length + 2 byte codepoint, then any
// extended length bytes, then an optional null indicator byte.
private void buildLengthAndCodePointForLob(int codePoint,
        int leftToRead, boolean writeNullByte,
        int extendedLengthByteCount) throws DisconnectException {
if (extendedLengthByteCount > 0) {
// extended length case: the 2 byte length field holds the llcp size
// plus the extended byte count, with the high bit (0x8000) set to
// flag that extended length bytes follow.
// (method should never ensure length)
writeLengthCodePoint(0x8004 + extendedLengthByteCount,
                codePoint);

if (writeNullByte) {
// the extended length counts the null indicator byte too
writeExtendedLengthBytes(extendedLengthByteCount,
                    leftToRead + 1);
} else {
writeExtendedLengthBytes(extendedLengthByteCount,
                    leftToRead);
}
} else {
// non-extended case: the length field is data + 4 byte llcp
// (+ 1 for the null indicator byte when present).
if (writeNullByte) {
writeLengthCodePoint(leftToRead + 4 + 1, codePoint);
} else {
writeLengthCodePoint(leftToRead + 4, codePoint);
}
}

// write the null byte, if necessary
if (writeNullByte) {
write1Byte(0x0);
}

}
1509:
// record the buffer offset where the current dss length field lives;
// used later when the dss length is finalized.
public void setDssLengthLocation(int location) {
dssLengthLocation_ = location;
}
1513:
// set the correlation id to associate with the request being built.
public void setCorrelationID(int id) {
correlationID_ = id;
}
1517: }
|