0001: /*
0002: * $RCSfile: TIFFDecompressor.java,v $
0003: *
0004: *
0005: * Copyright (c) 2005 Sun Microsystems, Inc. All Rights Reserved.
0006: *
0007: * Redistribution and use in source and binary forms, with or without
0008: * modification, are permitted provided that the following conditions
0009: * are met:
0010: *
0011: * - Redistribution of source code must retain the above copyright
0012: * notice, this list of conditions and the following disclaimer.
0013: *
0014: * - Redistribution in binary form must reproduce the above copyright
0015: * notice, this list of conditions and the following disclaimer in
0016: * the documentation and/or other materials provided with the
0017: * distribution.
0018: *
0019: * Neither the name of Sun Microsystems, Inc. or the names of
0020: * contributors may be used to endorse or promote products derived
0021: * from this software without specific prior written permission.
0022: *
0023: * This software is provided "AS IS," without a warranty of any
0024: * kind. ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND
0025: * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,
0026: * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY
0027: * EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL
0028: * NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF
0029: * USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS
0030: * DERIVATIVES. IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR
0031: * ANY LOST REVENUE, PROFIT OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL,
0032: * CONSEQUENTIAL, INCIDENTAL OR PUNITIVE DAMAGES, HOWEVER CAUSED AND
0033: * REGARDLESS OF THE THEORY OF LIABILITY, ARISING OUT OF THE USE OF OR
0034: * INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS BEEN ADVISED OF THE
0035: * POSSIBILITY OF SUCH DAMAGES.
0036: *
0037: * You acknowledge that this software is not designed or intended for
0038: * use in the design, construction, operation or maintenance of any
0039: * nuclear facility.
0040: *
0041: * $Revision: 1.3 $
0042: * $Date: 2007/03/09 20:14:40 $
0043: * $State: Exp $
0044: */
0045: package com.sun.media.imageio.plugins.tiff;
0046:
0047: import java.awt.Rectangle;
0048: import java.awt.Transparency;
0049: import java.awt.color.ColorSpace;
0050: import java.awt.image.BufferedImage;
0051: import java.awt.image.ColorModel;
0052: import java.awt.image.ComponentColorModel;
0053: import java.awt.image.ComponentSampleModel;
0054: import java.awt.image.DataBuffer;
0055: import java.awt.image.DataBufferByte;
0056: import java.awt.image.DataBufferFloat;
0057: import java.awt.image.DataBufferInt;
0058: import java.awt.image.DataBufferShort;
0059: import java.awt.image.DataBufferUShort;
0060: import java.awt.image.IndexColorModel;
0061: import java.awt.image.MultiPixelPackedSampleModel;
0062: import java.awt.image.PixelInterleavedSampleModel;
0063: import java.awt.image.Raster;
0064: import java.awt.image.SampleModel;
0065: import java.awt.image.SinglePixelPackedSampleModel;
0066: import java.awt.image.WritableRaster;
0067: import java.io.ByteArrayInputStream;
0068: import java.io.IOException;
0069: import java.nio.ByteOrder;
0070: import javax.imageio.IIOException;
0071: import javax.imageio.ImageReader;
0072: import javax.imageio.ImageTypeSpecifier;
0073: import javax.imageio.metadata.IIOMetadata;
0074: import javax.imageio.stream.ImageInputStream;
0075: import javax.imageio.stream.MemoryCacheImageInputStream;
0076: import com.sun.media.imageioimpl.common.BogusColorSpace;
0077: import com.sun.media.imageioimpl.common.ImageUtil;
0078: import com.sun.media.imageioimpl.common.SimpleCMYKColorSpace;
0079:
0080: /**
0081: * A class defining a pluggable TIFF decompressor.
0082: *
0083: * <p> The mapping between source and destination pixel coordinates
0084: * is given by the equations:
0085: *
0086: * <pre>
0087: * dx = (sx - sourceXOffset)/subsampleX + dstXOffset;
0088: * dy = (sy - sourceYOffset)/subsampleY + dstYOffset;
0089: * </pre>
0090: *
0091: * Note that the mapping from source coordinates to destination
0092: * coordinates is not one-to-one if subsampling is being used, since
0093: * only certain source pixels are to be copied to the
0094: * destination. However, the inverse mapping is always one-to-one:
0095: *
0096: * <pre>
0097: * sx = (dx - dstXOffset)*subsampleX + sourceXOffset;
0098: * sy = (dy - dstYOffset)*subsampleY + sourceYOffset;
0099: * </pre>
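 *
 * <p> As a purely illustrative example (the values here are arbitrary and
 * not taken from any particular image), if <code>sourceXOffset = 1</code>,
 * <code>subsampleX = 2</code>, and <code>dstXOffset = 0</code>, then
 *
 * <pre>
 * sx = 1  maps to  dx = 0
 * sx = 3  maps to  dx = 1
 * sx = 5  maps to  dx = 2
 * </pre>
 *
 * while even-numbered source columns are not copied at all.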
0100: *
0101: * <p> Decompressors may be written with various levels of complexity.
0102: * The most complex decompressors will override the
0103: * <code>decode</code> method, and will perform all the work of
0104: * decoding, subsampling, offsetting, clipping, and format conversion.
0105: * This approach may be the most efficient, since it is possible to
0106: * avoid the use of extra image buffers, and it may be possible to
0107: * avoid decoding portions of the image that will not be copied into
0108: * the destination.
0109: *
0110: * <p> Less ambitious decompressors may override the
0111: * <code>decodeRaw</code> method, which is responsible for
0112: * decompressing the entire tile or strip into a byte array (or other
0113: * appropriate datatype). The default implementation of
0114: * <code>decode</code> will perform all necessary setup of buffers,
0115: * call <code>decodeRaw</code> to perform the actual decoding, perform
0116: * subsampling, and copy the results into the final destination image.
0117: * Where possible, it will pass the real image buffer to
0118: * <code>decodeRaw</code> in order to avoid making an extra copy.
0119: *
0120: * <p> Slightly more ambitious decompressors may override
0121: * <code>decodeRaw</code>, but avoid writing pixels that will be
0122: * discarded in the subsampling phase.
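 *
 * <p> As a rough sketch (not a working implementation; the class and
 * variable names are purely illustrative), a decompressor taking the
 * <code>decodeRaw</code> approach might override the byte-oriented
 * <code>decodeRaw</code> variant declared by this class, with the actual
 * decompression step only indicated by a comment:
 *
 * <pre>
 * public class MyDecompressor extends TIFFDecompressor {
 *     public void decodeRaw(byte[] b, int dstOffset,
 *                           int bitsPerPixel, int scanlineStride)
 *             throws IOException {
 *         stream.seek(offset);
 *         byte[] input = new byte[byteCount];
 *         stream.readFully(input);
 *         // Expand 'input' into 'b' starting at dstOffset, writing
 *         // scanlineStride bytes for each row of the strip or tile.
 *     }
 * }
 * </pre>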
0123: */
0124: public abstract class TIFFDecompressor {
0125:
0126: private static final boolean DEBUG = false; // XXX false for release!
0127:
0128: /**
0129: * The <code>ImageReader</code> calling this
0130: * <code>TIFFDecompressor</code>.
0131: */
0132: protected ImageReader reader;
0133:
0134: /**
0135: * The <code>IIOMetadata</code> object containing metadata for the
0136: * current image.
0137: */
0138: protected IIOMetadata metadata;
0139:
0140: /**
0141: * The value of the <code>PhotometricInterpretation</code> tag.
0142: * Legal values are {@link
0143: * BaselineTIFFTagSet#PHOTOMETRIC_INTERPRETATION_WHITE_IS_ZERO},
0144: * {@link
0145: * BaselineTIFFTagSet#PHOTOMETRIC_INTERPRETATION_BLACK_IS_ZERO},
0146: * {@link BaselineTIFFTagSet#PHOTOMETRIC_INTERPRETATION_RGB},
0147: * {@link
0148: * BaselineTIFFTagSet#PHOTOMETRIC_INTERPRETATION_PALETTE_COLOR},
0149: * {@link
0150: * BaselineTIFFTagSet#PHOTOMETRIC_INTERPRETATION_TRANSPARENCY_MASK},
0151: * {@link BaselineTIFFTagSet#PHOTOMETRIC_INTERPRETATION_Y_CB_CR},
0152: * {@link BaselineTIFFTagSet#PHOTOMETRIC_INTERPRETATION_CIELAB},
0153: * {@link BaselineTIFFTagSet#PHOTOMETRIC_INTERPRETATION_ICCLAB},
0154: * or other value defined by a TIFF extension.
0155: */
0156: protected int photometricInterpretation;
0157:
0158: /**
0159: * The value of the <code>Compression</code> tag. Legal values are
0160: * {@link BaselineTIFFTagSet#COMPRESSION_NONE}, {@link
0161: * BaselineTIFFTagSet#COMPRESSION_CCITT_RLE}, {@link
0162: * BaselineTIFFTagSet#COMPRESSION_CCITT_T_4}, {@link
0163: * BaselineTIFFTagSet#COMPRESSION_CCITT_T_6}, {@link
0164: * BaselineTIFFTagSet#COMPRESSION_LZW}, {@link
0165: * BaselineTIFFTagSet#COMPRESSION_OLD_JPEG}, {@link
0166: * BaselineTIFFTagSet#COMPRESSION_JPEG}, {@link
0167: * BaselineTIFFTagSet#COMPRESSION_ZLIB}, {@link
0168: * BaselineTIFFTagSet#COMPRESSION_PACKBITS}, {@link
0169: * BaselineTIFFTagSet#COMPRESSION_DEFLATE}, or other value
0170: * defined by a TIFF extension.
0171: */
0172: protected int compression;
0173:
0174: /**
0175: * <code>true</code> if the image is encoded using separate planes.
0176: */
0177: protected boolean planar;
0178:
0179: /**
0180: * The value of the <code>SamplesPerPixel</code> tag.
0181: */
0182: protected int samplesPerPixel;
0183:
0184: /**
0185: * The value of the <code>BitsPerSample</code> tag.
0186: *
0187: */
0188: protected int[] bitsPerSample;
0189:
0190: /**
0191: * The value of the <code>SampleFormat</code> tag. Legal values
0192: * are {@link BaselineTIFFTagSet#SAMPLE_FORMAT_UNSIGNED_INTEGER},
0193: * {@link BaselineTIFFTagSet#SAMPLE_FORMAT_SIGNED_INTEGER}, {@link
0194: * BaselineTIFFTagSet#SAMPLE_FORMAT_FLOATING_POINT}, {@link
0195: * BaselineTIFFTagSet#SAMPLE_FORMAT_UNDEFINED}, or other value
0196: * defined by a TIFF extension.
0197: */
0198: protected int[] sampleFormat = new int[] { BaselineTIFFTagSet.SAMPLE_FORMAT_UNSIGNED_INTEGER };
0199:
0200: /**
0201: * The value of the <code>ExtraSamples</code> tag. Legal values
0202: * are {@link BaselineTIFFTagSet#EXTRA_SAMPLES_UNSPECIFIED},
0203: * {@link BaselineTIFFTagSet#EXTRA_SAMPLES_ASSOCIATED_ALPHA},
0204: * {@link BaselineTIFFTagSet#EXTRA_SAMPLES_UNASSOCIATED_ALPHA},
0205: * or other value defined by a TIFF extension.
0206: */
0207: protected int[] extraSamples;
0208:
0209: /**
0210: * The value of the <code>ColorMap</code> tag.
0211: *
0212: */
0213: protected char[] colorMap;
0214:
0215: // Region of input stream containing the data
0216:
0217: /**
0218: * The <code>ImageInputStream</code> containing the TIFF source
0219: * data.
0220: */
0221: protected ImageInputStream stream;
0222:
0223: /**
0224: * The offset in the source <code>ImageInputStream</code> of the
0225: * start of the data to be decompressed.
0226: */
0227: protected long offset;
0228:
0229: /**
0230: * The number of bytes of data from the source
0231: * <code>ImageInputStream</code> to be decompressed.
0232: */
0233: protected int byteCount;
0234:
0235: // Region of the file image represented in the stream
0236: // This is unaffected by subsampling
0237:
0238: /**
0239: * The X coordinate of the upper-left pixel of the source region
0240: * being decoded from the source stream. This value is not affected
0241: * by source subsampling.
0242: */
0243: protected int srcMinX;
0244:
0245: /**
0246: * The Y coordinate of the upper-left pixel of the source region
0247: * being decoded from the source stream. This value is not affected
0248: * by source subsampling.
0249: */
0250: protected int srcMinY;
0251:
0252: /**
0253: * The width of the source region being decoded from the source
0254: * stream. This value is not affected by source subsampling.
0255: */
0256: protected int srcWidth;
0257:
0258: /**
0259: * The height of the source region being decoded from the source
0260: * stream. This value is not affected by source subsampling.
0261: */
0262: protected int srcHeight;
0263:
0264: // Subsampling to be performed
0265:
0266: /**
0267: * The source X offset used, along with <code>dstXOffset</code>
0268: * and <code>subsampleX</code>, to map between horizontal source
0269: * and destination pixel coordinates.
0270: */
0271: protected int sourceXOffset;
0272:
0273: /**
0274: * The horizontal destination offset used, along with
0275: * <code>sourceXOffset</code> and <code>subsampleX</code>, to map
0276: * between horizontal source and destination pixel coordinates.
0277: * See the class comment for the source-to-destination
0278: * mapping equations.
0279: */
0280: protected int dstXOffset;
0281:
0282: /**
0283: * The source Y offset used, along with <code>dstYOffset</code>
0284: * and <code>subsampleY</code>, to map between vertical source and
0285: * destination pixel coordinates.
0286: */
0287: protected int sourceYOffset;
0288:
0289: /**
0290: * The vertical destination offset used, along with
0291: * <code>sourceYOffset</code> and <code>subsampleY</code>, to map
0292: * between vertical source and destination pixel coordinates.
0293: * See the class comment for the source-to-destination
0294: * mapping equations.
0295: */
0296: protected int dstYOffset;
0297:
0298: /**
0299: * The horizontal subsampling factor. A factor of 1 means that
0300: * every column is copied to the destination; a factor of 2 means
0301: * that every second column is copied, etc.
0302: */
0303: protected int subsampleX;
0304:
0305: /**
0306: * The vertical subsampling factor. A factor of 1 means that
0307: * every row is copied to the destination; a factor of 2 means
0308: * that every second row is copied, etc.
0309: */
0310: protected int subsampleY;
0311:
0312: // Band subsetting/rearrangement
0313:
0314: /**
0315: * The sequence of source bands that are to be copied into the
0316: * destination.
0317: */
0318: protected int[] sourceBands;
0319:
0320: /**
0321: * The sequence of destination bands to receive the source data.
0322: */
0323: protected int[] destinationBands;
0324:
0325: // Destination for decodeRaw
0326:
0327: /**
0328: * A <code>BufferedImage</code> for the <code>decodeRaw</code>
0329: * method to write into.
0330: */
0331: protected BufferedImage rawImage;
0332:
0333: // Destination
0334:
0335: /**
0336: * The final destination image.
0337: */
0338: protected BufferedImage image;
0339:
0340: /**
0341: * The X coordinate of the upper left pixel to be written in the
0342: * destination image.
0343: */
0344: protected int dstMinX;
0345:
0346: /**
0347: * The Y coordinate of the upper left pixel to be written in the
0348: * destination image.
0349: */
0350: protected int dstMinY;
0351:
0352: /**
0353: * The width of the region of the destination image to be written.
0354: */
0355: protected int dstWidth;
0356:
0357: /**
0358: * The height of the region of the destination image to be written.
0359: */
0360: protected int dstHeight;
0361:
0362: // Region of source contributing to the destination
0363:
0364: /**
0365: * The X coordinate of the upper-left source pixel that will
0366: * actually be copied into the destination image, taking into
0367: * account all subsampling, offsetting, and clipping. That is,
0368: * the pixel at (<code>activeSrcMinX</code>,
0369: * <code>activeSrcMinY</code>) is to be copied into the
0370: * destination pixel at (<code>dstMinX</code>,
0371: * <code>dstMinY</code>).
0372: *
0373: * <p> The pixels in the source region to be copied are
0374: * those with X coordinates of the form <code>activeSrcMinX +
0375: * k*subsampleX</code>, where <code>k</code> is an integer such
0376: * that <code>0 &lt;= k &lt; dstWidth</code>.
0377: */
0378: protected int activeSrcMinX;
0379:
0380: /**
0381: * The Y coordinate of the upper-left source pixel that will
0382: * actually be copied into the destination image, taking into account
0383: * all subsampling, offsetting, and clipping.
0384: *
0385: * <p> The pixels in the source region to be copied are
0386: * those with Y coordinates of the form <code>activeSrcMinY +
0387: * k*subsampleY</code>, where <code>k</code> is an integer such
0388: * that <code>0 &lt;= k &lt; dstHeight</code>.
0389: */
0390: protected int activeSrcMinY;
0391:
0392: /**
0393: * The width of the source region that will actually be copied
0394: * into the destination image, taking into account all
0395: * subsampling, offsetting, and clipping.
0396: *
0397: * <p> The active source width will always be equal to
0398: * <code>(dstWidth - 1)*subsampleX + 1</code>.
0399: */
0400: protected int activeSrcWidth;
0401:
0402: /**
0403: * The height of the source region that will actually be copied
0404: * into the destination image, taking into account all
0405: * subsampling, offsetting, and clipping.
0406: *
0407: * <p> The active source height will always be equal to
0408: * <code>(dstHeight - 1)*subsampleY + 1</code>.
0409: */
0410: protected int activeSrcHeight;
0411:
0412: /**
0413: * A <code>TIFFColorConverter</code> object describing the color space of
0414: * the encoded pixel data, or <code>null</code>.
0415: */
0416: protected TIFFColorConverter colorConverter;
0417:
0418: boolean isBilevel;
0419: boolean isContiguous;
0420: boolean isImageSimple;
0421: boolean adjustBitDepths;
0422: int[][] bitDepthScale;
0423:
0424: // source pixel at (sx, sy) should map to dst pixel (dx, dy), where:
0425: //
0426: // dx = (sx - sourceXOffset)/subsampleX + dstXOffset;
0427: // dy = (sy - sourceYOffset)/subsampleY + dstYOffset;
0428: //
0429: // Note that this mapping is many-to-one. Source pixels such that
0430: // (sx - sourceXOffset) % subsampleX != 0 should not be copied
0431: // (and similarly for y).
0432: //
0433: // The backwards mapping from dest to source is one-to-one:
0434: //
0435: // sx = (dx - dstXOffset)*subsampleX + sourceXOffset;
0436: // sy = (dy - dstYOffset)*subsampleY + sourceYOffset;
0437: //
0438: // The reader will always hand us the full source region as it
0439: // exists in the file. It will take care of clipping the dest region
0440: // to exactly those dest pixels that are present in the source region.
0441:
0442: /**
0443: * Create a <code>PixelInterleavedSampleModel</code> for use in creating
0444: * an <code>ImageTypeSpecifier</code>. Its dimensions will be 1x1 and
0445: * it will have ascending band offsets {0, 1, 2, ..., numBands - 1}.
0446: *
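 * <p> For example, <code>createInterleavedSM(DataBuffer.TYPE_BYTE, 3)</code>
 * yields a 1x1 model with a pixel stride of 3, a scanline stride of 3,
 * and band offsets {0, 1, 2}.
 *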
0447: * @param dataType The data type (DataBuffer.TYPE_*).
0448: * @param numBands The number of bands.
0449: * @return A <code>PixelInterleavedSampleModel</code>.
0450: */
0451: // XXX Maybe don't need to have this as a separate method?
0452: static SampleModel createInterleavedSM(int dataType, int numBands) {
0453: int[] bandOffsets = new int[numBands];
0454: for (int i = 0; i < numBands; i++) {
0455: bandOffsets[i] = i;
0456: }
0457: return new PixelInterleavedSampleModel(dataType, 1, // width
0458: 1, // height
0459: numBands, // pixelStride,
0460: numBands, // scanlineStride
0461: bandOffsets);
0462: }
0463:
0464: /**
0465: * Create a <code>ComponentColorModel</code> for use in creating
0466: * an <code>ImageTypeSpecifier</code>.
0467: */
0468: // This code was copied from javax.imageio.ImageTypeSpecifier and
0469: // modified to support floating point data.
0470: static ColorModel createComponentCM(ColorSpace colorSpace,
0471: int numBands, int dataType, boolean hasAlpha,
0472: boolean isAlphaPremultiplied) {
0473: int transparency = hasAlpha ? Transparency.TRANSLUCENT
0474: : Transparency.OPAQUE;
0475:
0476: ColorModel colorModel;
0477: if (dataType == DataBuffer.TYPE_FLOAT
0478: || dataType == DataBuffer.TYPE_DOUBLE) {
0479:
0480: colorModel = new ComponentColorModel(colorSpace, hasAlpha,
0481: isAlphaPremultiplied, transparency, dataType);
0482: } else {
0483: int[] numBits = new int[numBands];
0484: int bits;
0485: if (dataType == DataBuffer.TYPE_BYTE) {
0486: bits = 8;
0487: } else if (dataType == DataBuffer.TYPE_SHORT
0488: || dataType == DataBuffer.TYPE_USHORT) {
0489: bits = 16;
0490: } else if (dataType == DataBuffer.TYPE_INT) {
0491: bits = 32;
0492: } else {
0493: throw new IllegalArgumentException("dataType = "
0494: + dataType);
0495: }
0496: for (int i = 0; i < numBands; i++) {
0497: numBits[i] = bits;
0498: }
0499:
0500: colorModel = new ComponentColorModel(colorSpace, numBits,
0501: hasAlpha, isAlphaPremultiplied, transparency,
0502: dataType);
0503: }
0504:
0505: return colorModel;
0506: }
0507:
0508: private static int createMask(int[] bitsPerSample, int band) {
0509: int mask = (1 << bitsPerSample[band]) - 1;
0510: for (int i = band + 1; i < bitsPerSample.length; i++) {
0511: mask <<= bitsPerSample[i];
0512: }
0513:
0514: return mask;
0515: }
0516:
0517: private static int getDataTypeFromNumBits(int numBits,
0518: boolean isSigned) {
0519: int dataType;
0520:
0521: if (numBits <= 8) {
0522: dataType = DataBuffer.TYPE_BYTE;
0523: } else if (numBits <= 16) {
0524: dataType = isSigned ? DataBuffer.TYPE_SHORT
0525: : DataBuffer.TYPE_USHORT;
0526: } else {
0527: dataType = DataBuffer.TYPE_INT;
0528: }
0529:
0530: return dataType;
0531: }
0532:
0533: private static boolean areIntArraysEqual(int[] a, int[] b) {
0534: if (a == null || b == null) {
0535: if (a == null && b == null) {
0536: return true;
0537: } else { // one is null and one is not
0538: return false;
0539: }
0540: }
0541:
0542: if (a.length != b.length) {
0543: return false;
0544: }
0545:
0546: int length = a.length;
0547: for (int i = 0; i < length; i++) {
0548: if (a[i] != b[i]) {
0549: return false;
0550: }
0551: }
0552:
0553: return true;
0554: }
0555:
0556: /**
0557: * Return the number of bits occupied by <code>dataType</code>,
0558: * which must be one of the <code>DataBuffer</code> <code>TYPE</code>s.
0559: */
0560: private static int getDataTypeSize(int dataType)
0561: throws IIOException {
0562: int dataTypeSize = 0;
0563: switch (dataType) {
0564: case DataBuffer.TYPE_BYTE:
0565: dataTypeSize = 8;
0566: break;
0567: case DataBuffer.TYPE_SHORT:
0568: case DataBuffer.TYPE_USHORT:
0569: dataTypeSize = 16;
0570: break;
0571: case DataBuffer.TYPE_INT:
0572: case DataBuffer.TYPE_FLOAT:
0573: dataTypeSize = 32;
0574: break;
0575: case DataBuffer.TYPE_DOUBLE:
0576: dataTypeSize = 64;
0577: break;
0578: default:
0579: throw new IIOException("Unknown data type " + dataType);
0580: }
0581:
0582: return dataTypeSize;
0583: }
0584:
0585: /**
0586: * Returns the number of bits per pixel.
0587: */
0588: private static int getBitsPerPixel(SampleModel sm) {
0589: int bitsPerPixel = 0;
0590: int[] sampleSize = sm.getSampleSize();
0591: int numBands = sampleSize.length;
0592: for (int i = 0; i < numBands; i++) {
0593: bitsPerPixel += sampleSize[i];
0594: }
0595: return bitsPerPixel;
0596: }
0597:
0598: /**
0599: * Returns whether all samples have the same number of bits.
0600: */
0601: private static boolean areSampleSizesEqual(SampleModel sm) {
0602: boolean allSameSize = true;
0603: int[] sampleSize = sm.getSampleSize();
0604: int sampleSize0 = sampleSize[0];
0605: int numBands = sampleSize.length;
0606:
0607: for (int i = 1; i < numBands; i++) {
0608: if (sampleSize[i] != sampleSize0) {
0609: allSameSize = false;
0610: break;
0611: }
0612: }
0613:
0614: return allSameSize;
0615: }
0616:
0617: /**
0618: * Determines whether the <code>DataBuffer</code> is filled without
0619: * any interspersed padding bits.
0620: */
0621: private static boolean isDataBufferBitContiguous(SampleModel sm)
0622: throws IIOException {
0623: int dataTypeSize = getDataTypeSize(sm.getDataType());
0624:
0625: if (sm instanceof ComponentSampleModel) {
0626: int numBands = sm.getNumBands();
0627: for (int i = 0; i < numBands; i++) {
0628: if (sm.getSampleSize(i) != dataTypeSize) {
0629: // Sample does not fill data element.
0630: return false;
0631: }
0632: }
0633: } else if (sm instanceof MultiPixelPackedSampleModel) {
0634: MultiPixelPackedSampleModel mppsm = (MultiPixelPackedSampleModel) sm;
0635: if (dataTypeSize % mppsm.getPixelBitStride() != 0) {
0636: // Pixels do not fill the data element.
0637: return false;
0638: }
0639: } else if (sm instanceof SinglePixelPackedSampleModel) {
0640: SinglePixelPackedSampleModel sppsm = (SinglePixelPackedSampleModel) sm;
0641: int numBands = sm.getNumBands();
0642: int numBits = 0;
0643: for (int i = 0; i < numBands; i++) {
0644: numBits += sm.getSampleSize(i);
0645: }
0646: if (numBits != dataTypeSize) {
0647: // Pixel does not fill the data element.
0648: return false;
0649: }
0650: } else {
0651: // Unknown SampleModel class.
0652: return false;
0653: }
0654:
0655: return true;
0656: }
0657:
0658: /**
0659: * Reformats data read as bytes into a short or int buffer.
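 * For example, under this big-endian packing the byte pair
 * <code>{0x12, 0x34}</code> becomes the <code>short</code>
 * <code>0x1234</code>.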
0660: */
0661: private static void reformatData(byte[] buf, int bytesPerRow,
0662: int numRows, short[] shortData, int[] intData,
0663: int outOffset, int outStride) throws IIOException {
0664:
0665: if (shortData != null) {
0666: if (DEBUG) {
0667: System.out.println("Reformatting data to short");
0668: }
0669: int inOffset = 0;
0670: int shortsPerRow = bytesPerRow / 2;
0671: int numExtraBytes = bytesPerRow % 2;
0672: for (int j = 0; j < numRows; j++) {
0673: int k = outOffset;
0674: for (int i = 0; i < shortsPerRow; i++) {
0675: shortData[k++] = (short) (((buf[inOffset++] & 0xff) << 8) | (buf[inOffset++] & 0xff));
0676: }
0677: if (numExtraBytes != 0) {
0678: shortData[k++] = (short) ((buf[inOffset++] & 0xff) << 8);
0679: }
0680: outOffset += outStride;
0681: }
0682: } else if (intData != null) {
0683: if (DEBUG) {
0684: System.out.println("Reformatting data to int");
0685: }
0686: int inOffset = 0;
0687: int intsPerRow = bytesPerRow / 4;
0688: int numExtraBytes = bytesPerRow % 4;
0689: for (int j = 0; j < numRows; j++) {
0690: int k = outOffset;
0691: for (int i = 0; i < intsPerRow; i++) {
0692: intData[k++] = ((buf[inOffset++] & 0xff) << 24)
0693: | ((buf[inOffset++] & 0xff) << 16)
0694: | ((buf[inOffset++] & 0xff) << 8)
0695: | (buf[inOffset++] & 0xff);
0696: }
0697: if (numExtraBytes != 0) {
0698: int shift = 24;
0699: int ival = 0;
0700: for (int b = 0; b < numExtraBytes; b++) {
0701: ival |= (buf[inOffset++] & 0xff) << shift;
0702: shift -= 8;
0703: }
0704: intData[k++] = ival;
0705: }
0706: outOffset += outStride;
0707: }
0708: } else {
0709: throw new IIOException(
0710: "shortData == null && intData == null!");
0711: }
0712: }
0713:
0714: /**
0715: * Reformats bit-discontiguous data into the <code>DataBuffer</code>
0716: * of the supplied <code>WritableRaster</code>.
0717: */
0718: private static void reformatDiscontiguousData(byte[] buf,
0719: int stride, int w, int h, WritableRaster raster)
0720: throws IOException {
0721:
0722: if (DEBUG) {
0723: System.out.println("Reformatting discontiguous data");
0724: }
0725:
0726: // Get SampleModel info.
0727: SampleModel sm = raster.getSampleModel();
0728: int numBands = sm.getNumBands();
0729: int[] sampleSize = sm.getSampleSize();
0730:
0731: // Initialize input stream.
0732: ByteArrayInputStream is = new ByteArrayInputStream(buf);
0733: ImageInputStream iis = new MemoryCacheImageInputStream(is);
0734:
0735: // Reformat.
0736: long iisPosition = 0L;
0737: int y = raster.getMinY();
0738: for (int j = 0; j < h; j++, y++) {
0739: iis.seek(iisPosition);
0740: int x = raster.getMinX();
0741: for (int i = 0; i < w; i++, x++) {
0742: for (int b = 0; b < numBands; b++) {
0743: long bits = iis.readBits(sampleSize[b]);
0744: raster.setSample(x, y, b, (int) bits);
0745: }
0746: }
0747: iisPosition += stride;
0748: }
0749: }
0750:
0751: /**
0752: * A utility method that returns an
0753: * <code>ImageTypeSpecifier</code> suitable for decoding an image
0754: * with the given parameters.
0755: *
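 * <p> For example (an illustrative call, not an exhaustive description of
 * the supported layouts), a baseline 8-bit RGB image is typically
 * described as follows:
 *
 * <pre>
 * ImageTypeSpecifier spec =
 *     TIFFDecompressor.getRawImageTypeSpecifier(
 *         BaselineTIFFTagSet.PHOTOMETRIC_INTERPRETATION_RGB,
 *         BaselineTIFFTagSet.COMPRESSION_NONE,
 *         3,                      // samplesPerPixel
 *         new int[] { 8, 8, 8 },  // bitsPerSample
 *         new int[] { BaselineTIFFTagSet.SAMPLE_FORMAT_UNSIGNED_INTEGER },
 *         null,                   // extraSamples
 *         null);                  // colorMap
 * // spec describes a 3-band interleaved byte image in the sRGB color space.
 * </pre>
 *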
0756: * @param photometricInterpretation the value of the
0757: * <code>PhotometricInterpretation</code> field.
0758: * @param compression the value of the <code>Compression</code> field.
0759: * @param samplesPerPixel the value of the
0760: * <code>SamplesPerPixel</code> field.
0761: * @param bitsPerSample the value of the <code>BitsPerSample</code> field.
0762: * @param sampleFormat the value of the <code>SampleFormat</code> field.
0763: * @param extraSamples the value of the <code>ExtraSamples</code> field.
0764: * @param colorMap the value of the <code>ColorMap</code> field.
0765: *
0766: * @return a suitable <code>ImageTypeSpecifier</code>, or
0767: * <code>null</code> if it is not possible to create one.
0768: */
0769: public static ImageTypeSpecifier getRawImageTypeSpecifier(
0770: int photometricInterpretation, int compression,
0771: int samplesPerPixel, int[] bitsPerSample,
0772: int[] sampleFormat, int[] extraSamples, char[] colorMap) {
0773: // XXX BEGIN
0774: /* XXX
0775: System.out.println("samplesPerPixel: "+samplesPerPixel);
0776: System.out.print("bitsPerSample:");
0777: for(int i = 0; i < bitsPerSample.length; i++) {
0778: System.out.print(" "+bitsPerSample[i]);
0779: }
0780: System.out.println("");
0781: System.out.print("sampleFormat:");
0782: for(int i = 0; i < sampleFormat.length; i++) {
0783: System.out.print(" "+sampleFormat[i]);
0784: }
0785: System.out.println("");
0786: if(extraSamples != null) {
0787: System.out.print("extraSamples:");
0788: for(int i = 0; i < extraSamples.length; i++) {
0789: System.out.print(" "+extraSamples[i]);
0790: }
0791: System.out.println("");
0792: }
0793: */
0794: // XXX END
0795: //
0796: // Types to support:
0797: //
0798: // 1, 2, 4, 8, or 16 bit grayscale or indexed
0799: // 8,8-bit gray+alpha
0800: // 16,16-bit gray+alpha
0801: // 8,8,8-bit RGB
0802: // 8,8,8,8-bit CMYK
0803: // 8,8,8,8-bit RGB+alpha
0804: // 16,16,16-bit RGB
0805: // 16,16,16,16-bit RGB+alpha
0806: // 1,1,1,1 or 2,2,2,2 or 4,4,4,4 CMYK
0807: // R+G+B = 8-bit RGB
0808: // R+G+B+A = 8-bit RGBA
0809: // R+G+B = 16-bit RGB
0810: // R+G+B+A = 16-bit RGBA
0811: // Multiple-of-8 bits/sample, arbitrary numBands.
0812: // Arbitrary non-indexed, non-float layouts (discontiguous).
0813: //
0814: // Band-sequential
0815: if (DEBUG) {
0816: System.out.println("\n ---- samplesPerPixel = "
0817: + samplesPerPixel + "\n ---- bitsPerSample[0] = "
0818: + bitsPerSample[0] + "\n ---- sampleFormat[0] = "
0819: + sampleFormat[0]);
0820: }
0821:
0822: // 1, 2, 4, 8, or 16 bit grayscale or indexed images
0823: if (samplesPerPixel == 1
0824: && (bitsPerSample[0] == 1 || bitsPerSample[0] == 2
0825: || bitsPerSample[0] == 4
0826: || bitsPerSample[0] == 8 || bitsPerSample[0] == 16)) {
0827:
0828: // 2- and 16-bit images are not in the baseline
0829: // specification, but we will allow them anyway
0830: // since they fit well into Java2D
0831: //
0832: // this raises the issue of how to write such images...
0833:
0834: if (colorMap == null) {
0835: // Grayscale
0836: boolean isSigned = (sampleFormat[0] == BaselineTIFFTagSet.SAMPLE_FORMAT_SIGNED_INTEGER);
0837: int dataType;
0838: if (bitsPerSample[0] <= 8) {
0839: dataType = DataBuffer.TYPE_BYTE;
0840: } else {
0841: dataType = sampleFormat[0] == BaselineTIFFTagSet.SAMPLE_FORMAT_SIGNED_INTEGER ? DataBuffer.TYPE_SHORT
0842: : DataBuffer.TYPE_USHORT;
0843: }
0844:
0845: return ImageTypeSpecifier.createGrayscale(
0846: bitsPerSample[0], dataType, isSigned);
0847: } else {
0848: // Indexed
0849: int mapSize = 1 << bitsPerSample[0];
0850: byte[] redLut = new byte[mapSize];
0851: byte[] greenLut = new byte[mapSize];
0852: byte[] blueLut = new byte[mapSize];
0853: byte[] alphaLut = null;
0854:
0855: int idx = 0;
0856: for (int i = 0; i < mapSize; i++) {
0857: redLut[i] = (byte) ((colorMap[i] * 255) / 65535);
0858: greenLut[i] = (byte) ((colorMap[mapSize + i] * 255) / 65535);
0859: blueLut[i] = (byte) ((colorMap[2 * mapSize + i] * 255) / 65535);
0860: }
0861:
0862: int dataType = bitsPerSample[0] == 8 ? DataBuffer.TYPE_BYTE
0863: : DataBuffer.TYPE_USHORT;
0864: return ImageTypeSpecifier.createIndexed(redLut,
0865: greenLut, blueLut, alphaLut, bitsPerSample[0],
0866: dataType);
0867: }
0868: }
0869:
0870: // 8-bit gray-alpha
0871: if (samplesPerPixel == 2 && bitsPerSample[0] == 8
0872: && bitsPerSample[1] == 8) {
0873: int dataType = DataBuffer.TYPE_BYTE;
0874: boolean alphaPremultiplied = false;
0875: if (extraSamples != null
0876: && extraSamples[0] == BaselineTIFFTagSet.EXTRA_SAMPLES_ASSOCIATED_ALPHA) {
0877: alphaPremultiplied = true;
0878: }
0879: //System.out.println("alphaPremultiplied = "+alphaPremultiplied);//XXX
0880: return ImageTypeSpecifier.createGrayscale(8, dataType,
0881: false, alphaPremultiplied);
0882: }
0883:
0884: // 16-bit gray-alpha
0885: if (samplesPerPixel == 2 && bitsPerSample[0] == 16
0886: && bitsPerSample[1] == 16) {
0887: int dataType = sampleFormat[0] == BaselineTIFFTagSet.SAMPLE_FORMAT_SIGNED_INTEGER ? DataBuffer.TYPE_SHORT
0888: : DataBuffer.TYPE_USHORT;
0889: boolean alphaPremultiplied = false;
0890: if (extraSamples != null
0891: && extraSamples[0] == BaselineTIFFTagSet.EXTRA_SAMPLES_ASSOCIATED_ALPHA) {
0892: alphaPremultiplied = true;
0893: }
0894: //System.out.println("alphaPremultiplied = "+alphaPremultiplied);//XXX
0895: boolean isSigned = dataType == DataBuffer.TYPE_SHORT;
0896: return ImageTypeSpecifier.createGrayscale(16, dataType,
0897: isSigned, alphaPremultiplied);
0898: }
0899:
0900: ColorSpace rgb = ColorSpace.getInstance(ColorSpace.CS_sRGB);
0901:
0902: // 8-bit RGB
0903: if (samplesPerPixel == 3 && bitsPerSample[0] == 8
0904: && bitsPerSample[1] == 8 && bitsPerSample[2] == 8) {
0905: int[] bandOffsets = new int[3];
0906: bandOffsets[0] = 0;
0907: bandOffsets[1] = 1;
0908: bandOffsets[2] = 2;
0909: int dataType = DataBuffer.TYPE_BYTE;
0910: ColorSpace theColorSpace;
0911: if ((photometricInterpretation == BaselineTIFFTagSet.PHOTOMETRIC_INTERPRETATION_Y_CB_CR
0912: && compression != BaselineTIFFTagSet.COMPRESSION_JPEG && compression != BaselineTIFFTagSet.COMPRESSION_OLD_JPEG)
0913: || photometricInterpretation == BaselineTIFFTagSet.PHOTOMETRIC_INTERPRETATION_CIELAB) {
0914: theColorSpace = ColorSpace
0915: .getInstance(ColorSpace.CS_LINEAR_RGB);
0916: } else {
0917: theColorSpace = rgb;
0918: }
0919: return ImageTypeSpecifier.createInterleaved(theColorSpace,
0920: bandOffsets, dataType, false, false);
0921: }
0922:
0923: // 8-bit RGBA
0924: if (samplesPerPixel == 4 && bitsPerSample[0] == 8
0925: && bitsPerSample[1] == 8 && bitsPerSample[2] == 8
0926: && bitsPerSample[3] == 8) {
0927: int[] bandOffsets = new int[4];
0928: bandOffsets[0] = 0;
0929: bandOffsets[1] = 1;
0930: bandOffsets[2] = 2;
0931: bandOffsets[3] = 3;
0932: int dataType = DataBuffer.TYPE_BYTE;
0933:
0934: ColorSpace theColorSpace;
0935: boolean hasAlpha;
0936: boolean alphaPremultiplied = false;
0937: if (photometricInterpretation == BaselineTIFFTagSet.PHOTOMETRIC_INTERPRETATION_CMYK) {
0938: theColorSpace = SimpleCMYKColorSpace.getInstance();
0939: hasAlpha = false;
0940: } else {
0941: theColorSpace = rgb;
0942: hasAlpha = true;
0943: if (extraSamples != null
0944: && extraSamples[0] == BaselineTIFFTagSet.EXTRA_SAMPLES_ASSOCIATED_ALPHA) {
0945: alphaPremultiplied = true;
0946: }
0947: }
0948:
0949: return ImageTypeSpecifier
0950: .createInterleaved(theColorSpace, bandOffsets,
0951: dataType, hasAlpha, alphaPremultiplied);
0952: }
0953:
0954: // 16-bit RGB
0955: if (samplesPerPixel == 3 && bitsPerSample[0] == 16
0956: && bitsPerSample[1] == 16 && bitsPerSample[2] == 16) {
0957: int[] bandOffsets = new int[3];
0958: bandOffsets[0] = 0;
0959: bandOffsets[1] = 1;
0960: bandOffsets[2] = 2;
0961: int dataType = sampleFormat[0] == BaselineTIFFTagSet.SAMPLE_FORMAT_SIGNED_INTEGER ? DataBuffer.TYPE_SHORT
0962: : DataBuffer.TYPE_USHORT;
0963: return ImageTypeSpecifier.createInterleaved(rgb,
0964: bandOffsets, dataType, false, false);
0965: }
0966:
0967: // 16-bit RGBA
0968: if (samplesPerPixel == 4 && bitsPerSample[0] == 16
0969: && bitsPerSample[1] == 16 && bitsPerSample[2] == 16
0970: && bitsPerSample[3] == 16) {
0971: int[] bandOffsets = new int[4];
0972: bandOffsets[0] = 0;
0973: bandOffsets[1] = 1;
0974: bandOffsets[2] = 2;
0975: bandOffsets[3] = 3;
0976: int dataType = sampleFormat[0] == BaselineTIFFTagSet.SAMPLE_FORMAT_SIGNED_INTEGER ? DataBuffer.TYPE_SHORT
0977: : DataBuffer.TYPE_USHORT;
0978:
0979: boolean alphaPremultiplied = false;
0980: if (extraSamples != null
0981: && extraSamples[0] == BaselineTIFFTagSet.EXTRA_SAMPLES_ASSOCIATED_ALPHA) {
0982: alphaPremultiplied = true;
0983: }
0984: return ImageTypeSpecifier.createInterleaved(rgb,
0985: bandOffsets, dataType, true, alphaPremultiplied);
0986: }
0987:
0988: // Support for TIFF files containing half-tone data
0989: // in more than one channel
0990: if ((photometricInterpretation == BaselineTIFFTagSet.PHOTOMETRIC_INTERPRETATION_CMYK)
0991: && (bitsPerSample[0] == 1 || bitsPerSample[0] == 2 || bitsPerSample[0] == 4)) {
0992: ColorSpace cs = null;
0993: if (samplesPerPixel == 4)
0994: cs = SimpleCMYKColorSpace.getInstance();
0995: else
0996: cs = new BogusColorSpace(samplesPerPixel);
0997: // Specifying the bits per sample ensures that the color
0998: // values are scaled correctly for display.
0999: ColorModel cm = new ComponentColorModel(cs, bitsPerSample,
1000: false, false, Transparency.OPAQUE,
1001: DataBuffer.TYPE_BYTE);
1002: return new ImageTypeSpecifier(cm, cm
1003: .createCompatibleSampleModel(1, 1));
1004: }
1005:
1006: // Compute bits per pixel.
1007: int totalBits = 0;
1008: for (int i = 0; i < bitsPerSample.length; i++) {
1009: totalBits += bitsPerSample[i];
1010: }
1011:
1012: // Packed: 3- or 4-band, 8- or 16-bit.
1013: if ((samplesPerPixel == 3 || samplesPerPixel == 4)
1014: && (totalBits == 8 || totalBits == 16)) {
1015: int redMask = createMask(bitsPerSample, 0);
1016: int greenMask = createMask(bitsPerSample, 1);
1017: int blueMask = createMask(bitsPerSample, 2);
1018: int alphaMask = (samplesPerPixel == 4) ? createMask(
1019: bitsPerSample, 3) : 0;
1020: int transferType = totalBits == 8 ? DataBuffer.TYPE_BYTE
1021: : DataBuffer.TYPE_USHORT;
1022: boolean alphaPremultiplied = false;
1023: if (extraSamples != null
1024: && extraSamples[0] == BaselineTIFFTagSet.EXTRA_SAMPLES_ASSOCIATED_ALPHA) {
1025: alphaPremultiplied = true;
1026: }
1027: return ImageTypeSpecifier.createPacked(rgb, redMask,
1028: greenMask, blueMask, alphaMask, transferType,
1029: alphaPremultiplied);
1030: }
1031:
1032: // Generic components with 8X bits per sample.
1033: if (bitsPerSample[0] % 8 == 0) {
1034: // Check whether all bands have same bit depth.
1035: boolean allSameBitDepth = true;
1036: for (int i = 1; i < bitsPerSample.length; i++) {
1037: if (bitsPerSample[i] != bitsPerSample[i - 1]) {
1038: allSameBitDepth = false;
1039: break;
1040: }
1041: }
1042:
1043: // Proceed if all bands have same bit depth.
1044: if (allSameBitDepth) {
1045: // Determine the data type.
1046: int dataType = -1;
1047: boolean isDataTypeSet = false;
1048: switch (bitsPerSample[0]) {
1049: case 8:
1050: if (sampleFormat[0] != BaselineTIFFTagSet.SAMPLE_FORMAT_FLOATING_POINT) {
1051: // Ignore whether signed or unsigned:
1052: // treat all as unsigned.
1053: dataType = DataBuffer.TYPE_BYTE;
1054: isDataTypeSet = true;
1055: }
1056: break;
1057: case 16:
1058: if (sampleFormat[0] != BaselineTIFFTagSet.SAMPLE_FORMAT_FLOATING_POINT) {
1059: if (sampleFormat[0] == BaselineTIFFTagSet.SAMPLE_FORMAT_SIGNED_INTEGER) {
1060: dataType = DataBuffer.TYPE_SHORT;
1061: } else {
1062: dataType = DataBuffer.TYPE_USHORT;
1063: }
1064: isDataTypeSet = true;
1065: }
1066: break;
1067: case 32:
1068: if (sampleFormat[0] == BaselineTIFFTagSet.SAMPLE_FORMAT_FLOATING_POINT) {
1069: dataType = DataBuffer.TYPE_FLOAT;
1070: } else {
1071: dataType = DataBuffer.TYPE_INT;
1072: }
1073: isDataTypeSet = true;
1074: break;
1075: }
1076:
1077: if (isDataTypeSet) {
1078: // Create the SampleModel.
1079: SampleModel sm = createInterleavedSM(dataType,
1080: samplesPerPixel);
1081:
1082: // Create the ColorModel.
1083: ColorModel cm;
1084: if (samplesPerPixel >= 1
1085: && samplesPerPixel <= 4
1086: && (dataType == DataBuffer.TYPE_INT || dataType == DataBuffer.TYPE_FLOAT)) {
1087: // Handle the 32-bit cases for 1-4 bands.
1088: ColorSpace cs = samplesPerPixel <= 2 ? ColorSpace
1089: .getInstance(ColorSpace.CS_GRAY)
1090: : rgb;
1091: boolean hasAlpha = ((samplesPerPixel % 2) == 0);
1092: boolean alphaPremultiplied = false;
1093: if (hasAlpha
1094: && extraSamples != null
1095: && extraSamples[0] == BaselineTIFFTagSet.EXTRA_SAMPLES_ASSOCIATED_ALPHA) {
1096: alphaPremultiplied = true;
1097: }
1098:
1099: cm = createComponentCM(cs, samplesPerPixel,
1100: dataType, hasAlpha, alphaPremultiplied);
1101: } else {
1102: ColorSpace cs = new BogusColorSpace(
1103: samplesPerPixel);
1104: cm = createComponentCM(cs, samplesPerPixel,
1105: dataType, false, // hasAlpha
1106: false); // alphaPremultiplied
1107: }
1108: //System.out.println(cm); // XXX
1109: return new ImageTypeSpecifier(cm, sm);
1110: }
1111: }
1112: }
1113:
1114: // Other more bizarre cases including discontiguous DataBuffers
1115: // such as for the image in bug 4918959.
1116:
1117: if (colorMap == null
1118: && sampleFormat[0] != BaselineTIFFTagSet.SAMPLE_FORMAT_FLOATING_POINT) {
1119:
1120: // Determine size of largest sample.
1121: int maxBitsPerSample = 0;
1122: for (int i = 0; i < bitsPerSample.length; i++) {
1123: if (bitsPerSample[i] > maxBitsPerSample) {
1124: maxBitsPerSample = bitsPerSample[i];
1125: }
1126: }
1127:
1128: // Determine whether data are signed.
1129: boolean isSigned = (sampleFormat[0] == BaselineTIFFTagSet.SAMPLE_FORMAT_SIGNED_INTEGER);
1130:
1131: // Grayscale
1132: if (samplesPerPixel == 1) {
1133: int dataType = getDataTypeFromNumBits(maxBitsPerSample,
1134: isSigned);
1135:
1136: return ImageTypeSpecifier.createGrayscale(
1137: maxBitsPerSample, dataType, isSigned);
1138: }
1139:
1140: // Gray-alpha
1141: if (samplesPerPixel == 2) {
1142: boolean alphaPremultiplied = false;
1143: if (extraSamples != null
1144: && extraSamples[0] == BaselineTIFFTagSet.EXTRA_SAMPLES_ASSOCIATED_ALPHA) {
1145: alphaPremultiplied = true;
1146: }
1147:
1148: int dataType = getDataTypeFromNumBits(maxBitsPerSample,
1149: isSigned);
1150:
1151: return ImageTypeSpecifier.createGrayscale(
1152: maxBitsPerSample, dataType, false,
1153: alphaPremultiplied);
1154: }
1155:
1156: if (samplesPerPixel == 3 || samplesPerPixel == 4) {
1157: if (totalBits <= 32 && !isSigned) {
1158: // Packed RGB or RGBA
1159: int redMask = createMask(bitsPerSample, 0);
1160: int greenMask = createMask(bitsPerSample, 1);
1161: int blueMask = createMask(bitsPerSample, 2);
1162: int alphaMask = (samplesPerPixel == 4) ? createMask(
1163: bitsPerSample, 3)
1164: : 0;
1165: int transferType = getDataTypeFromNumBits(
1166: totalBits, false);
1167: boolean alphaPremultiplied = false;
1168: if (extraSamples != null
1169: && extraSamples[0] == BaselineTIFFTagSet.EXTRA_SAMPLES_ASSOCIATED_ALPHA) {
1170: alphaPremultiplied = true;
1171: }
1172: return ImageTypeSpecifier.createPacked(rgb,
1173: redMask, greenMask, blueMask, alphaMask,
1174: transferType, alphaPremultiplied);
1175: } else if (samplesPerPixel == 3) {
1176: // Interleaved RGB
1177: int[] bandOffsets = new int[] { 0, 1, 2 };
1178: int dataType = getDataTypeFromNumBits(
1179: maxBitsPerSample, isSigned);
1180: return ImageTypeSpecifier.createInterleaved(rgb,
1181: bandOffsets, dataType, false, false);
1182: } else if (samplesPerPixel == 4) {
1183: // Interleaved RGBA
1184: int[] bandOffsets = new int[] { 0, 1, 2, 3 };
1185: int dataType = getDataTypeFromNumBits(
1186: maxBitsPerSample, isSigned);
1187: boolean alphaPremultiplied = false;
1188: if (extraSamples != null
1189: && extraSamples[0] == BaselineTIFFTagSet.EXTRA_SAMPLES_ASSOCIATED_ALPHA) {
1190: alphaPremultiplied = true;
1191: }
1192: return ImageTypeSpecifier.createInterleaved(rgb,
1193: bandOffsets, dataType, true,
1194: alphaPremultiplied);
1195: }
1196: } else {
1197: // Arbitrary Interleaved.
1198: int dataType = getDataTypeFromNumBits(maxBitsPerSample,
1199: isSigned);
1200: SampleModel sm = createInterleavedSM(dataType,
1201: samplesPerPixel);
1202: ColorSpace cs = new BogusColorSpace(samplesPerPixel);
1203: ColorModel cm = createComponentCM(cs, samplesPerPixel,
1204: dataType, false, // hasAlpha
1205: false); // alphaPremultiplied
1206: return new ImageTypeSpecifier(cm, sm);
1207: }
1208: }
1209:
1210: if (DEBUG) {
1211: System.out.println("\nNo raw ITS available:");
1212:
1213: System.out.println("photometricInterpretation = "
1214: + photometricInterpretation);
1215: System.out.println("compression = " + compression);
1216: System.out.println("samplesPerPixel = " + samplesPerPixel);
1217: if (bitsPerSample != null) {
1218: for (int i = 0; i < bitsPerSample.length; i++) {
1219: System.out.println("bitsPerSample[" + i + "] = "
1220: + (int) bitsPerSample[i]);
1221: }
1222: }
1223: if (sampleFormat != null) {
1224: for (int i = 0; i < sampleFormat.length; i++) {
1225: System.out.println("sampleFormat[" + i + "] = "
1226: + (int) sampleFormat[i]);
1227: }
1228: }
1229: if (extraSamples != null) {
1230: for (int i = 0; i < extraSamples.length; i++) {
1231: System.out.println("extraSamples[" + i + "] = "
1232: + (int) extraSamples[i]);
1233: }
1234: }
1235: System.out.println("colorMap = " + colorMap);
1236: if (colorMap != null) {
1237: System.out.println("colorMap.length = "
1238: + colorMap.length);
1239: }
1240:
1241: throw new RuntimeException(
1242: "Unable to create an ImageTypeSpecifier");
1243: }
1244:
1245: return null;
1246: }
1247:
1248: /**
1249: * Sets the value of the <code>reader</code> field.
1250: *
1251: * <p> If this method is called, the <code>beginDecoding</code>
1252: * method must be called prior to calling any of the decode
1253: * methods.
1254: *
1255: * @param reader the current <code>ImageReader</code>.
1256: */
1257: public void setReader(ImageReader reader) {
1258: this.reader = reader;
1259: }
1260:
1261: /**
1262: * Sets the value of the <code>metadata</code> field.
1263: *
1264: * <p> If this method is called, the <code>beginDecoding</code>
1265: * method must be called prior to calling any of the decode
1266: * methods.
1267: *
1268: * @param metadata the <code>IIOMetadata</code> object for the
1269: * image being read.
1270: */
1271: public void setMetadata(IIOMetadata metadata) {
1272: this.metadata = metadata;
1273: }
1274:
1275: /**
1276: * Sets the value of the <code>photometricInterpretation</code>
1277: * field.
1278: *
1279: * <p> If this method is called, the <code>beginDecoding</code>
1280: * method must be called prior to calling any of the decode
1281: * methods.
1282: *
1283: * @param photometricInterpretation the photometric interpretation
1284: * value.
1285: */
1286: public void setPhotometricInterpretation(
1287: int photometricInterpretation) {
1288: this.photometricInterpretation = photometricInterpretation;
1289: }
1290:
1291: /**
1292: * Sets the value of the <code>compression</code> field.
1293: *
1294: * <p> If this method is called, the <code>beginDecoding</code>
1295: * method must be called prior to calling any of the decode
1296: * methods.
1297: *
1298: * @param compression the compression type.
1299: */
1300: public void setCompression(int compression) {
1301: this.compression = compression;
1302: }
1303:
1304: /**
1305: * Sets the value of the <code>planar</code> field.
1306: *
1307: * <p> If this method is called, the <code>beginDecoding</code>
1308: * method must be called prior to calling any of the decode
1309: * methods.
1310: *
1311: * @param planar <code>true</code> if the image to be decoded is
1312: * stored in planar format.
1313: */
1314: public void setPlanar(boolean planar) {
1315: this.planar = planar;
1316: }
1317:
1318: /**
1319: * Sets the value of the <code>samplesPerPixel</code> field.
1320: *
1321: * <p> If this method is called, the <code>beginDecoding</code>
1322: * method must be called prior to calling any of the decode
1323: * methods.
1324: *
1325: * @param samplesPerPixel the number of samples in each source
1326: * pixel.
1327: */
1328: public void setSamplesPerPixel(int samplesPerPixel) {
1329: this.samplesPerPixel = samplesPerPixel;
1330: }
1331:
1332: /**
1333: * Sets the value of the <code>bitsPerSample</code> field.
1334: *
1335: * <p> If this method is called, the <code>beginDecoding</code>
1336: * method must be called prior to calling any of the decode
1337: * methods.
1338: *
1339: * @param bitsPerSample the number of bits for each source image
1340: * sample.
1341: */
1342: public void setBitsPerSample(int[] bitsPerSample) {
1343: this.bitsPerSample = bitsPerSample == null ? null
1344: : (int[]) bitsPerSample.clone();
1345: }
1346:
1347: /**
1348: * Sets the value of the <code>sampleFormat</code> field.
1349: *
1350: * <p> If this method is called, the <code>beginDecoding</code>
1351: * method must be called prior to calling any of the decode
1352: * methods.
1353: *
1354: * @param sampleFormat the format of the source image data,
1355: * for example unsigned integer or floating-point.
1356: */
1357: public void setSampleFormat(int[] sampleFormat) {
1358: this.sampleFormat = sampleFormat == null ? new int[] { BaselineTIFFTagSet.SAMPLE_FORMAT_UNSIGNED_INTEGER }
1359: : (int[]) sampleFormat.clone();
1360: }
1361:
1362: /**
1363: * Sets the value of the <code>extraSamples</code> field.
1364: *
1365: * <p> If this method is called, the <code>beginDecoding</code>
1366: * method must be called prior to calling any of the decode
1367: * methods.
1368: *
1369: * @param extraSamples the interpretation of any samples in the
1370: * source file beyond those used for basic color or grayscale
1371: * information.
1372: */
1373: public void setExtraSamples(int[] extraSamples) {
1374: this.extraSamples = extraSamples == null ? null
1375: : (int[]) extraSamples.clone();
1376: }
1377:
1378: /**
1379: * Sets the value of the <code>colorMap</code> field.
1380: *
1381: * <p> If this method is called, the <code>beginDecoding</code>
1382: * method must be called prior to calling any of the decode
1383: * methods.
1384: *
1385: * @param colorMap the color map to apply to the source data,
1386: * as an array of <code>char</code>s.
1387: */
1388: public void setColorMap(char[] colorMap) {
1389: this.colorMap = colorMap == null ? null
1390: : (char[]) colorMap.clone();
1391: }
1392:
1393: /**
1394: * Sets the value of the <code>stream</code> field.
1395: *
1396: * <p> If this method is called, the <code>beginDecoding</code>
1397: * method must be called prior to calling any of the decode
1398: * methods.
1399: *
1400: * @param stream the <code>ImageInputStream</code> to be read.
1401: */
1402: public void setStream(ImageInputStream stream) {
1403: this.stream = stream;
1404: }
1405:
1406: /**
1407: * Sets the value of the <code>offset</code> field.
1408: *
1409: * <p> If this method is called, the <code>beginDecoding</code>
1410: * method must be called prior to calling any of the decode
1411: * methods.
1412: *
1413: * @param offset the offset of the beginning of the compressed
1414: * data.
1415: */
1416: public void setOffset(long offset) {
1417: this.offset = offset;
1418: }
1419:
1420: /**
1421: * Sets the value of the <code>byteCount</code> field.
1422: *
1423: * <p> If this method is called, the <code>beginDecoding</code>
1424: * method must be called prior to calling any of the decode
1425: * methods.
1426: *
1427: * @param byteCount the number of bytes of compressed data.
1428: */
1429: public void setByteCount(int byteCount) {
1430: this.byteCount = byteCount;
1431: }
1432:
1433: // Region of the file image represented in the stream
1434:
1435: /**
1436: * Sets the value of the <code>srcMinX</code> field.
1437: *
1438: * <p> If this method is called, the <code>beginDecoding</code>
1439: * method must be called prior to calling any of the decode
1440: * methods.
1441: *
1442: * @param srcMinX the minimum X coordinate of the source region
1443: * being decoded, irrespective of how it will be copied into the
1444: * destination.
1445: */
1446: public void setSrcMinX(int srcMinX) {
1447: this.srcMinX = srcMinX;
1448: }
1449:
1450: /**
1451: * Sets the value of the <code>srcMinY</code> field.
1452: *
1453: * <p> If this method is called, the <code>beginDecoding</code>
1454: * method must be called prior to calling any of the decode
1455: * methods.
1456: *
1457: * @param srcMinY the minimum Y coordinate of the source region
1458: * being decoded, irrespective of how it will be copied into the
1459: * destination.
1460: */
1461: public void setSrcMinY(int srcMinY) {
1462: this.srcMinY = srcMinY;
1463: }
1464:
1465: /**
1466: * Sets the value of the <code>srcWidth</code> field.
1467: *
1468: * <p> If this method is called, the <code>beginDecoding</code>
1469: * method must be called prior to calling any of the decode
1470: * methods.
1471: *
1472: * @param srcWidth the width of the source region being decoded,
1473: * irrespective of how it will be copied into the destination.
1474: */
1475: public void setSrcWidth(int srcWidth) {
1476: this.srcWidth = srcWidth;
1477: }
1478:
1479: /**
1480: * Sets the value of the <code>srcHeight</code> field.
1481: *
1482: * <p> If this method is called, the <code>beginDecoding</code>
1483: * method must be called prior to calling any of the decode
1484: * methods.
1485: *
1486: * @param srcHeight the height of the source region being decoded,
1487: * irrespective of how it will be copied into the destination.
1488: */
1489: public void setSrcHeight(int srcHeight) {
1490: this.srcHeight = srcHeight;
1491: }
1492:
1493: // First source pixel to be read
1494:
1495: /**
1496: * Sets the value of the <code>sourceXOffset</code> field.
1497: *
1498: * <p> If this method is called, the <code>beginDecoding</code>
1499: * method must be called prior to calling any of the decode
1500: * methods.
1501: *
1502: * @param sourceXOffset the horizontal source offset to be used when
1503: * mapping between source and destination coordinates.
1504: */
1505: public void setSourceXOffset(int sourceXOffset) {
1506: this.sourceXOffset = sourceXOffset;
1507: }
1508:
1509: /**
1510: * Sets the value of the <code>dstXOffset</code> field.
1511: *
1512: * <p> If this method is called, the <code>beginDecoding</code>
1513: * method must be called prior to calling any of the decode
1514: * methods.
1515: *
1516: * @param dstXOffset the horizontal destination offset to be
1517: * used when mapping between source and destination coordinates.
1518: */
1519: public void setDstXOffset(int dstXOffset) {
1520: this.dstXOffset = dstXOffset;
1521: }
1522:
1523: /**
1524: * Sets the value of the <code>sourceYOffset</code> field.
1525: *
1526: * <p> If this method is called, the <code>beginDecoding</code>
1527: * method must be called prior to calling any of the decode
1528: * methods.
1529: *
1530: * @param sourceYOffset the vertical source offset to be used when
1531: * mapping between source and destination coordinates.
1532: */
1533: public void setSourceYOffset(int sourceYOffset) {
1534: this.sourceYOffset = sourceYOffset;
1535: }
1536:
1537: /**
1538: * Sets the value of the <code>dstYOffset</code> field.
1539: *
1540: * <p> If this method is called, the <code>beginDecoding</code>
1541: * method must be called prior to calling any of the decode
1542: * methods.
1543: *
1544: * @param dstYOffset the vertical destination offset to be
1545: * used when mapping between source and destination coordinates.
1546: */
1547: public void setDstYOffset(int dstYOffset) {
1548: this.dstYOffset = dstYOffset;
1549: }
1550:
1551: // Subsampling to be performed
1552:
1553: /**
1554: * Sets the value of the <code>subsampleX</code> field.
1555: *
1556: * <p> If this method is called, the <code>beginDecoding</code>
1557: * method must be called prior to calling any of the decode
1558: * methods.
1559: *
1560: * @param subsampleX the horizontal subsampling factor.
1561: *
1562: * @throws IllegalArgumentException if <code>subsampleX</code> is
1563: * less than or equal to 0.
1564: */
1565: public void setSubsampleX(int subsampleX) {
1566: if (subsampleX <= 0) {
1567: throw new IllegalArgumentException("subsampleX <= 0!");
1568: }
1569: this.subsampleX = subsampleX;
1570: }
1571:
1572: /**
1573: * Sets the value of the <code>subsampleY</code> field.
1574: *
1575: * <p> If this method is called, the <code>beginDecoding</code>
1576: * method must be called prior to calling any of the decode
1577: * methods.
1578: *
1579: * @param subsampleY the vertical subsampling factor.
1580: *
1581: * @throws IllegalArgumentException if <code>subsampleY</code> is
1582: * less than or equal to 0.
1583: */
1584: public void setSubsampleY(int subsampleY) {
1585: if (subsampleY <= 0) {
1586: throw new IllegalArgumentException("subsampleY <= 0!");
1587: }
1588: this.subsampleY = subsampleY;
1589: }
1590:
1591: // Band subsetting/rearrangement
1592:
1593: /**
1594: * Sets the value of the <code>sourceBands</code> field.
1595: *
1596: * <p> If this method is called, the <code>beginDecoding</code>
1597: * method must be called prior to calling any of the decode
1598: * methods.
1599: *
1600: * @param sourceBands an array of <code>int</code>s
1601: * specifying the source bands to be read.
1602: */
1603: public void setSourceBands(int[] sourceBands) {
1604: this.sourceBands = sourceBands == null ? null
1605: : (int[]) sourceBands.clone();
1606: }
1607:
1608: /**
1609: * Sets the value of the <code>destinationBands</code> field.
1610: *
1611: * <p> If this method is called, the <code>beginDecoding</code>
1612: * method must be called prior to calling any of the decode
1613: * methods.
1614: *
1615: * @param destinationBands an array of <code>int</code>s
1616: * specifying the destination bands to be written.
1617: */
1618: public void setDestinationBands(int[] destinationBands) {
1619: this.destinationBands = destinationBands == null ? null
1620: : (int[]) destinationBands.clone();
1621: }
1622:
1623: // Destination image and region
1624:
1625: /**
1626: * Sets the value of the <code>image</code> field.
1627: *
1628: * <p> If this method is called, the <code>beginDecoding</code>
1629: * method must be called prior to calling any of the decode
1630: * methods.
1631: *
1632: * @param image the destination <code>BufferedImage</code>.
1633: */
1634: public void setImage(BufferedImage image) {
1635: this.image = image;
1636: }
1637:
1638: /**
1639: * Sets the value of the <code>dstMinX</code> field.
1640: *
1641: * <p> If this method is called, the <code>beginDecoding</code>
1642: * method must be called prior to calling any of the decode
1643: * methods.
1644: *
1645: * @param dstMinX the minimum X coordinate of the destination
1646: * region.
1647: */
1648: public void setDstMinX(int dstMinX) {
1649: this.dstMinX = dstMinX;
1650: }
1651:
1652: /**
1653: * Sets the value of the <code>dstMinY</code> field.
1654: *
1655: * <p> If this method is called, the <code>beginDecoding</code>
1656: * method must be called prior to calling any of the decode
1657: * methods.
1658: *
1659: * @param dstMinY the minimum Y coordinate of the destination
1660: * region.
1661: */
1662: public void setDstMinY(int dstMinY) {
1663: this.dstMinY = dstMinY;
1664: }
1665:
1666: /**
1667: * Sets the value of the <code>dstWidth</code> field.
1668: *
1669: * <p> If this method is called, the <code>beginDecoding</code>
1670: * method must be called prior to calling any of the decode
1671: * methods.
1672: *
1673: * @param dstWidth the width of the destination region.
1674: */
1675: public void setDstWidth(int dstWidth) {
1676: this.dstWidth = dstWidth;
1677: }
1678:
1679: /**
1680: * Sets the value of the <code>dstHeight</code> field.
1681: *
1682: * <p> If this method is called, the <code>beginDecoding</code>
1683: * method must be called prior to calling any of the decode
1684: * methods.
1685: *
1686: * @param dstHeight the height of the destination region.
1687: */
1688: public void setDstHeight(int dstHeight) {
1689: this.dstHeight = dstHeight;
1690: }
1691:
1692: // Active source region
1693:
1694: /**
1695: * Sets the value of the <code>activeSrcMinX</code> field.
1696: *
1697: * <p> If this method is called, the <code>beginDecoding</code>
1698: * method must be called prior to calling any of the decode
1699: * methods.
1700: *
1701: * @param activeSrcMinX the minimum X coordinate of the active
1702: * source region.
1703: */
1704: public void setActiveSrcMinX(int activeSrcMinX) {
1705: this.activeSrcMinX = activeSrcMinX;
1706: }
1707:
1708: /**
1709: * Sets the value of the <code>activeSrcMinY</code> field.
1710: *
1711: * <p> If this method is called, the <code>beginDecoding</code>
1712: * method must be called prior to calling any of the decode
1713: * methods.
1714: *
1715: * @param activeSrcMinY the minimum Y coordinate of the active
1716: * source region.
1717: */
1718: public void setActiveSrcMinY(int activeSrcMinY) {
1719: this.activeSrcMinY = activeSrcMinY;
1720: }
1721:
1722: /**
1723: * Sets the value of the <code>activeSrcWidth</code> field.
1724: *
1725: * <p> If this method is called, the <code>beginDecoding</code>
1726: * method must be called prior to calling any of the decode
1727: * methods.
1728: *
1729: * @param activeSrcWidth the width of the active source region.
1730: */
1731: public void setActiveSrcWidth(int activeSrcWidth) {
1732: this.activeSrcWidth = activeSrcWidth;
1733: }
1734:
1735: /**
1736: * Sets the value of the <code>activeSrcHeight</code> field.
1737: *
1738: * <p> If this method is called, the <code>beginDecoding</code>
1739: * method must be called prior to calling any of the decode
1740: * methods.
1741: *
1742: * @param activeSrcHeight the height of the active source region.
1743: */
1744: public void setActiveSrcHeight(int activeSrcHeight) {
1745: this.activeSrcHeight = activeSrcHeight;
1746: }
1747:
1748: /**
1749: * Sets the <code>TIFFColorConverter</code> object describing the color
1750: * space of the encoded data in the input stream. If no
1751: * <code>TIFFColorConverter</code> is set, no conversion will be performed.
1752: *
1753: * @param colorConverter a <code>TIFFColorConverter</code> object, or
1754: * <code>null</code>.
1755: */
1756: public void setColorConverter(TIFFColorConverter colorConverter) {
1757: this.colorConverter = colorConverter;
1758: }
1759:
1760: /**
1761: * Returns an <code>ImageTypeSpecifier</code> describing an image
1762: * whose underlying data array has the same format as the raw
1763: * source pixel data.
1764: *
1765: * @return an <code>ImageTypeSpecifier</code>.
1766: */
1767: public ImageTypeSpecifier getRawImageType() {
1768: ImageTypeSpecifier its = getRawImageTypeSpecifier(
1769: photometricInterpretation, compression,
1770: samplesPerPixel, bitsPerSample, sampleFormat,
1771: extraSamples, colorMap);
1772: return its;
1773: }
1774:
1775: /**
1776: * Creates a <code>BufferedImage</code> whose underlying data
1777: * array will be suitable for holding the raw decoded output of
1778: * the <code>decodeRaw</code> method.
1779: *
1780: * <p> The default implementation calls
1781: * <code>getRawImageType</code>, and calls the resulting
1782: * <code>ImageTypeSpecifier</code>'s
1783: * <code>createBufferedImage</code> method.
1784: *
1785: * @return a <code>BufferedImage</code> whose underlying data
1786: * array has the same format as the raw source pixel data, or
1787: * <code>null</code> if it is not possible to create such an
1788: * image.
1789: */
1790: public BufferedImage createRawImage() {
1791: if (planar) {
1792: // Create a single-banded image of the appropriate data type.
1793:
1794: // Get the number of bits per sample.
1795: int bps = bitsPerSample[sourceBands[0]];
1796:
1797: // Determine the data type.
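// The mapping below is: floating-point samples map to TYPE_FLOAT;
// integral samples of up to 8 bits map to TYPE_BYTE, of 9-16 bits to
// TYPE_SHORT (signed) or TYPE_USHORT (unsigned), and anything wider
// maps to TYPE_INT.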
1798: int dataType;
1799: if (sampleFormat[0] == BaselineTIFFTagSet.SAMPLE_FORMAT_FLOATING_POINT) {
1800: dataType = DataBuffer.TYPE_FLOAT;
1801: } else if (bps <= 8) {
1802: dataType = DataBuffer.TYPE_BYTE;
1803: } else if (bps <= 16) {
1804: if (sampleFormat[0] == BaselineTIFFTagSet.SAMPLE_FORMAT_SIGNED_INTEGER) {
1805: dataType = DataBuffer.TYPE_SHORT;
1806: } else {
1807: dataType = DataBuffer.TYPE_USHORT;
1808: }
1809: } else {
1810: dataType = DataBuffer.TYPE_INT;
1811: }
1812:
1813: ColorSpace csGray = ColorSpace
1814: .getInstance(ColorSpace.CS_GRAY);
1815:
1816: ImageTypeSpecifier its = null;
1817: // For planar images with 1, 2 or 4 bits per sample, we need to
1818: // use a MultiPixelPackedSampleModel so that when the TIFF
1819: // decoder properly decodes the data per pixel, we know how to
1820: // extract it back out into individual pixels. This is how the
1821: // pixels are actually stored in the planar bands.
1822: if (bps == 1 || bps == 2 || bps == 4) {
1823: int bits = bps;
1824: int size = 1 << bits;
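// The palette built below is all zeros: the color model of this
// intermediate image is only a placeholder, since the decoded samples
// are later copied out of its Raster without consulting the colors.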
1825: byte[] r = new byte[size];
1826: byte[] g = new byte[size];
1827: byte[] b = new byte[size];
1828: for (int j = 0; j < r.length; j++) {
1829: r[j] = 0;
1830: g[j] = 0;
1831: b[j] = 0;
1832: }
1833: ColorModel cmGray = new IndexColorModel(bits, size, r,
1834: g, b);
1835: SampleModel smGray = new MultiPixelPackedSampleModel(
1836: DataBuffer.TYPE_BYTE, 1, 1, bits);
1837: its = new ImageTypeSpecifier(cmGray, smGray);
1838: } else {
1839: its = ImageTypeSpecifier.createInterleaved(csGray,
1840: new int[] { 0 }, dataType, false, false);
1841: }
1842:
1843: return its.createBufferedImage(srcWidth, srcHeight);
1844:
1845: /* XXX Not necessarily byte for planar
1846: return new BufferedImage(srcWidth, srcHeight,
1847: BufferedImage.TYPE_BYTE_GRAY);
1848: */
1849: } else {
1850: ImageTypeSpecifier its = getRawImageType();
1851: if (its == null) {
1852: return null;
1853: }
1854:
1855: BufferedImage bi = its.createBufferedImage(srcWidth,
1856: srcHeight);
1857: return bi;
1858: }
1859: }
1860:
1861: /**
1862: * Decodes the source data into the provided <code>byte</code>
1863: * array <code>b</code>, starting at the offset given by
1864: * <code>dstOffset</code>. Each pixel occupies
1865: * <code>bitsPerPixel</code> bits, with no padding between pixels.
1866: * Scanlines are separated by <code>scanlineStride</code>
1867: * <code>byte</code>s.
1868: *
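* <p> For illustration only, a hypothetical subclass decoding
* uncompressed data might implement this method along these lines
* (assuming the strip or tile begins at <code>offset</code> in
* <code>stream</code>):
*
* <pre>
* stream.seek(offset);
* int bytesPerRow = (srcWidth*bitsPerPixel + 7)/8;
* for (int row = 0; row &lt; srcHeight; row++) {
*     stream.readFully(b, dstOffset, bytesPerRow);
*     dstOffset += scanlineStride;
* }
* </pre>
*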
1869: * @param b a <code>byte</code> array to be written.
1870: * @param dstOffset the starting offset in <code>b</code> to be
1871: * written.
1872: * @param bitsPerPixel the number of bits for each pixel.
1873: * @param scanlineStride the number of <code>byte</code>s to
1874: * advance between the starting pixels of successive scanlines.
1875: *
1876: * @throws IOException if an error occurs reading from the source
1877: * <code>ImageInputStream</code>.
1878: */
1879: public abstract void decodeRaw(byte[] b, int dstOffset,
1880: int bitsPerPixel, int scanlineStride) throws IOException;
1881:
1882: /**
1883: * Decodes the source data into the provided <code>short</code>
1884: * array <code>s</code>, starting at the offset given by
1885: * <code>dstOffset</code>. Each pixel occupies
1886: * <code>bitsPerPixel</code> bits, with no padding between pixels.
1887: * Scanlines are separated by <code>scanlineStride</code>
1888: * <code>short</code>s.
1889: *
1890: * <p> The default implementation calls <code>decodeRaw(byte[] b,
1891: * ...)</code> and copies the resulting data into <code>s</code>.
1892: *
1893: * @param s a <code>short</code> array to be written.
1894: * @param dstOffset the starting offset in <code>s</code> to be
1895: * written.
1896: * @param bitsPerPixel the number of bits for each pixel.
1897: * @param scanlineStride the number of <code>short</code>s to
1898: * advance between the starting pixels of successive scanlines.
1899: *
1900: * @throws IOException if an error occurs reading from the source
1901: * <code>ImageInputStream</code>.
1902: */
1903: public void decodeRaw(short[] s, int dstOffset, int bitsPerPixel,
1904: int scanlineStride) throws IOException {
1905: int bytesPerRow = (srcWidth * bitsPerPixel + 7) / 8;
1906: int shortsPerRow = bytesPerRow / 2;
1907:
1908: byte[] b = new byte[bytesPerRow * srcHeight];
1909: decodeRaw(b, 0, bitsPerPixel, bytesPerRow);
1910:
1911: int bOffset = 0;
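// Assemble each pair of decoded bytes into one short according to
// the byte order of the input stream.  For example, the big-endian
// byte pair {0x12, 0x34} becomes 0x1234, whereas in little-endian
// order the same pair becomes 0x3412.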
1912: if (stream.getByteOrder() == ByteOrder.BIG_ENDIAN) {
1913: for (int j = 0; j < srcHeight; j++) {
1914: for (int i = 0; i < shortsPerRow; i++) {
1915: short hiVal = b[bOffset++];
1916: short loVal = b[bOffset++];
1917: short sval = (short) ((hiVal << 8) | (loVal & 0xff));
1918: s[dstOffset + i] = sval;
1919: }
1920:
1921: dstOffset += scanlineStride;
1922: }
1923: } else { // ByteOrder.LITTLE_ENDIAN
1924: for (int j = 0; j < srcHeight; j++) {
1925: for (int i = 0; i < shortsPerRow; i++) {
1926: short loVal = b[bOffset++];
1927: short hiVal = b[bOffset++];
1928: short sval = (short) ((hiVal << 8) | (loVal & 0xff));
1929: s[dstOffset + i] = sval;
1930: }
1931:
1932: dstOffset += scanlineStride;
1933: }
1934: }
1935: }
1936:
1937: /**
1938: * Decodes the source data into the provided <code>int</code>
1939: * array <code>i</code>, starting at the offset given by
1940: * <code>dstOffset</code>. Each pixel occupies
1941: * <code>bitsPerPixel</code> bits, with no padding between pixels.
1942: * Scanlines are separated by <code>scanlineStride</code>
1943: * <code>int</code>s.
1944: *
1945: * <p> The default implementation calls <code>decodeRaw(byte[] b,
1946: * ...)</code> and copies the resulting data into <code>i</code>.
1947: *
1948: * @param i an <code>int</code> array to be written.
1949: * @param dstOffset the starting offset in <code>i</code> to be
1950: * written.
1951: * @param bitsPerPixel the number of bits for each pixel.
1952: * @param scanlineStride the number of <code>int</code>s to
1953: * advance between the starting pixels of successive scanlines.
1954: *
1955: * @throws IOException if an error occurs reading from the source
1956: * <code>ImageInputStream</code>.
1957: */
1958: public void decodeRaw(int[] i, int dstOffset, int bitsPerPixel,
1959: int scanlineStride) throws IOException {
1960: int numBands = bitsPerPixel / 32;
1961: int intsPerRow = srcWidth * numBands;
1962: int bytesPerRow = intsPerRow * 4;
1963:
1964: byte[] b = new byte[bytesPerRow * srcHeight];
1965: decodeRaw(b, 0, bitsPerPixel, bytesPerRow);
1966:
1967: int bOffset = 0;
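// Assemble each group of four decoded bytes into one int according
// to the byte order of the input stream.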
1968: if (stream.getByteOrder() == ByteOrder.BIG_ENDIAN) {
1969: for (int j = 0; j < srcHeight; j++) {
1970: for (int k = 0; k < intsPerRow; k++) {
1971: int v0 = b[bOffset++] & 0xff;
1972: int v1 = b[bOffset++] & 0xff;
1973: int v2 = b[bOffset++] & 0xff;
1974: int v3 = b[bOffset++] & 0xff;
1975: int ival = (v0 << 24) | (v1 << 16) | (v2 << 8) | v3;
1976: i[dstOffset + k] = ival;
1977: }
1978:
1979: dstOffset += scanlineStride;
1980: }
1981: } else { // ByteOrder.LITTLE_ENDIAN
1982: for (int j = 0; j < srcHeight; j++) {
1983: for (int k = 0; k < intsPerRow; k++) {
1984: int v3 = b[bOffset++] & 0xff;
1985: int v2 = b[bOffset++] & 0xff;
1986: int v1 = b[bOffset++] & 0xff;
1987: int v0 = b[bOffset++] & 0xff;
1988: int ival = (v0 << 24) | (v1 << 16) | (v2 << 8) | v3;
1989: i[dstOffset + k] = ival;
1990: }
1991:
1992: dstOffset += scanlineStride;
1993: }
1994: }
1995: }
1996:
1997: /**
1998: * Decodes the source data into the provided <code>float</code>
1999: * array <code>f</code>, starting at the offset given by
2000: * <code>dstOffset</code>. Each pixel occupies
2001: * <code>bitsPerPixel</code> bits, with no padding between pixels.
2002: * Scanlines are separated by <code>scanlineStride</code>
2003: * <code>float</code>s.
2004: *
2005: * <p> The default implementation calls <code>decodeRaw(byte[] b,
2006: * ...)</code> and copies the resulting data into <code>f</code>.
2007: *
2008: * @param f a <code>float</code> array to be written.
2009: * @param dstOffset the starting offset in <code>f</code> to be
2010: * written.
2011: * @param bitsPerPixel the number of bits for each pixel.
2012: * @param scanlineStride the number of <code>float</code>s to
2013: * advance between the starting pixels of successive scanlines.
2014: *
2015: * @throws IOException if an error occurs reading from the source
2016: * <code>ImageInputStream</code>.
2017: */
2018: public void decodeRaw(float[] f, int dstOffset, int bitsPerPixel,
2019: int scanlineStride) throws IOException {
2020: int numBands = bitsPerPixel / 32;
2021: int floatsPerRow = srcWidth * numBands;
2022: int bytesPerRow = floatsPerRow * 4;
2023:
2024: byte[] b = new byte[bytesPerRow * srcHeight];
2025: decodeRaw(b, 0, bitsPerPixel, bytesPerRow);
2026:
2027: int bOffset = 0;
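// Assemble each group of four decoded bytes into an int in the
// stream's byte order, then reinterpret its bit pattern as an IEEE 754
// float via Float.intBitsToFloat; no numeric conversion is applied.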
2028: if (stream.getByteOrder() == ByteOrder.BIG_ENDIAN) {
2029: for (int j = 0; j < srcHeight; j++) {
2030: for (int i = 0; i < floatsPerRow; i++) {
2031: int v0 = b[bOffset++] & 0xff;
2032: int v1 = b[bOffset++] & 0xff;
2033: int v2 = b[bOffset++] & 0xff;
2034: int v3 = b[bOffset++] & 0xff;
2035: int ival = (v0 << 24) | (v1 << 16) | (v2 << 8) | v3;
2036: float fval = Float.intBitsToFloat(ival);
2037: f[dstOffset + i] = fval;
2038: }
2039:
2040: dstOffset += scanlineStride;
2041: }
2042: } else { // ByteOrder.LITTLE_ENDIAN
2043: for (int j = 0; j < srcHeight; j++) {
2044: for (int i = 0; i < floatsPerRow; i++) {
2045: int v3 = b[bOffset++] & 0xff;
2046: int v2 = b[bOffset++] & 0xff;
2047: int v1 = b[bOffset++] & 0xff;
2048: int v0 = b[bOffset++] & 0xff;
2049: int ival = (v0 << 24) | (v1 << 16) | (v2 << 8) | v3;
2050: float fval = Float.intBitsToFloat(ival);
2051: f[dstOffset + i] = fval;
2052: }
2053:
2054: dstOffset += scanlineStride;
2055: }
2056: }
2057: }
2058:
2059: //
2060: // Values used to prevent unneeded recalculation of bit adjustment table.
2061: //
2062: private boolean isFirstBitDepthTable = true;
2063: private boolean planarCache = false;
2064: private int[] destBitsPerSampleCache = null;
2065: private int[] sourceBandsCache = null;
2066: private int[] bitsPerSampleCache = null;
2067: private int[] destinationBandsCache = null;
2068:
2069: /**
2070: * This routine is called prior to a sequence of calls to the
2071: * <code>decode</code> method, in order to allow any necessary
2072: * tables or other structures to be initialized based on metadata
2073: * values. This routine is guaranteed to be called any time the
2074: * metadata values have changed.
2075: *
2076: * <p> The default implementation computes tables used by the
2077: * <code>decode</code> method to rescale components to different
2078: * bit depths. Thus, if this method is overridden, it is
2079: * important for the subclass method to call <code>super()</code>,
2080: * unless it overrides <code>decode</code> as well.
2081: */
2082: public void beginDecoding() {
2083: // Note: This method assumes that sourceBands, destinationBands,
2084: // and bitsPerSample are all non-null which is true as they are
2085: // set up that way in TIFFImageReader. Also the lengths and content
2086: // of sourceBands and destinationBands are checked in TIFFImageReader
2087: // before the present method is invoked.
2088:
2089: // Determine if all of the relevant output bands have the
2090: // same bit depth as the source data
2091: this.adjustBitDepths = false;
2092: int numBands = destinationBands.length;
2093: int[] destBitsPerSample = null;
2094: if (planar) {
2095: int totalNumBands = bitsPerSample.length;
2096: destBitsPerSample = new int[totalNumBands];
2097: int dbps = image.getSampleModel().getSampleSize(0);
2098: for (int b = 0; b < totalNumBands; b++) {
2099: destBitsPerSample[b] = dbps;
2100: }
2101: } else {
2102: destBitsPerSample = image.getSampleModel().getSampleSize();
2103: }
2104:
2105: // Skip the bit depth adjustment only if the image is CMYK (separated)
2106: // and has 1, 2, or 4 bits per sample.
2107: if (photometricInterpretation != BaselineTIFFTagSet.PHOTOMETRIC_INTERPRETATION_CMYK
2108: || bitsPerSample[0] != 1
2109: && bitsPerSample[0] != 2
2110: && bitsPerSample[0] != 4) {
2111: for (int b = 0; b < numBands; b++) {
2112: if (destBitsPerSample[destinationBands[b]] != bitsPerSample[sourceBands[b]]) {
2113: adjustBitDepths = true;
2114: break;
2115: }
2116: }
2117: }
2118:
2119: // If the bit depths differ, create a lookup table
2120: // per band to perform the conversion
2121: if (adjustBitDepths) {
2122: // Compute the table only if this is the first time one is
2123: // being computed or if any of the variables on which the
2124: // table is based have changed.
2125: if (this.isFirstBitDepthTable
2126: || planar != planarCache
2127: || !areIntArraysEqual(destBitsPerSample,
2128: destBitsPerSampleCache)
2129: || !areIntArraysEqual(sourceBands, sourceBandsCache)
2130: || !areIntArraysEqual(bitsPerSample,
2131: bitsPerSampleCache)
2132: || !areIntArraysEqual(destinationBands,
2133: destinationBandsCache)) {
2134:
2135: this.isFirstBitDepthTable = false;
2136:
2137: // Cache some variables.
2138: this.planarCache = planar;
2139: this.destBitsPerSampleCache = (int[]) destBitsPerSample
2140: .clone(); // never null ...
2141: this.sourceBandsCache = sourceBands == null ? null
2142: : (int[]) sourceBands.clone();
2143: this.bitsPerSampleCache = bitsPerSample == null ? null
2144: : (int[]) bitsPerSample.clone();
2145: this.destinationBandsCache = destinationBands == null ? null
2146: : (int[]) destinationBands.clone();
2147:
2148: // Allocate and fill the table.
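// Each table entry maps an input sample s in [0, maxInSample] to
// round(s*maxOutSample/maxInSample).  For example, rescaling 4-bit
// samples to 8 bits maps 0 -> 0, 7 -> 119, and 15 -> 255.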
2149: bitDepthScale = new int[numBands][];
2150: for (int b = 0; b < numBands; b++) {
2151: int maxInSample = (1 << bitsPerSample[sourceBands[b]]) - 1;
2152: int halfMaxInSample = maxInSample / 2;
2153:
2154: int maxOutSample = (1 << destBitsPerSample[destinationBands[b]]) - 1;
2155:
2156: bitDepthScale[b] = new int[maxInSample + 1];
2157: for (int s = 0; s <= maxInSample; s++) {
2158: bitDepthScale[b][s] = (s * maxOutSample + halfMaxInSample)
2159: / maxInSample;
2160: }
2161: }
2162: }
2163: } else { // !adjustBitDepths
2164: // Clear any prior table.
2165: this.bitDepthScale = null;
2166: }
2167:
2168: // Determine whether source and destination band lists are ramps.
2169: // Note that these conditions will be true for planar images if
2170: // and only if samplesPerPixel == 1, sourceBands[0] == 0, and
2171: // destinationBands[0] == 0. For the purposes of this method, the
2172: // only difference between such a planar image and a chunky image
2173: // is the setting of the PlanarConfiguration field.
2174: boolean sourceBandsNormal = false;
2175: boolean destinationBandsNormal = false;
2176: if (numBands == samplesPerPixel) {
2177: sourceBandsNormal = true;
2178: destinationBandsNormal = true;
2179: for (int i = 0; i < numBands; i++) {
2180: if (sourceBands[i] != i) {
2181: sourceBandsNormal = false;
2182: }
2183: if (destinationBands[i] != i) {
2184: destinationBandsNormal = false;
2185: }
2186: }
2187: }
2188:
2189: // Determine whether the image is bilevel and/or contiguous.
2190: // Note that a planar image could be bilevel but it will not
2191: // be contiguous unless it has a single component band stored
2192: // in a single bank.
2193: this.isBilevel = ImageUtil.isBinary(this.image.getRaster()
2194: .getSampleModel());
2195: this.isContiguous = this.isBilevel ? true : ImageUtil
2196: .imageIsContiguous(this.image);
2197:
2198: // Analyze destination image to see if we can copy into it
2199: // directly
2200:
2201: this.isImageSimple = (colorConverter == null)
2202: && (subsampleX == 1) && (subsampleY == 1)
2203: && (srcWidth == dstWidth) && (srcHeight == dstHeight)
2204: && ((dstMinX + dstWidth) <= image.getWidth())
2205: && ((dstMinY + dstHeight) <= image.getHeight())
2206: && sourceBandsNormal && destinationBandsNormal
2207: && !adjustBitDepths;
2208: }
2209:
2210: /**
2211: * Decodes the input bit stream (located in the
2212: * <code>ImageInputStream</code> <code>stream</code>, at offset
2213: * <code>offset</code>, and continuing for <code>byteCount</code>
2214: * bytes) into the output <code>BufferedImage</code>
2215: * <code>image</code>.
2216: *
2217: * <p> The default implementation analyzes the destination image
2218: * to determine if it is suitable as the destination for the
2219: * <code>decodeRaw</code> method. If not, a suitable image is
2220: * created. Next, <code>decodeRaw</code> is called to perform the
2221: * actual decoding, and the results are copied into the
2222: * destination image if necessary. Subsampling and offsetting are
2223: * performed automatically.
2224: *
2225: * <p> The precise responsibilities of this routine are as
2226: * follows. The input bit stream is defined by the instance
2227: * variables <code>stream</code>, <code>offset</code>, and
2228: * <code>byteCount</code>. These bits contain the data for the
2229: * region of the source image defined by <code>srcMinX</code>,
2230: * <code>srcMinY</code>, <code>srcWidth</code>, and
2231: * <code>srcHeight</code>.
2232: *
2233: * <p> The source data are to be subsampled, starting at
2234: * the <code>sourceXOffset</code>th column and including
2235: * every <code>subsampleX</code>th pixel thereafter (and similarly
2236: * for <code>sourceYOffset</code> and
2237: * <code>subsampleY</code>).
2238: *
2239: * <p> Pixels are copied into the destination with an additional shift of
2240: * (<code>dstXOffset</code>, <code>dstYOffset</code>). The complete
2241: * set of formulas relating the source and destination coordinate spaces
2242: * is:
2243: *
2244: * <pre>
2245: * dx = (sx - sourceXOffset)/subsampleX + dstXOffset;
2246: * dy = (sy - sourceYOffset)/subsampleY + dstYOffset;
2247: * </pre>
2248: *
2249: * Only source pixels such that <code>(sx - sourceXOffset) %
2250: * subsampleX == 0</code> and <code>(sy - sourceYOffset) %
2251: * subsampleY == 0</code> are copied.
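*
* <p> For example, with <code>subsampleX</code> = 2,
* <code>sourceXOffset</code> = 1, and <code>dstXOffset</code> = 0,
* source columns 1, 3, 5, ... map to destination columns 0, 1, 2, ...,
* respectively.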
2252: *
2253: * <p> The inverse mapping, from destination to source coordinates,
2254: * is one-to-one:
2255: *
2256: * <pre>
2257: * sx = (dx - dstXOffset)*subsampleX + sourceXOffset;
2258: * sy = (dy - dstYOffset)*subsampleY + sourceYOffset;
2259: * </pre>
2260: *
2261: * <p> The region of the destination image to be updated is given
2262: * by the instance variables <code>dstMinX</code>,
2263: * <code>dstMinY</code>, <code>dstWidth</code>, and
2264: * <code>dstHeight</code>.
2265: *
2266: * <p> It is possible that not all of the source data being read
2267: * will contribute to the destination image. For example, the
2268: * destination offsets could be set such that some of the source
2269: * pixels land outside of the bounds of the image. As a
2270: * convenience, the bounds of the active source region (that is,
2271: * the region of the strip or tile being read that actually
2272: * contributes to the destination image, taking clipping into
2273: * account) are available as <code>activeSrcMinX</code>,
2274: * <code>activeSrcMinY</code>, <code>activeSrcWidth</code> and
2275: * <code>activeSrcHeight</code>. Thus, the source pixel at
2276: * (<code>activeSrcMinX</code>, <code>activeSrcMinY</code>) will
2277: * map to the destination pixel (<code>dstMinX</code>,
2278: * <code>dstMinY</code>).
2279: *
2280: * <p> The sequence of source bands given by
2281: * <code>sourceBands</code> is to be copied into the sequence of
2282: * bands in the destination given by
2283: * <code>destinationBands</code>.
2284: *
2285: * <p> Some standard tag information is provided in the instance
2286: * variables <code>photometricInterpretation</code>,
2287: * <code>compression</code>, <code>samplesPerPixel</code>,
2288: * <code>bitsPerSample</code>, <code>sampleFormat</code>,
2289: * <code>extraSamples</code>, and <code>colorMap</code>.
2290: *
2291: * <p> In practice, unless there is a significant performance
2292: * advantage to be gained by overriding this routine, most users
2293: * will prefer to use the default implementation of this routine,
2294: * and instead override the <code>decodeRaw</code> and/or
2295: * <code>getRawImageType</code> methods.
2296: *
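* <p> A typical invocation sequence is sketched below for
* illustration only; the actual reader plug-in performs additional
* setup (stream state, source region, offsets, and metadata) before
* calling this method:
*
* <pre>
* decompressor.setImage(destinationImage);
* decompressor.setDstMinX(dstMinX);
* decompressor.setDstMinY(dstMinY);
* decompressor.setDstWidth(dstWidth);
* decompressor.setDstHeight(dstHeight);
* decompressor.setSourceBands(sourceBands);
* decompressor.setDestinationBands(destinationBands);
* decompressor.setSubsampleX(subsampleX);
* decompressor.setSubsampleY(subsampleY);
* decompressor.beginDecoding();
* decompressor.decode();
* </pre>
*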
2297: * @exception IOException if an error occurs in
2298: * <code>decodeRaw</code>.
2299: */
2300: public void decode() throws IOException {
2301: byte[] byteData = null;
2302: short[] shortData = null;
2303: int[] intData = null;
2304: float[] floatData = null;
2305:
2306: int dstOffset = 0;
2307: int pixelBitStride = 1;
2308: int scanlineStride = 0;
2309:
2310: // Analyze raw image
2311:
2312: this.rawImage = null;
2313: if (isImageSimple) {
2314: if (isBilevel) {
2315: rawImage = this.image;
2316: } else if (isContiguous) {
2317: rawImage = image.getSubimage(dstMinX, dstMinY,
2318: dstWidth, dstHeight);
2319: }
2320: }
2321:
2322: boolean isDirectCopy = rawImage != null;
2323:
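// If the destination image could not be used directly, decode into
// an intermediate raw image and copy the result into the destination
// at the end of this method.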
2324: if (rawImage == null) {
2325: rawImage = createRawImage();
2326: if (rawImage == null) {
2327: throw new IIOException("Couldn't create image buffer!");
2328: }
2329: }
2330:
2331: WritableRaster ras = rawImage.getRaster();
2332:
2333: if (isBilevel) {
2334: Rectangle rect = isImageSimple ? new Rectangle(dstMinX,
2335: dstMinY, dstWidth, dstHeight) : ras.getBounds();
2336: byteData = ImageUtil.getPackedBinaryData(ras, rect);
2337: dstOffset = 0;
2338: pixelBitStride = 1;
2339: scanlineStride = (rect.width + 7) / 8;
2340: } else {
2341: SampleModel sm = ras.getSampleModel();
2342: DataBuffer db = ras.getDataBuffer();
2343:
2344: boolean isSupportedType = false;
2345:
2346: if (sm instanceof ComponentSampleModel) {
2347: ComponentSampleModel csm = (ComponentSampleModel) sm;
2348: dstOffset = csm.getOffset(-ras
2349: .getSampleModelTranslateX(), -ras
2350: .getSampleModelTranslateY());
2351: scanlineStride = csm.getScanlineStride();
2352: if (db instanceof DataBufferByte) {
2353: DataBufferByte dbb = (DataBufferByte) db;
2354:
2355: byteData = dbb.getData();
2356: pixelBitStride = csm.getPixelStride() * 8;
2357: isSupportedType = true;
2358: } else if (db instanceof DataBufferUShort) {
2359: DataBufferUShort dbus = (DataBufferUShort) db;
2360:
2361: shortData = dbus.getData();
2362: pixelBitStride = csm.getPixelStride() * 16;
2363: isSupportedType = true;
2364: } else if (db instanceof DataBufferShort) {
2365: DataBufferShort dbs = (DataBufferShort) db;
2366:
2367: shortData = dbs.getData();
2368: pixelBitStride = csm.getPixelStride() * 16;
2369: isSupportedType = true;
2370: } else if (db instanceof DataBufferInt) {
2371: DataBufferInt dbi = (DataBufferInt) db;
2372:
2373: intData = dbi.getData();
2374: pixelBitStride = csm.getPixelStride() * 32;
2375: isSupportedType = true;
2376: } else if (db instanceof DataBufferFloat) {
2377: DataBufferFloat dbf = (DataBufferFloat) db;
2378:
2379: floatData = dbf.getData();
2380: pixelBitStride = csm.getPixelStride() * 32;
2381: isSupportedType = true;
2382: }
2383: } else if (sm instanceof MultiPixelPackedSampleModel) {
2384: MultiPixelPackedSampleModel mppsm = (MultiPixelPackedSampleModel) sm;
2385: dstOffset = mppsm.getOffset(-ras
2386: .getSampleModelTranslateX(), -ras
2387: .getSampleModelTranslateY());
2388: pixelBitStride = mppsm.getPixelBitStride();
2389: scanlineStride = mppsm.getScanlineStride();
2390: if (db instanceof DataBufferByte) {
2391: DataBufferByte dbb = (DataBufferByte) db;
2392:
2393: byteData = dbb.getData();
2394: isSupportedType = true;
2395: } else if (db instanceof DataBufferUShort) {
2396: DataBufferUShort dbus = (DataBufferUShort) db;
2397:
2398: shortData = dbus.getData();
2399: isSupportedType = true;
2400: } else if (db instanceof DataBufferInt) {
2401: DataBufferInt dbi = (DataBufferInt) db;
2402:
2403: intData = dbi.getData();
2404: isSupportedType = true;
2405: }
2406: } else if (sm instanceof SinglePixelPackedSampleModel) {
2407: SinglePixelPackedSampleModel sppsm = (SinglePixelPackedSampleModel) sm;
2408: dstOffset = sppsm.getOffset(-ras
2409: .getSampleModelTranslateX(), -ras
2410: .getSampleModelTranslateY());
2411: scanlineStride = sppsm.getScanlineStride();
2412: if (db instanceof DataBufferByte) {
2413: DataBufferByte dbb = (DataBufferByte) db;
2414:
2415: byteData = dbb.getData();
2416: pixelBitStride = 8;
2417: isSupportedType = true;
2418: } else if (db instanceof DataBufferUShort) {
2419: DataBufferUShort dbus = (DataBufferUShort) db;
2420:
2421: shortData = dbus.getData();
2422: pixelBitStride = 16;
2423: isSupportedType = true;
2424: } else if (db instanceof DataBufferInt) {
2425: DataBufferInt dbi = (DataBufferInt) db;
2426:
2427: intData = dbi.getData();
2428: pixelBitStride = 32;
2429: isSupportedType = true;
2430: }
2431: }
2432:
2433: if (!isSupportedType) {
2434: throw new IIOException(
2435: "Unsupported raw image type: SampleModel = "
2436: + sm + "; DataBuffer = " + db);
2437: }
2438: }
2439:
2440: if (isBilevel) {
2441: // Bilevel data are always in a contiguous byte buffer.
2442: decodeRaw(byteData, dstOffset, pixelBitStride,
2443: scanlineStride);
2444: } else {
2445: SampleModel sm = ras.getSampleModel();
2446:
2447: // Branch based on whether data are bit-contiguous, i.e.,
2448: // data are packed as tightly as possible, leaving no unused
2449: // bits except at the end of a row.
2450: if (isDataBufferBitContiguous(sm)) {
2451: // Use byte or float data directly.
2452: if (byteData != null) {
2453: if (DEBUG) {
2454: System.out.println("Decoding bytes directly");
2455: }
2456: decodeRaw(byteData, dstOffset, pixelBitStride,
2457: scanlineStride);
2458: } else if (floatData != null) {
2459: if (DEBUG) {
2460: System.out.println("Decoding floats directly");
2461: }
2462: decodeRaw(floatData, dstOffset, pixelBitStride,
2463: scanlineStride);
2464: } else {
2465: if (shortData != null) {
2466: if (areSampleSizesEqual(sm)
2467: && sm.getSampleSize(0) == 16) {
2468: if (DEBUG) {
2469: System.out
2470: .println("Decoding shorts directly");
2471: }
2472: // Decode directly into short data.
2473: decodeRaw(shortData, dstOffset,
2474: pixelBitStride, scanlineStride);
2475: } else {
2476: if (DEBUG) {
2477: System.out
2478: .println("Decoding bytes->shorts");
2479: }
2480: // Decode into bytes and reformat into shorts.
2481: int bpp = getBitsPerPixel(sm);
2482: int bytesPerRow = (bpp * srcWidth + 7) / 8;
2483: byte[] buf = new byte[bytesPerRow
2484: * srcHeight];
2485: decodeRaw(buf, 0, bpp, bytesPerRow);
2486: reformatData(buf, bytesPerRow, srcHeight,
2487: shortData, null, dstOffset,
2488: scanlineStride);
2489: }
2490: } else if (intData != null) {
2491: if (areSampleSizesEqual(sm)
2492: && sm.getSampleSize(0) == 32) {
2493: if (DEBUG) {
2494: System.out
2495: .println("Decoding ints directly");
2496: }
2497: // Decode directly into int data.
2498: decodeRaw(intData, dstOffset,
2499: pixelBitStride, scanlineStride);
2500: } else {
2501: if (DEBUG) {
2502: System.out
2503: .println("Decoding bytes->ints");
2504: }
2505: // Decode into bytes and reformat into ints.
2506: int bpp = getBitsPerPixel(sm);
2507: int bytesPerRow = (bpp * srcWidth + 7) / 8;
2508: byte[] buf = new byte[bytesPerRow
2509: * srcHeight];
2510: decodeRaw(buf, 0, bpp, bytesPerRow);
2511: reformatData(buf, bytesPerRow, srcHeight,
2512: null, intData, dstOffset,
2513: scanlineStride);
2514: }
2515: }
2516: }
2517: } else {
2518: if (DEBUG) {
2519: System.out.println("Decoding discontiguous data");
2520: }
2521: // Read discontiguous data into bytes and set the samples
2522: // into the Raster.
2523: int bpp = getBitsPerPixel(sm);
2524: int bytesPerRow = (bpp * srcWidth + 7) / 8;
2525: byte[] buf = new byte[bytesPerRow * srcHeight];
2526: decodeRaw(buf, 0, bpp, bytesPerRow);
2527: reformatDiscontiguousData(buf, bytesPerRow, srcWidth,
2528: srcHeight, ras);
2529: }
2530: }
2531:
2532: // System.out.println("colorConverter = " + colorConverter);
2533:
2534: if (colorConverter != null) {
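// Convert each pixel in place from the encoded color space to RGB,
// one scanline at a time; the data are assumed to consist of three
// interleaved components.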
2535: float[] rgb = new float[3];
2536:
2537: if (byteData != null) {
2538: for (int j = 0; j < dstHeight; j++) {
2539: int idx = dstOffset;
2540: for (int i = 0; i < dstWidth; i++) {
2541: float x0 = (float) (byteData[idx] & 0xff);
2542: float x1 = (float) (byteData[idx + 1] & 0xff);
2543: float x2 = (float) (byteData[idx + 2] & 0xff);
2544:
2545: colorConverter.toRGB(x0, x1, x2, rgb);
2546:
2547: byteData[idx] = (byte) (rgb[0]);
2548: byteData[idx + 1] = (byte) (rgb[1]);
2549: byteData[idx + 2] = (byte) (rgb[2]);
2550:
2551: idx += 3;
2552: }
2553:
2554: dstOffset += scanlineStride;
2555: }
2556: } else if (shortData != null) {
2557: if (sampleFormat[0] == BaselineTIFFTagSet.SAMPLE_FORMAT_SIGNED_INTEGER) {
2558: for (int j = 0; j < dstHeight; j++) {
2559: int idx = dstOffset;
2560: for (int i = 0; i < dstWidth; i++) {
2561: float x0 = (float) shortData[idx];
2562: float x1 = (float) shortData[idx + 1];
2563: float x2 = (float) shortData[idx + 2];
2564:
2565: colorConverter.toRGB(x0, x1, x2, rgb);
2566:
2567: shortData[idx] = (short) (rgb[0]);
2568: shortData[idx + 1] = (short) (rgb[1]);
2569: shortData[idx + 2] = (short) (rgb[2]);
2570:
2571: idx += 3;
2572: }
2573:
2574: dstOffset += scanlineStride;
2575: }
2576: } else {
2577: for (int j = 0; j < dstHeight; j++) {
2578: int idx = dstOffset;
2579: for (int i = 0; i < dstWidth; i++) {
2580: float x0 = (float) (shortData[idx] & 0xffff);
2581: float x1 = (float) (shortData[idx + 1] & 0xffff);
2582: float x2 = (float) (shortData[idx + 2] & 0xffff);
2583:
2584: colorConverter.toRGB(x0, x1, x2, rgb);
2585:
2586: shortData[idx] = (short) (rgb[0]);
2587: shortData[idx + 1] = (short) (rgb[1]);
2588: shortData[idx + 2] = (short) (rgb[2]);
2589:
2590: idx += 3;
2591: }
2592:
2593: dstOffset += scanlineStride;
2594: }
2595: }
2596: } else if (intData != null) {
2597: for (int j = 0; j < dstHeight; j++) {
2598: int idx = dstOffset;
2599: for (int i = 0; i < dstWidth; i++) {
2600: float x0 = (float) intData[idx];
2601: float x1 = (float) intData[idx + 1];
2602: float x2 = (float) intData[idx + 2];
2603:
2604: colorConverter.toRGB(x0, x1, x2, rgb);
2605:
2606: intData[idx] = (int) (rgb[0]);
2607: intData[idx + 1] = (int) (rgb[1]);
2608: intData[idx + 2] = (int) (rgb[2]);
2609:
2610: idx += 3;
2611: }
2612:
2613: dstOffset += scanlineStride;
2614: }
2615: } else if (floatData != null) {
2616: for (int j = 0; j < dstHeight; j++) {
2617: int idx = dstOffset;
2618: for (int i = 0; i < dstWidth; i++) {
2619: float x0 = floatData[idx];
2620: float x1 = floatData[idx + 1];
2621: float x2 = floatData[idx + 2];
2622:
2623: colorConverter.toRGB(x0, x1, x2, rgb);
2624:
2625: floatData[idx] = rgb[0];
2626: floatData[idx + 1] = rgb[1];
2627: floatData[idx + 2] = rgb[2];
2628:
2629: idx += 3;
2630: }
2631:
2632: dstOffset += scanlineStride;
2633: }
2634: }
2635:
2636: // int[] p = new int[3];
2637: // ras.getPixel(0, 0, p);
2638: // System.out.println("p00 = " +
2639: // p[0] + " " + p[1] + " " + p[2]);
2640: // ras.getPixel(1, 0, p);
2641: // System.out.println("p10 = " +
2642: // p[0] + " " + p[1] + " " + p[2]);
2643: // ras.getPixel(2, 0, p);
2644: // System.out.println("p20 = " +
2645: // p[0] + " " + p[1] + " " + p[2]);
2646: // ras.getPixel(3, 0, p);
2647: // System.out.println("p30 = " +
2648: // p[0] + " " + p[1] + " " + p[2]);
2649:
2650: // ColorSpace rgb = ColorSpace.getInstance(ColorSpace.CS_sRGB);
2651: // ColorConvertOp op = new ColorConvertOp(colorSpace, rgb, null);
2652: // WritableRaster dest = op.createCompatibleDestRaster(ras);
2653: // op.filter(ras, dest);
2654: // ras = dest;
2655: }
2656:
2657: if (photometricInterpretation == BaselineTIFFTagSet.PHOTOMETRIC_INTERPRETATION_WHITE_IS_ZERO) {
2658: if (byteData != null) {
2659: int bytesPerRow = (srcWidth * pixelBitStride + 7) / 8;
2660: for (int y = 0; y < srcHeight; y++) {
2661: int offset = dstOffset + y * scanlineStride;
2662: for (int i = 0; i < bytesPerRow; i++) {
2663: byteData[offset + i] ^= 0xff;
2664: }
2665: }
2666: } else if (shortData != null) {
2667: int shortsPerRow = (srcWidth * pixelBitStride + 15) / 16;
2668: if (sampleFormat[0] == BaselineTIFFTagSet.SAMPLE_FORMAT_SIGNED_INTEGER) {
2669: for (int y = 0; y < srcHeight; y++) {
2670: int offset = dstOffset + y * scanlineStride;
2671: for (int i = 0; i < shortsPerRow; i++) {
2672: int shortOffset = offset + i;
2673: // XXX Does this make any sense?
2674: shortData[shortOffset] = (short) (Short.MAX_VALUE - shortData[shortOffset]);
2675: }
2676: }
2677: } else {
2678: for (int y = 0; y < srcHeight; y++) {
2679: int offset = dstOffset + y * scanlineStride;
2680: for (int i = 0; i < shortsPerRow; i++) {
2681: shortData[offset + i] ^= 0xffff;
2682: }
2683: }
2684: }
2685: } else if (intData != null) {
2686: int intsPerRow = (srcWidth * pixelBitStride + 31) / 32; // 32 bits per int
2687: for (int y = 0; y < srcHeight; y++) {
2688: int offset = dstOffset + y * scanlineStride;
2689: for (int i = 0; i < intsPerRow; i++) {
2690: int intOffset = offset + i;
2691: // XXX Does this make any sense?
2692: intData[intOffset] = Integer.MAX_VALUE
2693: - intData[intOffset];
2694: }
2695: }
2696: } else if (floatData != null) {
2697: int floatsPerRow = (srcWidth * pixelBitStride + 31) / 32; // 32 bits per float
2698: for (int y = 0; y < srcHeight; y++) {
2699: int offset = dstOffset + y * scanlineStride;
2700: for (int i = 0; i < floatsPerRow; i++) {
2701: int floatOffset = offset + i;
2702: // XXX Does this make any sense?
2703: floatData[floatOffset] = 1.0F - floatData[floatOffset];
2704: }
2705: }
2706: }
2707: }
2708:
2709: if (isBilevel) {
2710: Rectangle rect = isImageSimple ? new Rectangle(dstMinX,
2711: dstMinY, dstWidth, dstHeight) : ras.getBounds();
2712: ImageUtil.setPackedBinaryData(byteData, ras, rect);
2713: }
2714:
2715: // XXX A better test might be if the rawImage raster either
2716: // equals the raster of 'image' or is a child thereof.
2717: if (isDirectCopy) { // rawImage == image) {
2718: return;
2719: }
2720:
2721: // Copy the raw image data into the true destination image
2722: Raster src = rawImage.getRaster();
2723:
2724: // Create band child of source
2725: Raster srcChild = src.createChild(0, 0, srcWidth, srcHeight,
2726: srcMinX, srcMinY, planar ? null : sourceBands);
2727:
2728: WritableRaster dst = image.getRaster();
2729:
2730: // Create dst child covering area and bands to be written
2731: WritableRaster dstChild = dst.createWritableChild(dstMinX,
2732: dstMinY, dstWidth, dstHeight, dstMinX, dstMinY,
2733: destinationBands);
2734:
2735: if (subsampleX == 1 && subsampleY == 1 && !adjustBitDepths) {
2736: srcChild = srcChild.createChild(activeSrcMinX,
2737: activeSrcMinY, activeSrcWidth, activeSrcHeight,
2738: dstMinX, dstMinY, null);
2739:
2740: dstChild.setRect(srcChild);
2741: } else if (subsampleX == 1 && !adjustBitDepths) {
2742: int sy = activeSrcMinY;
2743: int dy = dstMinY;
2744: while (sy < srcMinY + srcHeight) {
2745: Raster srcRow = srcChild.createChild(activeSrcMinX, sy,
2746: activeSrcWidth, 1, dstMinX, dy, null);
2747: dstChild.setRect(srcRow);
2748:
2749: sy += subsampleY;
2750: ++dy;
2751: }
2752: } else {
2753: int[] p = srcChild.getPixel(srcMinX, srcMinY, (int[]) null);
2754: int numBands = p.length;
2755:
2756: int sy = activeSrcMinY;
2757: int dy = dstMinY;
2758:
2759: while (sy < activeSrcMinY + activeSrcHeight) {
2760: int sx = activeSrcMinX;
2761: int dx = dstMinX;
2762:
2763: while (sx < activeSrcMinX + activeSrcWidth) {
2764: srcChild.getPixel(sx, sy, p);
2765: if (adjustBitDepths) {
2766: for (int band = 0; band < numBands; band++) {
2767: p[band] = bitDepthScale[band][p[band]];
2768: }
2769: }
2770: dstChild.setPixel(dx, dy, p);
2771:
2772: sx += subsampleX;
2773: ++dx;
2774: }
2775:
2776: sy += subsampleY;
2777: ++dy;
2778: }
2779: }
2780: }
2781: }
|