0001: /*
0002: * GeoTools - OpenSource mapping toolkit
0003: * http://geotools.org
* (C) 2002-2006, GeoTools Project Management Committee (PMC)
0005: *
0006: * This library is free software; you can redistribute it and/or
0007: * modify it under the terms of the GNU Lesser General Public
0008: * License as published by the Free Software Foundation;
0009: * version 2.1 of the License.
0010: *
0011: * This library is distributed in the hope that it will be useful,
0012: * but WITHOUT ANY WARRANTY; without even the implied warranty of
0013: * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
0014: * Lesser General Public License for more details.
0015: */
0016: package org.geotools.data.postgis;
0017:
0018: import java.io.IOException;
0019: import java.sql.Connection;
0020: import java.sql.DatabaseMetaData;
0021: import java.sql.ResultSet;
0022: import java.sql.SQLException;
0023: import java.sql.Statement;
0024: import java.util.ArrayList;
0025: import java.util.Arrays;
0026: import java.util.HashMap;
0027: import java.util.HashSet;
0028: import java.util.Iterator;
0029: import java.util.List;
0030: import java.util.Map;
0031: import java.util.NoSuchElementException;
0032: import java.util.Set;
0033: import java.util.logging.Logger;
0034:
0035: import javax.sql.DataSource;
0036:
0037: import org.geotools.data.DataSourceException;
0038: import org.geotools.data.DataUtilities;
0039: import org.geotools.data.DefaultQuery;
0040: import org.geotools.data.DefaultTransaction;
0041: import org.geotools.data.FeatureListenerManager;
0042: import org.geotools.data.FeatureLocking;
0043: import org.geotools.data.FeatureReader;
0044: import org.geotools.data.FeatureSource;
0045: import org.geotools.data.FeatureStore;
0046: import org.geotools.data.FeatureWriter;
0047: import org.geotools.data.LockingManager;
0048: import org.geotools.data.Query;
0049: import org.geotools.data.Transaction;
0050: import org.geotools.data.VersioningDataStore;
0051: import org.geotools.data.jdbc.JDBCDataStoreConfig;
0052: import org.geotools.data.jdbc.JDBCUtils;
0053: import org.geotools.data.jdbc.SQLBuilder;
0054: import org.geotools.data.jdbc.fidmapper.BasicFIDMapper;
0055: import org.geotools.data.jdbc.fidmapper.FIDMapper;
0056: import org.geotools.data.jdbc.fidmapper.MultiColumnFIDMapper;
0057: import org.geotools.data.jdbc.fidmapper.TypedFIDMapper;
0058: import org.geotools.data.postgis.fidmapper.PostGISAutoIncrementFIDMapper;
0059: import org.geotools.data.postgis.fidmapper.PostgisFIDMapperFactory;
0060: import org.geotools.data.postgis.fidmapper.VersionedFIDMapper;
0061: import org.geotools.data.postgis.fidmapper.VersionedFIDMapperFactory;
0062: import org.geotools.factory.CommonFactoryFinder;
0063: import org.geotools.factory.FactoryConfigurationError;
0064: import org.geotools.feature.AttributeType;
0065: import org.geotools.feature.Feature;
0066: import org.geotools.feature.FeatureType;
0067: import org.geotools.feature.FeatureTypeBuilder;
0068: import org.geotools.feature.IllegalAttributeException;
0069: import org.geotools.feature.SchemaException;
0070: import org.geotools.filter.FilterFactoryFinder;
0071: import org.geotools.filter.FilterVisitorFilterWrapper;
0072: import org.geotools.geometry.jts.JTS;
0073: import org.opengis.filter.Filter;
0074: import org.opengis.filter.FilterFactory;
0075: import org.opengis.filter.expression.PropertyName;
0076: import org.opengis.referencing.crs.CoordinateReferenceSystem;
0077: import org.opengis.referencing.operation.TransformException;
0078:
0079: import com.vividsolutions.jts.geom.Envelope;
0080:
0081: /**
* Postgis datastore with versioning support. At the implementation level this class acts as
* a mapper between the wrapped datastore, which sees the data structures as they really are,
* and the outside view, which hides the versioning columns and the extra versioning tables
* from the published feature types.
0086: * <p>
0087: * Assumptions made by the data store:
0088: * <ul>
0089: * <li>There is a primary key in tables that need to be turned into versioned ones</li>
0090: * <li>Primary key columns are mapped in the FID mapper.</li>
0091: * </ul>
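* <p>
* A minimal usage sketch; the DataSource setup, schema, namespace, table name, author and
* message below are illustrative assumptions, not fixed requirements:
*
* <pre>
* DataSource dataSource = ...; // a javax.sql.DataSource pointing at a PostGIS database
* VersionedPostgisDataStore ds = new VersionedPostgisDataStore(dataSource, "public", "topp");
* ds.setVersioned("roads", true, "admin", "Version enabling the roads table");
* </pre>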
0092: *
0093: * @author aaime
0094: * @since 2.4
0095: *
0096: */
0097: public class VersionedPostgisDataStore implements VersioningDataStore {
0098:
0099: /** The logger for the postgis module. */
0100: protected static final Logger LOGGER = org.geotools.util.logging.Logging
0101: .getLogger("org.geotools.data.postgis");
0102:
0103: public static final String TBL_VERSIONEDTABLES = "versioned_tables";
0104:
0105: public static final String TBL_TABLESCHANGED = "tables_changed";
0106:
0107: public static final String TBL_CHANGESETS = "changesets";
0108:
0109: static final String REVISION = "revision";
0110:
0111: static final String VERSION = "version";
0112:
0113: static final Class[] SUPPORTED_FID_MAPPERS = new Class[] {
0114: BasicFIDMapper.class, MultiColumnFIDMapper.class,
0115: PostGISAutoIncrementFIDMapper.class };
0116:
0117: /**
0118: * Key used in transaction properties to hold the commit author
0119: */
0120: public static final String AUTHOR = "PgVersionedCommitAuthor";
0121:
0122: /**
0123: * Key used in transaction properties to hold the commit message
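* <p>
* A sketch of how the commit author and message can be attached to a transaction before
* committing versioned changes (the property values are illustrative):
*
* <pre>
* Transaction t = new DefaultTransaction();
* t.putProperty(VersionedPostgisDataStore.AUTHOR, "andrea");
* t.putProperty(VersionedPostgisDataStore.MESSAGE, "Updated road geometries");
* </pre>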
0124: */
0125: public static final String MESSAGE = "PgVersionedCommitMessage";
0126:
0127: /**
0128: * Key used to store the feature types touched by the current transaction
0129: */
0130: public static final String DIRTYTYPES = "PgVersionedDirtyTypes";
0131:
0132: /**
0133: * The wrapped postgis datastore that we leverage for most operations
0134: */
0135: protected WrappedPostgisDataStore wrapped;
0136:
0137: /**
* Holds boolean markers used to determine whether a table is versioned or not (cached to
* speed things up, since the isVersioned() check is required by every other public API)
0140: */
0141: protected Map versionedMap = new HashMap();
0142:
0143: /** Manages listener lists for FeatureSource implementations */
0144: protected FeatureListenerManager listenerManager = new FeatureListenerManager();
0145:
0146: public VersionedPostgisDataStore(DataSource dataSource,
0147: String schema, String namespace, int optimizeMode)
0148: throws IOException {
0149: wrapped = new WrappedPostgisDataStore(dataSource, schema,
0150: namespace, optimizeMode);
0151: checkVersioningDataStructures();
0152: }
0153:
0154: public VersionedPostgisDataStore(DataSource dataSource,
0155: String schema, String namespace) throws IOException {
0156: wrapped = new WrappedPostgisDataStore(dataSource, schema,
0157: namespace);
0158: checkVersioningDataStructures();
0159: }
0160:
0161: public VersionedPostgisDataStore(DataSource dataSource,
0162: String namespace) throws IOException {
0163: wrapped = new WrappedPostgisDataStore(dataSource, namespace);
0164: checkVersioningDataStructures();
0165: }
0166:
0167: public VersionedPostgisDataStore(DataSource dataSource)
0168: throws IOException {
0169: wrapped = new WrappedPostgisDataStore(dataSource);
0170: checkVersioningDataStructures();
0171: }
0172:
0173: protected JDBCDataStoreConfig getConfig() {
0174: return wrapped.getConfig();
0175: }
0176:
0177: public String[] getTypeNames() throws IOException {
0178: List names = new ArrayList(Arrays
0179: .asList(wrapped.getTypeNames()));
0180: names.remove(TBL_TABLESCHANGED);
0181: names.remove(TBL_VERSIONEDTABLES);
0182: for (Iterator it = names.iterator(); it.hasNext();) {
0183: String name = (String) it.next();
0184: if (isVersionedFeatureCollection(name))
0185: it.remove();
0186: }
0187: return (String[]) names.toArray(new String[names.size()]);
0188: }
0189:
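/**
* Returns the feature type schema as seen by callers of this datastore. For versioned feature
* types this is the external, unversioned view: the versioning columns (revision, expired,
* created) and the primary key columns mapped by the FID mapper are hidden from the returned
* type.
*/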
0190: public FeatureType getSchema(String typeName) throws IOException {
0191: FeatureType ft = wrapped.getSchema(typeName);
0192:
0193: if (!isVersioned(typeName)) {
0194: return ft;
0195: }
0196:
// if the feature type is versioned, we have to map the internal feature
// type to the external view, where versioning and pk columns are not
// included
0200:
0201: Set names = new HashSet(
0202: Arrays.asList(filterPropertyNames(new DefaultQuery(
0203: typeName))));
0204: List filtered = new ArrayList();
0205: for (int i = 0; i < ft.getAttributeCount(); i++) {
0206: AttributeType cat = ft.getAttributeType(i);
0207: String name = cat.getName().toLowerCase();
0208: if (names.contains(name)) {
0209: filtered.add(cat);
0210: }
0211: }
0212: AttributeType[] ats = (AttributeType[]) filtered
0213: .toArray(new AttributeType[filtered.size()]);
0214:
0215: try {
0216: return FeatureTypeBuilder.newFeatureType(ats, ft
0217: .getTypeName(), ft.getNamespace(), ft.isAbstract(),
0218: ft.getAncestors(), ft.getDefaultGeometry());
0219: } catch (SchemaException e) {
0220: throw new DataSourceException(
0221: "Error converting FeatureType from versioned (internal) schema "
0222: + "to unversioned (external) schema "
0223: + typeName, e);
0224: }
0225: }
0226:
0227: public void createSchema(FeatureType featureType)
0228: throws IOException {
0229: wrapped.createSchema(featureType);
0230: }
0231:
0232: public void updateSchema(String typeName, FeatureType featureType)
0233: throws IOException {
0234: throw new IOException(
0235: "VersionedPostgisDataStore.updateSchema not yet implemented");
0236: // TODO: implement updateSchema when the postgis data store does it
0237: }
0238:
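/**
* Gets a feature reader for the specified query and transaction. For versioned feature types
* the query version is honored: the query filter is merged with the conditions needed to
* extract the requested revision (the latest one when no version is set).
* <p>
* A minimal sketch, reading an illustrative "roads" type at an assumed revision "15"
* (ds is an instance of this datastore):
*
* <pre>
* DefaultQuery q = new DefaultQuery("roads");
* q.setVersion("15");
* FeatureReader fr = ds.getFeatureReader(q, Transaction.AUTO_COMMIT);
* </pre>
*/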
0239: public FeatureReader getFeatureReader(Query query, Transaction trans)
0240: throws IOException {
0241: if (!isVersioned(query.getTypeName())) {
0242: return wrapped.getFeatureReader(query, trans);
0243: }
0244:
0245: // check what revision we have to gather, and build a modified query
0246: // that extracts it
0247: DefaultQuery versionedQuery = buildVersionedQuery(query);
0248:
0249: FeatureReader reader = wrapped.getFeatureReader(versionedQuery,
0250: trans);
0251: VersionedFIDMapper mapper = (VersionedFIDMapper) getFIDMapper(query
0252: .getTypeName());
0253: return new VersionedFeatureReader(reader, mapper);
0254: }
0255:
0256: public LockingManager getLockingManager() {
0257: return wrapped.getLockingManager();
0258: }
0259:
0260: public FeatureWriter getFeatureWriter(String typeName,
0261: Transaction transaction) throws IOException {
0262: return getFeatureWriter(typeName, Filter.INCLUDE, transaction);
0263: }
0264:
0265: public FeatureWriter getFeatureWriter(String typeName,
0266: Filter filter, Transaction transaction) throws IOException {
0267: if (TBL_CHANGESETS.equals(typeName))
0268: throw new DataSourceException(
0269: "Changesets feature type is read only");
0270:
0271: if (!isVersioned(typeName))
0272: return wrapped.getFeatureWriter(typeName, filter,
0273: transaction);
0274:
0275: return getFeatureWriterInternal(typeName, filter, transaction,
0276: false);
0277: }
0278:
0279: /**
0280: * Returns either a standard feature writer, or a pure append feature writer
0281: */
0282: protected FeatureWriter getFeatureWriterInternal(String typeName,
0283: Filter filter, Transaction transaction, boolean append)
0284: throws IOException, DataSourceException {
0285: // check transaction definition is ok
0286: // checkTransactionProperties(transaction);
0287:
// if the transaction is auto-commit we have to create one of our own, otherwise
// the database structure may be ruined, and we also need support for
// transaction state and properties.
// In that case we must remember to commit the transaction when the writer is
// closed
0293: boolean autoCommit = false;
0294: if (transaction.equals(Transaction.AUTO_COMMIT)) {
0295: transaction = new DefaultTransaction();
0296: autoCommit = true;
0297: }
0298:
0299: // build a filter that extracts the last revision and remaps fid filters
0300: Filter revisionedFilter = buildVersionedFilter(typeName,
0301: filter, new RevisionInfo());
0302:
0303: // Gather the update writer, used to expire old revisions, and the
0304: // append writer, used to create new revisions
0305: FeatureWriter updateWriter;
0306: if (append)
0307: updateWriter = null;
0308: else
0309: updateWriter = wrapped.getFeatureWriter(typeName,
0310: revisionedFilter, transaction);
0311: FeatureWriter appendWriter = wrapped.getFeatureWriterAppend(
0312: typeName, transaction);
0313:
0314: // mark this feature type as dirty
0315: VersionedJdbcTransactionState state = wrapped
0316: .getVersionedJdbcTransactionState(transaction);
0317: state.setTypeNameDirty(typeName);
0318:
0319: // finally, return a feature writer that will do the proper
0320: // versioning job
0321: VersionedFIDMapper mapper = (VersionedFIDMapper) getFIDMapper(typeName);
0322: VersionedFeatureWriter writer = new VersionedFeatureWriter(
0323: updateWriter, appendWriter, getSchema(typeName), state,
0324: mapper, autoCommit);
0325: writer.setFeatureListenerManager(listenerManager);
0326: return writer;
0327: }
0328:
0329: public FeatureWriter getFeatureWriterAppend(String typeName,
0330: Transaction transaction) throws IOException {
0331: if (!isVersioned(typeName))
0332: return wrapped
0333: .getFeatureWriterAppend(typeName, transaction);
0334:
0335: return getFeatureWriterInternal(typeName, Filter.EXCLUDE,
0336: transaction, true);
0337: }
0338:
0339: public FeatureSource getFeatureSource(String typeName)
0340: throws IOException {
0341: if (isVersioned(typeName))
0342: return new VersionedPostgisFeatureStore(
0343: getSchema(typeName), this );
0344:
0345: FeatureSource source = wrapped.getFeatureSource(typeName);
0346:
0347: // changesets should be read only for the outside world
0348: if (TBL_CHANGESETS.equals(typeName))
0349: return new WrappingPostgisFeatureSource(source, this );
0350:
0351: // for the others, wrap so that we don't report the wrong owning datastore
0352: if (source instanceof FeatureLocking)
0353: return new WrappingPostgisFeatureLocking(
0354: (FeatureLocking) source, this );
0355: else if (source instanceof FeatureStore)
0356: return new WrappingPostgisFeatureStore(
0357: (FeatureStore) source, this );
0358: else
0359: return new WrappingPostgisFeatureSource(
0360: (FeatureSource) source, this );
0361: }
0362:
0363: public FeatureSource getView(Query query) throws IOException,
0364: SchemaException {
0365: throw new UnsupportedOperationException(
0366: "At the moment getView(Query) is not supported");
0367: }
0368:
0369: /**
0370: * Returns true if the specified feature type is versioned, false otherwise
0371: *
0372: * @param typeName
0373: * @return
0374: */
0375: public boolean isVersioned(String typeName) throws IOException {
0376: return isVersioned(typeName, null);
0377: }
0378:
0379: /**
0380: * Alters the versioned state of a feature type
0381: *
* @param typeName
* the type name whose versioned state must be changed
* @param versioned
* if true, the type gets version enabled, if false versioning is disabled
* @param author
* the author of the change, recorded in the changeset created by this operation
* @param message
* the commit message recorded in the changeset created by this operation
0389: * @throws IOException
0390: */
0391: public synchronized void setVersioned(String typeName,
0392: boolean versioned, String author, String message)
0393: throws IOException {
0394: if (typeName == null)
0395: throw new NullPointerException("TypeName cannot be null");
0396: if (typeName.equals(TBL_CHANGESETS) && versioned)
0397: throw new IOException(
0398: TBL_CHANGESETS
0399: + " is exposed as a log facility, and cannot be versioned");
0400:
0401: // no change, no action
0402: if (isVersioned(typeName) == versioned)
0403: return;
0404:
0405: if (versioned) { // turn on versioning
0406: enableVersioning(typeName, author, message);
0407: } else { // turn off versioning
0408: disableVersioning(typeName, author, message);
0409: }
0410: versionedMap.put(typeName, Boolean.valueOf(versioned));
0411: }
0412:
0413: /**
* Checks whether a type name is versioned or not.
0415: *
0416: * @param typeName
0417: * the feature type to check
0418: * @param transaction
0419: * a transaction, or null if you don't have one (use null to avoid a new revision
0420: * being created for this operation)
0421: * @return true if the type is versioned, false otherwise
0422: * @throws IOException
0423: */
0424: protected boolean isVersioned(String typeName,
0425: Transaction transaction) throws IOException {
0426: Boolean versioned = (Boolean) versionedMap.get(typeName);
0427: if (versioned == null) {
// first check that the type actually exists, this will throw an exception if
// the schema does not
0430: if (isVersionedFeatureCollection(typeName))
0431: throw new DataSourceException("Could not find type "
0432: + typeName);
0433: if (!Arrays.asList(wrapped.getTypeNames()).contains(
0434: typeName))
0435: throw new DataSourceException("Unknown feature type "
0436: + typeName);
0437:
0438: Connection conn = null;
0439: Statement st = null;
0440: PostgisSQLBuilder sqlb = wrapped.createSQLBuilder();
0441: ResultSet rs = null;
0442: try {
0443: if (transaction != null)
0444: conn = wrapped.getConnection(transaction);
0445: else
0446: conn = wrapped.getDataSource().getConnection();
0447: st = conn.createStatement();
0448:
0449: rs = executeQuery(st, "SELECT COUNT(*) from "
0450: + sqlb.encodeTableName(TBL_VERSIONEDTABLES)
0451: + " WHERE SCHEMA = '"
0452: + getConfig().getDatabaseSchemaName() + "'" //
0453: + " AND NAME='" + typeName + "'" //
0454: + " AND VERSIONED = TRUE");
0455: rs.next();
versioned = Boolean.valueOf(rs.getInt(1) > 0);
0457: } catch (SQLException e) {
0458: throw new DataSourceException(
0459: "Error occurred while checking versioned tables,"
0460: + " database support tables are probably corrupt",
0461: e);
0462: } finally {
0463: JDBCUtils.close(rs);
0464: JDBCUtils.close(st);
0465: JDBCUtils.close(conn, Transaction.AUTO_COMMIT, null);
0466: }
0467: versionedMap.put(typeName, versioned);
0468: }
0469: return versioned.booleanValue();
0470: }
0471:
0472: public boolean isVersionedFeatureCollection(String typeName)
0473: throws IOException {
0474: if (!typeName.endsWith("_vfc_view"))
0475: return false;
0476:
0477: String originalName = getVFCTableName(typeName);
0478: return Arrays.asList(wrapped.getTypeNames()).contains(
0479: originalName);
0480: }
0481:
0482: /**
0483: * @see PostgisDataStore#setWKBEnabled(boolean)
0484: * @param enabled
0485: */
0486: public void setWKBEnabled(boolean enabled) {
0487: wrapped.setWKBEnabled(enabled);
0488: }
0489:
0490: /**
* @see PostgisDataStore#setLooseBbox(boolean)
0492: * @param enabled
0493: */
0494: public void setLooseBbox(boolean enabled) {
0495: wrapped.setLooseBbox(enabled);
0496: }
0497:
0498: public FIDMapper getFIDMapper(String tableName) throws IOException {
0499: return wrapped.getFIDMapper(tableName);
0500: }
0501:
0502: /**
0503: * Returns the last revision of the repository
0504: *
0505: * @return
0506: */
0507: public long getLastRevision() throws IOException {
0508: Connection conn = null;
0509: Statement st = null;
0510: ResultSet rs = null;
0511:
0512: try {
0513: conn = wrapped.getDataSource().getConnection();
0514: st = conn.createStatement();
0515:
0516: rs = st.executeQuery("select max(revision) from "
0517: + TBL_CHANGESETS);
0518: rs.next();
0519: return rs.getLong(1);
0520: } catch (SQLException e) {
0521: throw new DataSourceException(
0522: "Error getting last revision.", e);
0523: } finally {
0524: JDBCUtils.close(rs);
0525: JDBCUtils.close(st);
0526: JDBCUtils.close(conn, Transaction.AUTO_COMMIT, null);
0527: }
0528: }
0529:
0530: /**
0531: * Returns a list of type names modified between <code>version1</code> and
0532: * <code>version2</code>, with the first version excluded.
0533: *
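* A minimal usage sketch (ds is an instance of this datastore, the revision numbers are
* illustrative):
*
* <pre>
* String[] changed = ds.getModifiedFeatureTypes("3", "7");
* </pre>
*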
0534: * @param version1
0535: * the first version
0536: * @param version2
* the second version, which may be null if you need to refer to the latest version
* @return an array of type names, possibly empty, never null
0539: */
0540: public String[] getModifiedFeatureTypes(String version1,
0541: String version2) throws IOException {
0542: Connection conn = null;
0543: Statement st = null;
0544: ResultSet rs = null;
0545: RevisionInfo r1 = new RevisionInfo(version1);
0546: RevisionInfo r2 = new RevisionInfo(version2);
0547:
0548: if (r1.revision > r2.revision) {
0549: // swap them
0550: RevisionInfo tmp = r1;
0551: r1 = r2;
r2 = tmp;
0553: }
0554:
// no changes occur between n and n
0556: if (r1.revision == Long.MAX_VALUE || r1.revision == r2.revision)
0557: return new String[0];
0558:
0559: try {
0560: conn = wrapped.getDataSource().getConnection();
0561: st = conn.createStatement();
0562:
0563: rs = st.executeQuery("select distinct(name) from "
0564: + TBL_VERSIONEDTABLES + " where id in "
0565: + "(select versionedtable from "
0566: + TBL_TABLESCHANGED + " where revision > "
0567: + r1.revision + " and revision <= " + r2.revision
0568: + ")");
0569: List result = new ArrayList();
0570: while (rs.next())
0571: result.add(rs.getString(1));
0572: return (String[]) result.toArray(new String[result.size()]);
0573: } catch (SQLException e) {
0574: throw new DataSourceException(
0575: "Error getting feature types modified between "
0576: + r1.revision + " and " + r2.revision, e);
0577: } finally {
0578: JDBCUtils.close(rs);
0579: JDBCUtils.close(st);
0580: JDBCUtils.close(conn, Transaction.AUTO_COMMIT, null);
0581: }
0582: }
0583:
0584: /**
0585: * Returns a set of feature ids for features that where modified, created or deleted between
0586: * version1 and version2 and that matched the specified filter at least in one revision between
0587: * version1 and version2. <br>
* If called on an unversioned feature type, empty sets will be returned.
0589: * <p>
* The semantics is a little complex, so here is a more detailed explanation:
0591: * <ul>
0592: * <li>A feature is said to have been modified between version1 and version2 if a new state of
0593: * it has been created after version1 and before or at version2 (included), or if it has been
0594: * deleted between version1 and version2 (included).</li>
* <li>The filter is used to match every state between version1 and version2: all new states
* created after version1, but also the states already existing at version1, provided they
* still existed at version1 + 1.</li>
0598: * <li>If at least one state matches the filter, the feature id is returned.</li>
0599: * </ul>
0600: * The result is composed of three sets of feature ids:
0601: * <ul>
0602: * <li>A matched feature created after version1 is included in the created set</li>
0603: * <li>A matched feature deleted before or at version2 is included in the deleted set</li>
0604: * <li>A matched feature not included in the created/deleted sets is included in the modified
0605: * set</li>
0606: * </ul>
* The following graph illustrates feature matching and set destination. Each line represents a
* feature lifeline, with different symbols for filter-matched states, unmatched states, state
* creation, expiration, and lack of feature existence.<br>
0610: *
0611: * <pre>
0612: * v1 v2
0613: * | |
0614: * f1 ======]..........................|........... Not returned
0615: * f2 ======][-------------------------|----------- Not returned
0616: * f3 ======|==].......................|........... Returned (deleted)
0617: * f4 ======|==][----------------------|---]....... Returned (modified)
0618: * f5 ......|.[=======]................|........... Returned (created/deleted)
* f6 ......[=========]................|........... Returned (deleted)
* f7 ......[-------------------][=====|====]...... Returned (modified)
* f8 [-----|----][=============][-----|----------- Returned (modified)
0622: * </pre>
0623: *
0624: * Legend:
0625: * <ul>
0626: * <li> -: unmatched state</li>
0627: * <li> =: matched state</li>
* <li> .: no state (feature has been deleted)</li>
0629: * <li> [: creation of a state</li>
0630: * <li> ]: expiration of a state</li>
0631: * </ul>
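*
* A minimal usage sketch (ds is an instance of this datastore; type name, versions and user
* id are illustrative):
*
* <pre>
* ModifiedFeatureIds mfids = ds.getModifiedFeatureFIDs("roads", "3", "7",
*         Filter.INCLUDE, new String[] { "admin" }, Transaction.AUTO_COMMIT);
* </pre>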
0632: *
0633: * @param version1 -
0634: * the first revision
0635: * @param version2 -
0636: * the second revision, or null if you want the diff between version1 and current
0637: * @param filter a filter to limit the features that must be taken into consideration
* @param users an optional list of user ids that can be used to further filter the features:
* only features touched by any of these users will be returned
0640: * @param transaction
0641: * @throws IOException
0642: * @throws IllegalAttributeException
0643: * @throws NoSuchElementException
0644: */
0645: public ModifiedFeatureIds getModifiedFeatureFIDs(String typeName,
0646: String version1, String version2, Filter filter,
0647: String[] users, Transaction transaction) throws IOException {
0648: if (filter == null)
0649: filter = Filter.INCLUDE;
0650: RevisionInfo r1 = new RevisionInfo(version1);
0651: RevisionInfo r2 = new RevisionInfo(version2);
0652:
0653: if (!isVersioned(typeName)) {
0654: return new ModifiedFeatureIds(r1, r2);
0655: } else if (r1.revision > r2.revision) {
0656: // swap them
0657: RevisionInfo tmp = r1;
0658: r1 = r2;
0659: r2 = tmp;
0660: }
0661:
// gather the revisions where the specified users were involved. That would be
// a job for joins, but I don't want to make this code datastore dependent: so
// far it is relatively easy to port over to other dbms, and I would like it
// to stay that way
0666: Set userRevisions = getRevisionsCreatedBy(typeName, r1, r2,
0667: users, transaction);
0668:
0669: // We have to perform the following query:
0670: // ------------------------------------------------------------
0671: // select rowId, revisionCreated, [columnsForSecondaryFilter]
0672: // from data
0673: // where (
0674: // (revision <= r1 and expired >= r1 and expired <= r2)
0675: // or
0676: // (revision > r1 and revision <= r2)
0677: // )
0678: // and [encodableFilterComponent]
0679: // and revision in [user created revisions]
0680: // order by rowId, revisionCreated
0681: // ------------------------------------------------------------
0682: // and then run the post filter against the results.
0683: // Beware, the query extracts rows that do match the prefilter, so it does not
0684: // allow us to conclude that a feature has been created after r1 only because
// the smallest revision attribute we find is > r1. There may be a feature
// that was already there, but that matches the filter only after r1.
// A second query, fid filter based, is required to decide whether a feature
// really came to life after r1.
// The same goes for deletion: there may be a feature that matches the filter
// only before r2, and does not match it in the state lying across r2 (if there
// is one: if a rollback already occurred in the past there may be holes, that is,
// intervals where the feature did not exist, since a hole is always created when a
// feature is deleted and then the deletion is rolled back).
0694:
0695: // build a list of columns we need to get out. We need the fid columns, revision (which is
0696: // part of the internal type fid) and everything to run the post filter against
0697: Set columns = new HashSet();
0698: SQLBuilder builder = wrapped.getSqlBuilder(typeName);
0699: FeatureType internalType = wrapped.getSchema(typeName);
0700: Filter preFilter = builder.getPreQueryFilter(filter);
0701: Filter postFilter = builder.getPostQueryFilter(filter);
0702: columns.addAll(Arrays.asList(DataUtilities.attributeNames(
0703: postFilter, internalType)));
0704: VersionedFIDMapper mapper = (VersionedFIDMapper) wrapped
0705: .getFIDMapper(typeName);
0706: for (int i = 0; i < mapper.getColumnCount(); i++) {
0707: columns.add(mapper.getColumnName(i));
0708: }
0709: columns.add("revision");
0710: columns.add("expired");
0711:
0712: // build a filter to extract stuff modified between r1 and r2 and matching the prefilter
0713: FilterFactory ff = CommonFactoryFinder.getFilterFactory(null);
0714: Filter revLeR1 = ff.lessOrEqual(ff.property("revision"), ff
0715: .literal(r1.revision));
0716: Filter expGeR1 = ff.greaterOrEqual(ff.property("expired"), ff
0717: .literal(r1.revision));
0718: Filter expLeR2 = ff.lessOrEqual(ff.property("expired"), ff
0719: .literal(r2.revision));
0720: Filter revGtR1 = ff.greater(ff.property("revision"), ff
0721: .literal(r1.revision));
0722: Filter revLeR2 = ff.lessOrEqual(ff.property("revision"), ff
0723: .literal(r2.revision));
0724: Filter versionFilter = ff.or(ff.and(revLeR1, ff.and(expGeR1,
0725: expLeR2)), ff.and(revGtR1, revLeR2));
0726: // ... merge in the prefilter
0727: Filter newFilter = null;
0728: if (Filter.EXCLUDE.equals(preFilter)) {
0729: return new ModifiedFeatureIds(r1, r2);
0730: } else if (Filter.INCLUDE.equals(preFilter)) {
0731: newFilter = versionFilter;
0732: } else {
0733: Filter clone = transformFidFilter(typeName, preFilter);
0734: newFilter = ff.and(versionFilter, clone);
0735: }
0736: // ... and the user revision checks
0737: if (userRevisions != null) {
0738: // if no revisions touched by those users, no changes
0739: if (userRevisions.isEmpty())
0740: return new ModifiedFeatureIds(r1, r2);
0741:
0742: List urFilters = new ArrayList(userRevisions.size());
0743: PropertyName revisionProperty = ff.property("revision");
0744: for (Iterator it = userRevisions.iterator(); it.hasNext();) {
0745: Long revision = (Long) it.next();
0746: urFilters.add(ff.equals(revisionProperty, ff
0747: .literal(revision)));
0748: }
0749: newFilter = ff.and(newFilter, ff.or(urFilters));
0750: }
0751:
0752: // query the underlying datastore
0753: FeatureReader fr = null;
0754: Set matched = new HashSet();
0755: Set createdBefore = new HashSet();
0756: Set expiredAfter = new HashSet();
0757: try {
0758: // ... first gather all fids that do match the pre and post filters between r1 and r2
0759: // and gather all those fids that we already know were born before r1 or deleted after
0760: // r2
0761: String[] colArray = (String[]) columns
0762: .toArray(new String[columns.size()]);
0763: DefaultQuery q = new DefaultQuery(typeName, newFilter,
0764: colArray);
0765: fr = wrapped.getFeatureReader(q, transaction);
0766: while (fr.hasNext()) {
0767: Feature f = fr.next();
0768: long revision = ((Long) f.getAttribute("revision"))
0769: .longValue();
0770: long expired = ((Long) f.getAttribute("expired"))
0771: .longValue();
0772:
0773: // get the external id, the one that really gives us feature identity
0774: // and not just feature history
0775: String id = mapper.getUnversionedFid(f.getID());
0776: if (!matched.contains(id)
0777: && (revision > r1.revision || (expired > r1.revision && expired <= r2.revision))
0778: && postFilter.evaluate(f)) {
0779: matched.add(id);
0780: }
0781: // little optimization, pre-gather all stuff that we already know was created before
0782: // or deleted after the interval we are taking into consideration
0783: if (revision <= r1.revision)
0784: createdBefore.add(id);
0785: if (expired > r2.revision)
0786: expiredAfter.add(id);
0787: }
0788: fr.close();
0789: fr = null;
0790:
// now onto the created ones. We start from the candidates, those matched features
// for which the prefilter did not return a state before r1 (which does not mean such
// a state does not exist...)
0794: Set created = new HashSet(matched);
0795: created.removeAll(createdBefore);
0796: if (!created.isEmpty()) {
0797: Filter r1FidFilter = buildFidFilter(ff, created);
0798: Filter r1Filter = buildVersionedFilter(typeName,
0799: r1FidFilter, r1);
0800: DefaultQuery r1q = new DefaultQuery(typeName, r1Filter,
0801: colArray);
0802: fr = wrapped.getFeatureReader(r1q, transaction);
0803: while (fr.hasNext()) {
0804: String versionedId = fr.next().getID();
0805: String unversionedId = mapper
0806: .getUnversionedFid(versionedId);
0807: created.remove(unversionedId);
0808: }
0809: fr.close();
0810: fr = null;
0811: }
0812:
0813: // and then onto the deleted ones. Same reasoning
0814: Set deleted = new HashSet(matched);
0815: deleted.removeAll(expiredAfter);
0816: if (!deleted.isEmpty()) {
0817: Filter r2FidFilter = buildFidFilter(ff, deleted);
0818: Filter r2Filter = buildVersionedFilter(typeName,
0819: r2FidFilter, r2);
0820: DefaultQuery r2q = new DefaultQuery(typeName, r2Filter,
0821: colArray);
0822: fr = wrapped.getFeatureReader(r2q, transaction);
0823: while (fr.hasNext()) {
0824: String versionedId = fr.next().getID();
0825: String unversionedId = mapper
0826: .getUnversionedFid(versionedId);
0827: deleted.remove(unversionedId);
0828: }
0829: fr.close();
0830: fr = null;
0831: }
0832:
0833: // all matched that are not created after or deleted before are the "modified" ones
0834: Set modified = new HashSet(matched);
0835: modified.removeAll(created);
0836: modified.removeAll(deleted);
0837:
0838: // oh, finally we have all we need to return :-)
0839: return new ModifiedFeatureIds(r1, r2, created, deleted,
0840: modified);
0841: } catch (Exception e) {
0842: throw new DataSourceException(
0843: "Error occurred while computing modified fids", e);
0844: } finally {
0845: if (fr != null)
0846: fr.close();
0847: }
0848: }
0849:
0850: /**
0851: * Gathers the revisions created by a certain group of users between two specified revisions
0852: * @param r1 the first revision
0853: * @param r2 the second revision
* @param users an array of user ids whose changes are of interest
* @return the set of revisions created by those users, or null if users is null or empty
0856: */
0857: Set getRevisionsCreatedBy(String typeName, RevisionInfo r1,
0858: RevisionInfo r2, String[] users, Transaction transaction)
0859: throws IOException {
0860: if (users == null || users.length == 0)
0861: return null;
0862:
0863: FilterFactory ff = CommonFactoryFinder.getFilterFactory(null);
0864: List filters = new ArrayList(users.length);
0865: for (int i = 0; i < users.length; i++) {
0866: filters.add(ff.equals(ff.property("author"), ff
0867: .literal(users[i])));
0868: }
0869: Filter revisionFilter = ff.between(ff.property("revision"), ff
0870: .literal(r1.revision), ff.literal(r2.revision));
0871: Filter userFilter = ff.and(ff.or(filters), revisionFilter);
0872:
0873: // again, here we could filter with a join on the feature type we're investigating, but...
0874: Query query = new DefaultQuery(
0875: VersionedPostgisDataStore.TBL_CHANGESETS, userFilter,
0876: new String[] { "revision" });
0877: Set revisions = new HashSet();
0878: FeatureReader fr = null;
0879: try {
0880: fr = wrapped.getFeatureReader(query, transaction);
0881: while (fr.hasNext()) {
0882: Feature f = fr.next();
0883: revisions.add(f.getAttribute("revision"));
0884: }
0885: } catch (IllegalAttributeException e) {
0886: throw new DataSourceException(
0887: "Error reading revisions modified by users "
0888: + Arrays.asList(users), e);
0889: } finally {
0890: if (fr != null)
0891: fr.close();
0892: }
0893: return revisions;
0894: }
0895:
0896: /**
0897: * Builds a filter from a set of feature ids, since there is no convenient way to build it using
0898: * the factory
0899: *
0900: * @param ff
0901: * @param ids
0902: * @return
0903: */
0904: Filter buildFidFilter(FilterFactory ff, Set ids) {
0905: Set featureIds = new HashSet();
0906: for (Iterator it = ids.iterator(); it.hasNext();) {
0907: String id = (String) it.next();
0908: featureIds.add(ff.featureId(id));
0909: }
0910: return ff.id(featureIds);
0911: }
0912:
0913: /**
0914: * Makes sure the required versioning data structures are available in the database
0915: *
0916: * @throws IOException
0917: */
0918: protected synchronized void checkVersioningDataStructures()
0919: throws IOException {
0920: Connection conn = null;
0921: Statement st = null;
0922: ResultSet tables = null;
0923: try {
// gather a connection and turn off auto commit, so that the creation of the
// support tables can be committed as a single unit (Postgres supports transactional DDL)
0926: conn = wrapped.getDataSource().getConnection();
0927: conn.setAutoCommit(false);
0928:
0929: // gather all table names and check the required tables are there
0930: boolean changeSets = false;
0931: boolean tablesChanged = false;
0932: boolean versionedTables = false;
0933: DatabaseMetaData meta = conn.getMetaData();
0934: String[] tableType = { "TABLE" };
0935: tables = meta.getTables(null, getConfig()
0936: .getDatabaseSchemaName(), "%", tableType);
0937: while (tables.next()) {
0938: String tableName = tables.getString(3);
0939: if (tableName.equals(TBL_CHANGESETS))
0940: changeSets = true;
0941: if (tableName.equals(TBL_TABLESCHANGED))
0942: tablesChanged = true;
0943: if (tableName.equals(TBL_VERSIONEDTABLES))
0944: versionedTables = true;
0945:
0946: }
0947:
0948: // if all tables are there, assume their schema is ok and go on
0949: // TODO: really check the schema is the one we want
0950: if (!(changeSets && tablesChanged && versionedTables)) {
0951:
0952: // if we have a partial match, become really angry, someone
0953: // messed up with the schema
0954: if (changeSets || tablesChanged || versionedTables) {
0955: String msg = "The versioning tables are not complete, yet some table with the same name is there.\n";
0956: msg += "Remove tables (";
0957: if (changeSets)
0958: msg += TBL_CHANGESETS + " ";
0959: if (tablesChanged)
0960: msg += TBL_TABLESCHANGED + " ";
0961: if (versionedTables)
0962: msg += TBL_VERSIONEDTABLES;
0963: msg += ") before using again the versioned data store";
0964: throw new IOException(msg);
0965: }
0966:
// ok, let's create the versioning support tables then
0968: st = conn.createStatement();
// according to internet searches, the max length of postgres table
// identifiers is 63 chars
// (http://www.postgresql.org/docs/faqs.FAQ_DEV.html#item2.2,
// NAMEDATALEN is 64 but the string is null terminated)
0974: execute(
0975: st,
0976: "CREATE TABLE "
0977: + TBL_VERSIONEDTABLES
0978: + "(ID SERIAL PRIMARY KEY, "
0979: + "SCHEMA VARCHAR(63) NOT NULL, NAME VARCHAR(63) NOT NULL, "
0980: + "VERSIONED BOOLEAN NOT NULL)");
0981: execute(
0982: st,
0983: "CREATE TABLE "
0984: + TBL_CHANGESETS
0985: + "(REVISION BIGSERIAL PRIMARY KEY, "
0986: + "AUTHOR VARCHAR(256), "
0987: + "DATE TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, " //
0988: + "MESSAGE TEXT)");
0989: String schema = getConfig().getDatabaseSchemaName();
0990: if (schema == null)
0991: schema = "public";
0992: execute(st, "SELECT ADDGEOMETRYCOLUMN('" + schema
0993: + "', '" + TBL_CHANGESETS
0994: + "', 'bbox', 4326, 'POLYGON', 2)");
0995:
0996: execute(st, "CREATE TABLE " + TBL_TABLESCHANGED
0997: + "(REVISION BIGINT NOT NULL REFERENCES "
0998: + TBL_CHANGESETS
0999: + ", VERSIONEDTABLE INT NOT NULL REFERENCES "
1000: + TBL_VERSIONEDTABLES
1001: + ", PRIMARY KEY (REVISION, VERSIONEDTABLE))");
1002:
1003: // and finally commit table creation (yes, Postgres supports
1004: // transacted DDL)
1005: conn.commit();
1006: }
1007: } catch (SQLException sqlException) {
1008: JDBCUtils
1009: .close(conn, Transaction.AUTO_COMMIT, sqlException);
1010: conn = null;
1011:
1012: String message = "Error querying database for list of tables:"
1013: + sqlException.getMessage();
1014: throw new DataSourceException(message, sqlException);
1015: } finally {
1016: JDBCUtils.close(tables);
1017: JDBCUtils.close(st);
1018: JDBCUtils.close(conn, Transaction.AUTO_COMMIT, null);
1019: }
1020:
1021: resetTypeInfo();
1022: }
1023:
1024: /**
1025: * Makes sure type infos and fid mappers are recomputed when table structures do change
1026: *
1027: * @throws IOException
1028: */
1029: void resetTypeInfo() throws IOException {
1030: wrapped.getTypeHandler().forceRefresh();
1031: wrapped.getTypeHandler().resetFIDMappers();
1032:
// update the list of versioned types so that the factory knows which fid
// mappers need filtering
1035: VersionedFIDMapperFactory factory = (VersionedFIDMapperFactory) wrapped
1036: .getFIDMapperFactory();
1037: factory.setVersionedTypes(getVersionedTypeNames());
1038:
1039: // ensure this does not get a typed fid mapper for changesets
1040: // we want easy extraction of the generated revision
1041: // wrapped.setFIDMapper(TBL_CHANGESETS, new PostGISAutoIncrementFIDMapper(TBL_CHANGESETS, "revision",
1042: // Types.BIGINT, true));
1043: }
1044:
1045: private String[] getVersionedTypeNames() throws IOException {
1046: Connection conn = null;
1047: Statement st = null;
1048: ResultSet rs = null;
1049: List tables = new ArrayList();
1050: try {
1051: PostgisSQLBuilder sqlb = wrapped.createSQLBuilder();
1052: conn = wrapped.getDataSource().getConnection();
1053: st = conn.createStatement();
1054:
1055: rs = executeQuery(st, "SELECT NAME from "
1056: + sqlb.encodeTableName(TBL_VERSIONEDTABLES)
1057: + " WHERE SCHEMA = '"
1058: + getConfig().getDatabaseSchemaName() + "'" //
1059: + " AND VERSIONED ='" + true + "'");
1060: while (rs.next()) {
1061: tables.add(rs.getString(1));
1062: }
1063: } catch (SQLException sqlException) {
1064: String message = "Error querying database for list of versioned tables:"
1065: + sqlException.getMessage();
1066: throw new DataSourceException(message, sqlException);
1067: } finally {
1068: JDBCUtils.close(rs);
1069: JDBCUtils.close(st);
1070: JDBCUtils.close(conn, Transaction.AUTO_COMMIT, null);
1071: }
1072: return (String[]) tables.toArray(new String[tables.size()]);
1073: }
1074:
1075: /**
1076: * Enables versioning for the specified feature type, appending a commit message to the
1077: * changeset
1078: *
1079: * @param typeName
1080: * @param author
1081: * @param message
1082: * @throws IOException
1083: * @throws DataSourceException
1084: */
1085: private void enableVersioning(String typeName, String author,
1086: String message) throws IOException, DataSourceException {
1087: // this will make the fid mapper be computed and stick... but we do have
1088: // timeouts...
1089: FIDMapper mapper = wrapped.getFIDMapper(typeName);
1090:
1091: // check we can version enable this table, we need a supported fid
1092: // mapper
1093: if (!checkSupportedMapper(mapper)) {
1094: if (mapper instanceof TypedFIDMapper)
1095: mapper = ((TypedFIDMapper) mapper).getWrappedMapper();
1096: throw new IOException("This feature type (" + typeName
1097: + ") is associated to "
1098: + "an unsupported fid mapper: " + mapper.getClass()
1099: + "\n" + "The supported fid mapper classes are: "
1100: + Arrays.asList(SUPPORTED_FID_MAPPERS));
1101: }
// TODO: check for tables that have versioning-reserved column names,
// so we can throw better error messages
1104:
1105: // have a default message
1106: if (message == null)
1107: message = "Version enabling " + typeName;
1108:
1109: // alter table structure in a transaction
1110: Connection conn = null;
1111: Statement st = null;
1112: ResultSet rs = null;
1113: PostgisSQLBuilder sqlb = wrapped.createSQLBuilder();
1114: Transaction t = new DefaultTransaction();
1115: t.putProperty(AUTHOR, author);
1116: t.putProperty(MESSAGE, message);
1117: try {
1118: // gather the transaction state and pick the version number, also
1119: // update the dirty feature types
1120: // --> if we do this among other alter tables a deadlock occurs,
1121: // don't know why...
1122: VersionedJdbcTransactionState state = wrapped
1123: .getVersionedJdbcTransactionState(t);
1124: state.setTypeNameDirty(typeName);
1125: long revision = state.getRevision();
1126:
1127: // gather bbox, we need it for the first commit msg
1128: Envelope envelope = wrapped.getFeatureSource(typeName)
1129: .getBounds();
1130: if (envelope != null) {
1131: CoordinateReferenceSystem crs = wrapped.getSchema(
1132: typeName).getDefaultGeometry()
1133: .getCoordinateSystem();
1134: if (crs != null)
1135: envelope = JTS.toGeographic(envelope, crs);
1136: state.expandDirtyBounds(envelope);
1137: }
1138:
// setup for altering tables (and ensure a versioned state is
// attached to the transaction)
1141: conn = state.getConnection();
1142: st = conn.createStatement();
1143: PkDescriptor pk = getPrimaryKeyConstraintName(conn,
1144: typeName);
1145: if (pk == null)
1146: throw new DataSourceException(
1147: "Cannot version tables without primary keys");
1148:
1149: // build a comma separated list of old pk columns
1150: String colList = "";
1151: for (int i = 0; i < pk.columns.length; i++) {
1152: colList += "," + pk.columns[i];
1153: }
1154:
1155: // drop the old primary key
1156: execute(st, "ALTER TABLE " + sqlb.encodeTableName(typeName)
1157: + " DROP CONSTRAINT " + pk.name);
1158:
1159: execute(st, "ALTER TABLE " + sqlb.encodeTableName(typeName)
1160: + " ADD COLUMN REVISION BIGINT REFERENCES "
1161: + TBL_CHANGESETS);
// TODO: add some runtime check that acts as a foreign key iff
// the value is not Long.MAX_VALUE
1164: execute(st, "ALTER TABLE " + sqlb.encodeTableName(typeName)
1165: + " ADD COLUMN EXPIRED BIGINT NOT NULL DEFAULT "
1166: + Long.MAX_VALUE);
1167: execute(st, "ALTER TABLE " + sqlb.encodeTableName(typeName)
1168: + " ADD COLUMN CREATED BIGINT REFERENCES "
1169: + TBL_CHANGESETS);
1170:
1171: // update all rows in the table with the new revision number
1172: // and turn revision into a not null column
1173: execute(st, "UPDATE " + sqlb.encodeTableName(typeName)
1174: + " SET REVISION = " + revision + " , CREATED = "
1175: + revision);
1176: execute(st, "ALTER TABLE " + sqlb.encodeTableName(typeName)
1177: + " ALTER REVISION SET NOT NULL");
1178: execute(st, "ALTER TABLE " + sqlb.encodeTableName(typeName)
1179: + " ALTER CREATED SET NOT NULL");
1180:
1181: // now recreate the primary key with revision as first column
1182: execute(st, "ALTER TABLE " + sqlb.encodeTableName(typeName)
1183: + " ADD CONSTRAINT " + pk.name
1184: + " PRIMARY KEY(REVISION " + colList + ")");
1185: // add secondary index
1186: execute(st, "CREATE INDEX " + typeName.toUpperCase()
1187: + "_REVIDX" + " ON " + typeName + "(EXPIRED"
1188: + colList + ")");
1189:
1190: // mark the table as versioned. First check if we already have
1191: // records for this table
1192: // then insert or update
1193: rs = executeQuery(st, "SELECT VERSIONED from "
1194: + sqlb.encodeTableName(TBL_VERSIONEDTABLES)
1195: + " WHERE SCHEMA = '"
1196: + getConfig().getDatabaseSchemaName() + "'" //
1197: + " AND NAME='" + typeName + "'");
1198: if (rs.next()) {
1199: // we already have the table listed, it was versioned in the past
1200: execute(st, "UPDATE "
1201: + sqlb.encodeTableName(TBL_VERSIONEDTABLES) //
1202: + " SET VERSIONED = TRUE " //
1203: + " WHERE SCHEMA = '"
1204: + getConfig().getDatabaseSchemaName() + "'" //
1205: + " AND NAME='" + typeName + "'");
1206: } else {
1207: // this has never been versioned, insert new records
1208: execute(st, "INSERT INTO "
1209: + sqlb.encodeTableName(TBL_VERSIONEDTABLES)
1210: + " VALUES(default, " + "'"
1211: + getConfig().getDatabaseSchemaName() + "','"
1212: + typeName + "', TRUE)");
1213: }
1214: rs.close();
1215:
1216: // create view to support versioned feature collection extraction
1217: createVersionedFeatureCollectionView(typeName, conn);
1218:
1219: // phew... done!
1220: t.commit();
1221:
1222: // and now wipe out the cached feature type, we just changed it, but
1223: // do not change the fid mapper, it's still ok (or it isn't?)
1224: // MIND, this needs to be done _after_ the transaction is committed,
// otherwise transaction writing will try to get metadata with
1226: // alters still in progress and the whole thing will lock up
1227: resetTypeInfo();
1228: } catch (SQLException sql) {
1229: throw new DataSourceException(
1230: "Error occurred during version enabling. "
1231: + "Does your table have columns with reserved names?",
1232: sql);
1233: } catch (TransformException e) {
1234: throw new DataSourceException(
1235: "Error occurred while trying to compute the lat/lon bounding box "
1236: + "affected by this operation", e);
1237: } finally {
1238: JDBCUtils.close(rs);
1239: JDBCUtils.close(st);
1240: JDBCUtils.close(conn, t, null);
1241: t.close();
1242: }
1243: }
1244:
1245: void createVersionedFeatureCollectionView(String typeName)
1246: throws IOException {
1247: Connection conn = null;
1248: Transaction t = new DefaultTransaction();
1249: try {
1250: conn = wrapped.getConnection(t);
1251: createVersionedFeatureCollectionView(typeName, conn);
1252: t.commit();
1253: } finally {
1254: JDBCUtils.close(conn, t, null);
1255: }
1256: }
1257:
1258: /**
* Creates the &lt;typeName&gt;_vfc_view view for the specified feature type
1260: * @param conn
1261: * @param typeName
1262: */
1263: private void createVersionedFeatureCollectionView(String typeName,
1264: Connection conn) throws IOException {
1265: Statement st = null;
1266: try {
1267: st = conn.createStatement();
1268:
1269: String viewName = getVFCViewName(typeName);
1270: st
1271: .execute("CREATE VIEW "
1272: + viewName
1273: + "\n "
1274: + "AS SELECT "
1275: + "cr.revision as \"creationVersion\", cr.author as \"createdBy\", "
1276: + "cr.date as \"creationDate\", cr.message as \"creationMessage\", "
1277: + "lu.revision as \"lastUpdateVersion\", lu.author as \"lastUpdatedBy\", "
1278: + "lu.date as \"lastUpdateDate\", lu.message as \"lastUpdateMessage\","
1279: + typeName + ".*\n" + "FROM " + typeName
1280: + " inner join " + " changesets lu on "
1281: + typeName + ".revision = lu.revision "
1282: + " inner join changesets cr on "
1283: + typeName + ".created = cr.revision");
1284:
1285: // make sure there is no other row for this view (one might have remained
1286: // due to errors, and we would end up with a primary key violation)
1287: st.execute("DELETE FROM geometry_columns "
1288: + "WHERE f_table_schema = current_schema()"
1289: + "AND f_table_name = '" + viewName + "'");
1290:
1291: st
1292: .execute("INSERT INTO geometry_columns \n"
1293: + "SELECT '', current_schema(), '"
1294: + viewName
1295: + "', "
1296: + " gc.f_geometry_column, gc.coord_dimension, gc.srid, gc.type\n"
1297: + "FROM geometry_columns as gc\n"
1298: + "WHERE gc.f_table_name = '" + typeName
1299: + "'");
1300: } catch (SQLException e) {
1301: throw new DataSourceException(
1302: "Issues creating versioned feature collection view for "
1303: + typeName, e);
1304: } finally {
1305: JDBCUtils.close(st);
1306: }
1307: }
1308:
1309: /**
1310: * Given a type name returns the name of the versioned feature collection view
1311: * associated to it
1312: * @param typeName
1313: * @return
1314: */
1315: public static String getVFCViewName(String typeName) {
1316: return typeName + "_vfc_view";
1317: }
1318:
1319: /**
1320: * Given a versioned feature collection view name returns the base table name
1321: * @param vfcTypeName
1322: * @return
1323: * @throws IOException
1324: */
1325: public static String getVFCTableName(String vfcTypeName)
1326: throws IOException {
1327: if (vfcTypeName.endsWith("_vfc_view"))
1328: return vfcTypeName.substring(0, vfcTypeName
1329: .lastIndexOf("_vfc_view"));
1330: else
1331: throw new IOException("Specified type " + vfcTypeName
1332: + " is not a versioned feature collection view");
1333: }
1334:
1335: /**
* Disables versioning for the specified feature type, appending a commit message to the
1337: * changeset
1338: *
1339: * @param typeName
1340: * @param author
1341: * @param message
1342: * @throws IOException
1343: */
1344: private void disableVersioning(String typeName, String author,
1345: String message) throws IOException {
1346: // have a default message
1347: if (message == null)
1348: message = "Version disabling " + typeName;
1349:
1350: // alter table structure in a transaction
1351: Connection conn = null;
1352: Statement st = null;
1353: PostgisSQLBuilder sqlb = wrapped.createSQLBuilder();
1354: Transaction t = new DefaultTransaction();
1355: t.putProperty(AUTHOR, author);
1356: t.putProperty(MESSAGE, message);
1357: try {
1358: // gather the transaction state and pick the version number, also
1359: // update the dirty feature types
1360: // --> if we do this among other alter tables a deadlock occurs,
1361: // don't know why...
1362: VersionedJdbcTransactionState state = wrapped
1363: .getVersionedJdbcTransactionState(t);
1364: state.setTypeNameDirty(typeName);
1365:
// the following is funny, but if I don't gather the revision now, the transaction may
1367: // lock... not sure why...
1368: state.getRevision();
1369:
1370: // gather bbox, we need it for the first commit msg
1371: Envelope envelope = wrapped.getFeatureSource(typeName)
1372: .getBounds();
1373: if (envelope != null) {
1374: CoordinateReferenceSystem crs = wrapped.getSchema(
1375: typeName).getDefaultGeometry()
1376: .getCoordinateSystem();
1377: if (crs != null)
1378: envelope = JTS.toGeographic(envelope, crs);
1379: state.expandDirtyBounds(envelope);
1380: }
1381:
1382: // drop the versioning feature collection view
1383: conn = state.getConnection();
1384: st = conn.createStatement();
1385: try {
1386: st.execute("DROP VIEW " + typeName + "_vfc_view");
1387: } catch (SQLException e) {
1388: // if the view wasn't there no problem
1389: }
1390: st
1391: .execute("DELETE FROM geometry_columns WHERE f_table_schema = current_schema() "
1392: + " AND f_table_name = '"
1393: + typeName
1394: + "_vfc_view'");
1395:
// build a comma separated list of the old pk columns, skipping the
// first one, which we know is "revision"
1399: PkDescriptor pk = getPrimaryKeyConstraintName(conn,
1400: typeName);
1401: if (pk == null)
1402: throw new DataSourceException(
1403: "Cannot version tables without primary keys");
1404:
1405: String colList = "";
1406: for (int i = 1; i < pk.columns.length; i++) {
1407: colList += "," + pk.columns[i];
1408: }
1409: colList = colList.substring(1);
1410:
1411: // drop the current primary key and the index
1412: execute(st, "DROP INDEX "
1413: + sqlb.encodeTableName(typeName.toLowerCase()
1414: + "_revidx"));
1415: execute(st, "ALTER TABLE " + sqlb.encodeTableName(typeName)
1416: + " DROP CONSTRAINT " + pk.name);
1417:
1418: // drop versioning columns
1419: execute(st, "ALTER TABLE " + sqlb.encodeTableName(typeName)
1420: + " DROP COLUMN REVISION");
1421: execute(st, "ALTER TABLE " + sqlb.encodeTableName(typeName)
1422: + " DROP COLUMN EXPIRED");
1423: execute(st, "ALTER TABLE " + sqlb.encodeTableName(typeName)
1424: + " DROP COLUMN CREATED");
1425:
// now recreate the old primary key, without the revision column
1427: execute(st, "ALTER TABLE " + sqlb.encodeTableName(typeName)
1428: + " ADD CONSTRAINT " + pk.name + " PRIMARY KEY("
1429: + colList + ")");
1430:
// mark the table as no longer versioned
1432: execute(st, "UPDATE "
1433: + sqlb.encodeTableName(TBL_VERSIONEDTABLES)
1434: + " SET VERSIONED = FALSE WHERE SCHEMA = '"
1435: + getConfig().getDatabaseSchemaName()
1436: + "' AND NAME = '" + typeName + "'");
1437:
1438: // phew... done!
1439: t.commit();
1440:
1441: // and now wipe out the cached feature type, we just changed it, but
1442: // do not change the fid mapper, it's still ok (or it isn't?)
1443: // MIND, this needs to be done _after_ the transaction is committed,
// otherwise transaction writing will try to get metadata with
1445: // alters still in progress and the whole thing will lock up
1446: resetTypeInfo();
1447: } catch (SQLException sql) {
1448: throw new DataSourceException(
1449: "Error occurred during version enabling. "
1450: + "Does your table have columns with reserved names?",
1451: sql);
1452: } catch (TransformException e) {
1453: throw new DataSourceException(
1454: "Error occurred while trying to compute the lat/lon bounding box "
1455: + "affected by this operation", e);
1456: } finally {
1457: JDBCUtils.close(st);
1458: JDBCUtils.close(conn, t, null);
1459: t.close();
1460: }
1461:
1462: }
1463:
1464: /**
* Logs the sql at the FINE level, then executes the command
1466: *
1467: * @param st
1468: * @param sql
1469: * @throws SQLException
1470: */
1471: protected void execute(Statement st, String sql)
1472: throws SQLException {
1473: LOGGER.fine(sql);
1474: st.execute(sql);
1475: }
1476:
1477: /**
* Logs the sql at the FINE level, then executes the query
1479: *
1480: * @param st
1481: * @param sql
1482: * @throws SQLException
1483: */
1484: protected ResultSet executeQuery(Statement st, String sql)
1485: throws SQLException {
1486: LOGGER.fine(sql);
1487: return st.executeQuery(sql);
1488: }
1489:
1490: // /**
1491: // * Checks versioning transaction properties are there. At the moment the
1492: // * check is strict, we may want to support default values thought.
1493: // *
1494: // * @param t
1495: // * @throws IOException
1496: // */
1497: // private void checkTransactionProperties(Transaction t) throws IOException
1498: // {
1499: // if (t.getProperty(AUTHOR) == null)
1500: // throw new IOException(
1501: // "Transaction author property should be set, it's not");
1502: // }
1503:
1504: /**
* Returns the primary key descriptor (constraint name and column names) for the specified
* table, or null if the table has no primary key
*
* @param conn
* @param table
* @return
1510: */
1511: private PkDescriptor getPrimaryKeyConstraintName(Connection conn,
1512: String table) throws SQLException {
1513: PkDescriptor descriptor = null;
1514: ResultSet columns = null;
1515: try {
1516: // extract primary key information
1517: columns = conn.getMetaData().getPrimaryKeys(null,
1518: getConfig().getDatabaseSchemaName(), table);
1519: if (!columns.next())
1520: return null;
1521:
1522: // build the descriptor
1523: descriptor = new PkDescriptor();
1524: descriptor.name = columns.getString("PK_NAME");
1525: List colnames = new ArrayList();
1526: do {
1527: colnames.add(columns.getString("COLUMN_NAME"));
1528: } while (columns.next());
1529: descriptor.columns = (String[]) colnames
1530: .toArray(new String[colnames.size()]);
1531: } finally {
1532: JDBCUtils.close(columns);
1533: }
1534: return descriptor;
1535: }
1536:
1537: /**
* Returns true if the provided fid mapper is supported by the versioning engine, false
1539: * otherwise
1540: *
1541: * @param mapper
1542: * @return
1543: */
1544: private boolean checkSupportedMapper(FIDMapper mapper) {
1545: if (mapper instanceof TypedFIDMapper) {
1546: mapper = ((TypedFIDMapper) mapper).getWrappedMapper();
1547: }
1548: for (int i = 0; i < SUPPORTED_FID_MAPPERS.length; i++) {
1549: if (SUPPORTED_FID_MAPPERS[i].isAssignableFrom(mapper
1550: .getClass()))
1551: return true;
1552:
1553: }
1554: return false;
1555: }
1556:
1557: /**
1558: * Takes a filter and merges in the extra conditions needed to extract the specified revision
1559: *
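* As a sketch of the result (the attribute names are those added to versioned tables): when
* the requested revision is the latest one, the returned filter is equivalent to
*
* <pre>
* expired = Long.MAX_VALUE AND [original filter, fid filters rewritten]
* </pre>
*
* while for an older revision r it becomes
*
* <pre>
* revision &lt;= r AND expired &gt; r AND [original filter, fid filters rewritten]
* </pre>
*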
1560: * @param filter
1561: * The original filter
1562: * @param ri
1563: * The revision information
1564: *
1565: * @return a new filter
1566: * @throws FactoryConfigurationError
1567: * @throws IOException
1568: */
1569: Filter buildVersionedFilter(String featureTypeName, Filter filter,
1570: RevisionInfo ri) throws IOException {
1571: // build extra filter we need to append to query in order to retrieve
1572: // the desired revision
1573: FilterFactory ff = CommonFactoryFinder.getFilterFactory(null);
1574: Filter extraFilter = null;
1575: if (ri.isLast()) {
1576: // expired = Long.MAX_VALUE
1577: extraFilter = ff.equals(ff.property("expired"), ff
1578: .literal(Long.MAX_VALUE));
1579: } else {
1580: // revision <= [revision]
1581: // and expired > [revision]
1582: Filter revf = ff.lessOrEqual(ff.property("revision"), ff
1583: .literal(ri.revision));
1584: Filter expf = ff.greater(ff.property("expired"), ff
1585: .literal(ri.revision));
1586: extraFilter = ff.and(revf, expf);
1587: }
1588:
1589: // handle include and exclude separately since the
1590: // filter factory does not handle them properly
1591: if (filter.equals(Filter.EXCLUDE)) {
1592: return Filter.EXCLUDE;
1593: }
1594: if (filter.equals(Filter.INCLUDE))
1595: return extraFilter;
1596:
// we need to turn any fid filter into a regular attribute filter since we
// played tricks on fids and hid the revision attribute
// (which is part of the primary key)
1600: Filter transformedFidFilter = transformFidFilter(
1601: featureTypeName, filter);
1602: Filter newFilter = ff.and(transformedFidFilter, extraFilter);
1603:
1604: return newFilter;
1605: }
1606:
1607: /**
1608: * Takes a fid filter with external fids and turns it into a set of filters against internal
1609: * feature type attributes, that is, an equivalent filter that can run against the internal,
1610: * versioned feature type
1611: *
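* For instance, assuming an illustrative "roads" table whose external fids look like
* <code>roads.4</code> and whose non-versioned key column is <code>id</code>, the
* transformation is conceptually:
*
* <pre>
* external fid filter:   FID = roads.4
* internal equivalent:   id = 4      (revision conditions are added separately)
* </pre>
*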
1612: * @param featureTypeName
1613: * @param filter
1614: * @return
1615: * @throws IOException
1616: * @throws FactoryConfigurationError
1617: */
1618: protected Filter transformFidFilter(String featureTypeName,
1619: Filter filter) throws IOException,
1620: FactoryConfigurationError {
1621: if (isVersionedFeatureCollection(featureTypeName))
1622: featureTypeName = getVFCTableName(featureTypeName);
1623: FeatureType featureType = wrapped.getSchema(featureTypeName);
1624: VersionedFIDMapper mapper = (VersionedFIDMapper) wrapped
1625: .getFIDMapper(featureTypeName);
1626: FidTransformeVisitor transformer = new FidTransformeVisitor(
1627: FilterFactoryFinder.createFilterFactory(), featureType,
1628: mapper);
1629: FilterVisitorFilterWrapper wrapper = new FilterVisitorFilterWrapper(
1630: transformer);
1631: filter.accept(wrapper, null);
1632: Filter clone = (Filter) transformer.getCopy();
1633: return clone;
1634: }
1635:
1636: /**
1637: * Given an original query and a revision info, builds an equivalent query that will work
1638: * against the specified revision
1639: *
1640: * @param query
1641: * @param ri
1642: * @return
1643: * @throws FactoryConfigurationError
1644: * @throws IOException
1645: */
1646: DefaultQuery buildVersionedQuery(Query query) throws IOException {
1647: RevisionInfo ri = new RevisionInfo(query.getVersion());
// build a filter that extracts only the requested revision
1649: Filter newFilter = buildVersionedFilter(query.getTypeName(),
1650: query.getFilter(), ri);
1651: DefaultQuery versionedQuery = new DefaultQuery(query);
1652: versionedQuery.setPropertyNames(filterPropertyNames(query));
1653: versionedQuery.setFilter(newFilter);
1654: return versionedQuery;
1655: }
1656:
1657: /**
* Returns the query property names minus the versioning columns and the primary key columns
1659: *
1660: * @param query
1661: * @return
1662: * @throws IOException
1663: */
1664: private String[] filterPropertyNames(Query query)
1665: throws IOException {
1666: String[] names = wrapped.propertyNames(query);
1667:
1668: Set extraColumns = new HashSet();
1669: if (isVersionedFeatureCollection(query.getTypeName())
1670: || isVersioned(query.getTypeName(), null)) {
1671: extraColumns.add("revision");
1672: extraColumns.add("expired");
1673: extraColumns.add("created");
1674: }
1675: FIDMapper mapper = getFIDMapper(query.getTypeName());
1676: for (int i = 0; i < mapper.getColumnCount(); i++) {
1677: extraColumns.add(mapper.getColumnName(i).toLowerCase());
1678: }
1679:
1680: List filtered = new ArrayList(names.length);
1681: for (int i = 0; i < names.length; i++) {
1682: if (!extraColumns.contains(names[i]))
1683: filtered.add(names[i]);
1684: }
1685: return (String[]) filtered.toArray(new String[filtered.size()]);
1686: }
1687:
1688: /**
1689: * Simple struct used to carry primary key metadata information
1690: *
1691: * @author aaime
1692: *
1693: */
1694: private static class PkDescriptor {
1695: String name;
1696:
1697: String[] columns;
1698: }
1699:
1700: public void dispose() {
1701: if (wrapped != null) {
1702: wrapped.dispose();
1703: wrapped = null;
1704: }
1705: }
1706: }
|