0001: /*
0002: * Copyright (c) 1998 - 2005 Versant Corporation
0003: * All rights reserved. This program and the accompanying materials
0004: * are made available under the terms of the Eclipse Public License v1.0
0005: * which accompanies this distribution, and is available at
0006: * http://www.eclipse.org/legal/epl-v10.html
0007: *
0008: * Contributors:
0009: * Versant Corporation - initial API and implementation
0010: */
0011: package com.versant.core.jdbc;
0012:
0013: import com.versant.core.common.State;
0014: import com.versant.core.common.*;
0015: import com.versant.core.util.CharBuf;
0016: import com.versant.core.util.IntArray;
0017: import com.versant.core.metadata.*;
0018: import com.versant.core.server.*;
0019: import com.versant.core.logging.LogEventStore;
0020: import com.versant.core.jdo.ServerLogEvent;
0021: import com.versant.core.jdo.QueryDetails;
0022: import com.versant.core.jdo.VersantQueryPlan;
0023: import com.versant.core.storagemanager.*;
0024: import com.versant.core.jdbc.metadata.*;
0025: import com.versant.core.jdbc.sql.SqlDriver;
0026: import com.versant.core.jdbc.sql.exp.SelectExp;
0027: import com.versant.core.jdbc.sql.exp.SqlExp;
0028: import com.versant.core.jdbc.sql.exp.Join;
0029: import com.versant.core.jdbc.sql.exp.ColumnExp;
0030: import com.versant.core.jdbc.conn.LoggingResultSet;
0031: import com.versant.core.jdbc.conn.PooledPreparedStatement;
0032: import com.versant.core.jdbc.query.JdbcCompiledQuery;
0033: import com.versant.core.jdbc.query.JdbcJDOQLCompiler;
0034:
0035: import com.versant.core.jdbc.ejbql.JdbcEJBQLCompiler;
0036: import com.versant.core.jdbc.ejbql.JdbcQueryResultEJBQL;
0037:
0038: import java.sql.*;
0039: import java.lang.reflect.Modifier;
0040: import java.util.*;
0041:
0042: /**
0043: * StorageManager implementation for JDBC.
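* <p>
* A rough sketch of how a single optimistic transaction flows through this
* class (illustrative only; the real wiring is done by the server layer and
* the constants come from the StorageManager interface):
* <pre>
* sm.begin(true);                                   // optimistic tx
* StatesReturned data = sm.fetch(ctx, oid, null, fetchGroup, null);
* // ... client makes changes ...
* sm.store(toStore, toDelete, false, STORE_OPTION_COMMIT, false);
* </pre>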
0044: */
0045: public final class JdbcStorageManager implements StorageManager {
0046:
0047: private final ModelMetaData jmd;
0048: private final StorageCache cache;
0049: private final JdbcConnectionSource conSrc;
0050: private final SqlDriver sqlDriver;
0051: private final CompiledQueryCache compiledQueryCache;
0052: private final LogEventStore pes;
0053:
0054: private int lockPolicy;
0055: private int conPolicy;
0056:
0057: private Connection conx;
0058: private Object cacheTx;
0059: private VersantClientJDBCConnection clientCon;
0060:
0061: private boolean txActive;
0062: private boolean optimistic;
0063: private boolean pinned; // do not release conx until end of tx
0064: private boolean flushed; // updates done without commit on conx so do
0065: // not use the cache even if optimistic
0066: private boolean forUpdateField;
0067:
0068: private JdbcQueryResult queryResultHead;
0069: private JdbcQueryResult queryResultTail;
0070:
0071: // this is a reference to persistGraphFullSort or persistGraphPartialSort
0072: private PersistGraph activePersistGraph;
0073: private PersistGraphFullSort persistGraphFullSort;
0074: private PersistGraph persistGraphPartialSort;
0075:
0076: private final boolean useBatchInsert;
0077: private final boolean useBatchUpdate;
0078:
0079: private boolean[] changedClassesFlag;
0080: private ClassMetaData[] changedClasses;
0081: private int changedClassCount;
0082:
0083: public static final String STATUS_OPEN_QUERY_RESULT_COUNT = "openQueryResultCount";
0084:
0085: public JdbcStorageManager(ModelMetaData jmd,
0086: JdbcConnectionSource conSrc, SqlDriver sqlDriver,
0087: StorageCache cache, CompiledQueryCache compiledQueryCache,
0088: LogEventStore pes, JdbcConfig c) {
0089: this.jmd = jmd;
0090: this.conSrc = conSrc;
0091: this.sqlDriver = sqlDriver;
0092: this.cache = cache;
0093: this.compiledQueryCache = compiledQueryCache;
0094: this.pes = pes;
0095: useBatchInsert = !c.jdbcDisableStatementBatching
0096: && sqlDriver.isInsertBatchingSupported();
0097: useBatchUpdate = !c.jdbcDisableStatementBatching
0098: && sqlDriver.isUpdateBatchingSupported();
0099: conPolicy = CON_POLICY_RELEASE;
0100: lockPolicy = LOCK_POLICY_NONE;
0101: }
0102:
0103: public boolean isForUpdate() {
0104: return forUpdateField;
0105: }
0106:
0107: public boolean isOptimistic() {
0108: return optimistic;
0109: }
0110:
0111: public boolean isActive() {
0112: return txActive;
0113: }
0114:
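/**
* Begin a new transaction. Fails if a tx is already active. The optimistic
* flag is remembered for the duration of the tx and influences locking
* (see {@link #setLockingPolicy(int)}) and level 2 cache usage.
*/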
0115: public void begin(boolean optimistic) {
0116: if (txActive) {
0117: throw BindingSupportImpl.getInstance().internal(
0118: "tx already active");
0119: }
0120: this.optimistic = optimistic;
0121: txActive = true;
0122: setFlagsForLockPolicy();
0123: }
0124:
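/**
* Commit the active transaction. The JDBC connection is committed if needed
* and released according to the connection policy, and the cache tx (if any)
* is ended.
*/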
0125: public void commit() {
0126: checkActiveTx();
0127: commitAndReleaseCon(flushed || !optimistic
0128: || conPolicy != CON_POLICY_RELEASE);
0129: if (cacheTx != null) {
0130: cache.endTx(cacheTx);
0131: cacheTx = null;
0132: }
0133: flushed = pinned = false;
0134: txActive = false;
0135: }
0136:
0137: public void rollback() {
0138: checkActiveTx();
0139: rollbackImp(false);
0140: }
0141:
0142: private void rollbackImp(boolean reset) {
0143: try {
0144: if (conx != null) {
0145: closeAllQueries();
0146: try {
0147: conx.rollback();
0148: if (conPolicy != CON_POLICY_PIN || reset) {
0149: if (clientCon != null) {
0150: clientCon.close();
0151: }
0152: conSrc.returnConnection(conx);
0153: conx = null;
0154: }
0155: } catch (SQLException e) {
0156: throw handleException(e);
0157: }
0158: }
0159: } finally {
0160: if (cacheTx != null) {
0161: cache.endTx(cacheTx);
0162: cacheTx = null;
0163: }
0164: flushed = pinned = false;
0165: txActive = false;
0166: }
0167: }
0168:
0169: public void setConnectionPolicy(int policy) {
0170: conPolicy = policy;
0171: }
0172:
0173: public int getConnectionPolicy() {
0174: return conPolicy;
0175: }
0176:
0177: public void setLockingPolicy(int policy) {
0178: lockPolicy = policy;
0179: setFlagsForLockPolicy();
0180: }
0181:
0182: public int getLockingPolicy() {
0183: return lockPolicy;
0184: }
0185:
0186: public void logEvent(int level, String description, int ms) {
0187: // todo this must move to the event logging and error handling proxy
0188: switch (level) {
0189: case EVENT_ERRORS:
0190: if (!pes.isSevere())
0191: return;
0192: break;
0193: case EVENT_NORMAL:
0194: if (!pes.isFine())
0195: return;
0196: break;
0197: case EVENT_VERBOSE:
0198: if (!pes.isFiner())
0199: return;
0200: break;
0201: case EVENT_ALL:
0202: if (!pes.isFinest())
0203: return;
0204: break;
0205: }
0206: ServerLogEvent ev = new ServerLogEvent(ServerLogEvent.USER,
0207: description);
0208: ev.setTotalMs(ms);
0209: pes.log(ev);
0210: }
0211:
0212: public StorageManager getInnerStorageManager() {
0213: return null;
0214: }
0215:
0216: /**
0217: * Can data be retrieved from or stored in the cache given our current
0218: * state? This is true if no flush has been done and the current tx is
0219: * optimistic or there is no active tx.
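* I.e. (optimistic && !flushed) || !txActive.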
0220: */
0221: private boolean canUseCache() {
0222: return optimistic && !flushed || !txActive;
0223: }
0224:
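/**
* Fetch the state for oid containing at least fetchGroup. If the level 2
* cache may be used (optimistic tx with no flush done, or no active tx) the
* state is looked up there first, otherwise it is read from the database.
*/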
0225: public StatesReturned fetch(ApplicationContext context, OID oid,
0226: State current, FetchGroup fetchGroup,
0227: FieldMetaData triggerField) {
0228: try {
0229: StatesReturned container = new StatesReturned(context);
0230: if (canUseCache()) {
0231: State s = cache.getState(oid, fetchGroup);
0232: if (s == null) {
0233: ClassMetaData base = oid.getBaseClassMetaData();
0234: if (base.cacheStrategy == MDStatics.CACHE_STRATEGY_ALL
0235: && !base.cacheStrategyAllDone) {
0236: base.cacheStrategyAllDone = true;
0237: StatesReturned all = new StatesReturned(
0238: DummyApplicationContext.INSTANCE);
0239: getAllStates(context, base, fetchGroup,
0240: container.next = all);
0241: s = all.get(oid);
0242: }
0243: }
0244: // todo fetch related objects from cache if s came from cache
0245: if (s == null) {
0246: getState(oid, fetchGroup, container);
0247: } else {
0248: container.add(oid, s);
0249: }
0250: } else {
0251: getState(oid, fetchGroup, container);
0252: }
0253: finishRead(container);
0254: return container;
0255: } catch (Throwable t) {
0256: finishFailedRead();
0257: throw handleException(t);
0258: }
0259: }
0260:
0261: public StatesReturned fetch(ApplicationContext context,
0262: OIDArray oids, FieldMetaData triggerField) {
0263: try {
0264: StatesReturned container = new StatesReturned(context);
0265: int n = oids.size();
0266: if (canUseCache()) {
0267: StatesReturned all = null;
0268: for (int i = 0; i < n; i++) {
0269: OID oid = oids.oids[i];
0270: ClassMetaData cmd = oid.getAvailableClassMetaData();
0271: FetchGroup fg = cmd.fetchGroups[0];
0272: State s = cache.getState(oid, fg);
0273: if (s == null) {
0274: ClassMetaData base = oid.getBaseClassMetaData();
0275: if (base.cacheStrategy == MDStatics.CACHE_STRATEGY_ALL
0276: && !base.cacheStrategyAllDone) {
0277: base.cacheStrategyAllDone = true;
0278: if (all == null) {
0279: container.next = all = new StatesReturned(
0280: DummyApplicationContext.INSTANCE);
0281: }
0282: getAllStates(context, base,
0283: base.fetchGroups[0], all);
0284: s = all.get(oid);
0285: }
0286: }
0287: // todo fetch related objects from cache if s came from cache
0288: if (s == null) {
0289: getState(oid, fg, container);
0290: } else {
0291: container.add(oid, s);
0292: }
0293: }
0294: } else {
0295: for (int i = 0; i < n; i++) {
0296: OID oid = oids.oids[i];
0297: ClassMetaData cmd = oid.getAvailableClassMetaData();
0298: getState(oid, cmd.fetchGroups[0], container);
0299: }
0300: }
0301: finishRead(container);
0302: return container;
0303: } catch (Throwable t) {
0304: finishFailedRead();
0305: throw handleException(t);
0306: }
0307: }
0308:
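/**
* Persist the changes in toStore and the deletes in toDelete. Depending on
* storeOption this is a flush, a prepare or a full commit. Instances touched
* by the operation are evicted from the level 2 cache.
*/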
0309: public StatesReturned store(StatesToStore toStore,
0310: DeletePacket toDelete, boolean returnFieldsUpdatedBySM,
0311: int storeOption, boolean evictClasses) {
0312: checkActiveTx();
0313: if (storeOption == STORE_OPTION_FLUSH) {
0314: // make sure open queries will not be cached if this is a flush
0315: for (JdbcQueryResult qrw = queryResultHead; qrw != null; qrw = qrw.prev) {
0316: qrw.setNonCacheble();
0317: }
0318: flushed = pinned = true;
0319: }
0320:
0321: // persist changes to the database
0322: StatesReturned container = new StatesReturned(
0323: DummyApplicationContext.INSTANCE);
0324: boolean updates = toStore != null && !toStore.isEmpty();
0325: boolean deletes = toDelete != null && !toDelete.isEmpty();
0326: try {
0327: if (updates) {
0328: doUpdates(toStore, container, returnFieldsUpdatedBySM);
0329: }
0330: if (deletes) {
0331: doDeletes(toDelete);
0332: }
0333: clearNonAutoSetFields(container);
0334: } catch (Exception e) {
0335: throw handleException(e);
0336: } finally {
0337: if (activePersistGraph != null) {
0338: activePersistGraph.clear();
0339: activePersistGraph = null;
0340: }
0341: }
0342:
0343: boolean commit = storeOption == STORE_OPTION_COMMIT;
0344: switch (storeOption) {
0345: case STORE_OPTION_COMMIT:
0346: commit = true;
0347: commitAndReleaseCon(!optimistic || flushed || updates
0348: || deletes || conPolicy != CON_POLICY_RELEASE);
0349: flushed = pinned = false;
0350: txActive = false;
0351: break;
0352: case STORE_OPTION_PREPARE:
0353: // make sure conx.commit() is done when commit() is called later
0354: flushed = flushed || updates || deletes;
0355: break;
0356: }
0357:
0358: // evict from cache
0359: cacheTx();
0360: if (toStore.epcAll) {
0361: cache.evictAll(cacheTx);
0362: } else if (evictClasses || changedClassCount > 0) {
0363: addChangedClasses(toStore, toDelete);
0364: cache.evict(cacheTx, changedClasses, changedClassCount);
0365: clearChangedClasses();
0366: } else {
0367: int expected = toStore.size()
0368: + toDelete.size()
0369: + (toStore.epcOids == null ? 0
0370: : toStore.epcOids.length);
0371: cache.evict(cacheTx, toStore.oids, 0, toStore.size(),
0372: expected);
0373: cache.evict(cacheTx, toDelete.oids, 0, toDelete.size(),
0374: expected);
0375: if (toStore.epcClasses != null) {
0376: int n = toStore.epcClasses.length;
0377: ClassMetaData[] a = new ClassMetaData[n];
0378: for (int i = 0; i < n; i++) {
0379: a[i] = jmd.classes[toStore.epcClasses[i]];
0380: }
0381: cache.evict(cacheTx, a, n);
0382: }
0383: if (toStore.epcOids != null) {
0384: cache.evict(cacheTx, toStore.epcOids, 0,
0385: toStore.epcOids.length, expected);
0386: }
0387: }
0388:
0389: // get rid of cache transaction if we have committed
0390: if (commit) {
0391: cache.endTx(cacheTx);
0392: cacheTx = null;
0393: }
0394:
0395: return container;
0396: }
0397:
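/**
* Create a new OID for an instance of cmd using its jdbc-key-generator.
* Only pre-insert key generators are supported here; the generator may use
* its own connection if it requires one.
*/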
0398: public OID createOID(ClassMetaData cmd) {
0399: checkActiveTx();
0400: JdbcClass jdbcClass = (JdbcClass) cmd.storeClass;
0401: JdbcKeyGenerator keygen = jdbcClass.jdbcKeyGenerator;
0402: if (keygen == null) {
0403: throw BindingSupportImpl.getInstance()
0404: .invalidOperation(
0405: "Class " + cmd.qname
0406: + " has no jdbc-key-generator");
0407: }
0408: if (keygen.isPostInsertGenerator()) {
0409: throw BindingSupportImpl
0410: .getInstance()
0411: .invalidOperation(
0412: "Class "
0413: + cmd.qname
0414: + " is using a post insert jdbc-key-generator");
0415: }
0416: OID oid = cmd.createOID(true);
0417: Object[] oidData = new Object[((JdbcMetaData) jmd.jdbcMetaData).maxPkSimpleColumns];
0418: try {
0419: Connection kgcon = null;
0420: boolean rollback = true;
0421: try {
0422: boolean needKgcon = keygen.isRequiresOwnConnection();
0423: if (needKgcon) {
0424: kgcon = conSrc.getConnection(true, false);
0425: }
0426: keygen.generatePrimaryKeyPre(cmd.qname,
0427: jdbcClass.table, 1, oidData, needKgcon ? kgcon
0428: : con());
0429: oid.copyKeyFields(oidData);
0430: rollback = false;
0431: } finally {
0432: if (kgcon != null) {
0433: if (rollback) {
0434: kgcon.rollback();
0435: } else {
0436: kgcon.commit();
0437: }
0438: conSrc.returnConnection(kgcon);
0439: }
0440: }
0441: // make sure normal con is not released until end of tx
0442: if (kgcon == null && conx != null) {
0443: pinned = true;
0444: }
0445: return oid;
0446: } catch (SQLException e) {
0447: throw handleException(e);
0448: }
0449: }
0450:
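/**
* Get a compiled version of query from the compiled query cache, compiling
* and caching it (with any per-query cacheable override applied) if it is
* not already present.
*/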
0451: public CompiledQuery compileQuery(QueryDetails query) {
0452: JdbcCompiledQuery cq = (JdbcCompiledQuery) compiledQueryCache
0453: .get(query);
0454: if (cq == null) {
0455: cq = compile(query);
0456: // apply caching override, if any
0457: switch (query.getCacheable()) {
0458: case QueryDetails.FALSE:
0459: cq.setCacheable(false);
0460: break;
0461: case QueryDetails.TRUE:
0462: cq.setCacheable(true);
0463: break;
0464: }
0465: cq = (JdbcCompiledQuery) compiledQueryCache.add(cq);
0466: }
0467:
0468: return cq;
0469: }
0470:
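/**
* Create a JdbcQueryResult for query and register it as an open query. The
* actual rows are fetched in batches via fetchNextQueryResult.
*/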
0471: public ExecuteQueryReturn executeQuery(ApplicationContext context,
0472: QueryDetails query, CompiledQuery compiledQuery,
0473: Object[] params) {
0474: JdbcCompiledQuery cq;
0475: if (compiledQuery == null) {
0476: cq = (JdbcCompiledQuery) compileQuery(query);
0477: } else {
0478: cq = (JdbcCompiledQuery) compiledQuery;
0479: }
0480: JdbcQueryResult res = null;
0481: if (cq.isEJBQLHack()) {
0482:
0483: res = new JdbcQueryResultEJBQL(this, cq, params,
0484: canUseCache());
0485:
0486: } else {
0487: res = new JdbcQueryResult(this, cq, params, canUseCache());
0488: }
0489: addQueryResult(res);
0490: return res;
0491: }
0492:
0493: public QueryResultContainer executeQueryAll(
0494: ApplicationContext context, QueryDetails query,
0495: CompiledQuery compiledQuery, Object[] params) {
0496: JdbcCompiledQuery cq;
0497: if (compiledQuery == null) {
0498: cq = (JdbcCompiledQuery) compileQuery(query);
0499: } else {
0500: cq = (JdbcCompiledQuery) compiledQuery;
0501: }
0502: try {
0503: QueryResultContainer container = new QueryResultContainer(
0504: context, cq);
0505: if (cq.isCacheble() && cache.isQueryCacheEnabled()
0506: && (!txActive || optimistic && !flushed)) {
0507: CachedQueryResult res = cache
0508: .getQueryResult(cq, params);
0509: if (res == null
0510: || !addToContainer(cq, params, res, container)) {
0511: fillContainerWithAll(context, cq, params, container);
0512: //try and add the results to the cache
0513: res = new CachedQueryResult();
0514: // container.container.addIndirectOIDs(res);
0515: container.addResultsTo(res, cq
0516: .isCopyResultsForCache());
0517: finishRead(container.container, cq, params, res, -1);
0518: }
0519: } else {
0520: fillContainerWithAll(context, cq, params, container);
0521: finishRead(container.container, cq, params, null, -1);
0522: }
0523: return container;
0524: } catch (Throwable t) {
0525: finishFailedRead();
0526: throw handleException(t);
0527: }
0528: }
0529:
0530: public int executeQueryCount(QueryDetails query,
0531: CompiledQuery compiledQuery, Object[] params) {
0532: JdbcCompiledQuery cq;
0533: if (compiledQuery == null) {
0534: cq = (JdbcCompiledQuery) compileQuery(query);
0535: } else {
0536: cq = (JdbcCompiledQuery) compiledQuery;
0537: }
0538: try {
0539: int ans;
0540: if (cq.isCacheble() && cache.isQueryCacheEnabled()
0541: && (!txActive || optimistic && !flushed)) {
0542: ans = cache.getQueryResultCount(cq, params);
0543: if (ans < 0) {
0544: ans = executeCount(cq, params);
0545: finishRead(null, cq, params, null, ans);
0546: }
0547: } else {
0548: ans = executeCount(cq, params);
0549: }
0550: return ans;
0551: } catch (Throwable t) {
0552: finishFailedRead();
0553: throw handleException(t);
0554: }
0555: }
0556:
0557: public VersantQueryPlan getQueryPlan(QueryDetails query,
0558: CompiledQuery compiledQuery, Object[] params) {
0559: try {
0560: if (compiledQuery == null) {
0561: compiledQuery = compileQuery(query);
0562: }
0563: VersantQueryPlan qp = executePlan(
0564: (JdbcCompiledQuery) compiledQuery, params);
0565: finishRead();
0566: return qp;
0567: } catch (Throwable t) {
0568: finishFailedRead();
0569: throw handleException(t);
0570: }
0571: }
0572:
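/**
* Fetch the next batch of results for runningQuery, answering from the
* query cache when possible. The query is closed automatically once the
* last batch has been returned.
*/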
0573: public QueryResultContainer fetchNextQueryResult(
0574: ApplicationContext context, RunningQuery runningQuery,
0575: int skipAmount) {
0576: JdbcQueryResult res = (JdbcQueryResult) runningQuery;
0577: if (res == null || res.isFinished()) {
0578: return null;
0579: }
0580: try {
0581: QueryResultContainer container = new QueryResultContainer(
0582: context, res.getCompiledQuery());
0583: boolean cacheable = canUseCache()
0584: && res.isCachedResultsOk();
0585: if (cacheable
0586: && checkCacheForQuery(res.getJdbcCompiledQuery(),
0587: res.getParams(), container)) {
0588: // we got data from cache
0589: container.qFinished = true;
0590: res.close();
0591: removeQueryResult(res);
0592: } else { // get from database
0593: CachedQueryResult queryData = null;
0594: res.updateCacheble();
0595:
0596: if (res.nextBatch(context, skipAmount, container)) {
0597: // query has finished
0598: if (cacheable && res.isCacheble()) {
0599: queryData = res.qRCache;
0600: }
0601: res.close();
0602: removeQueryResult(res);
0603: }
0604: finishRead(container.container, res
0605: .getJdbcCompiledQuery(), res.getParams(),
0606: queryData, -1);
0607: }
0608: return container;
0609: } catch (Throwable t) {
0610: finishFailedRead();
0611: throw handleException(t);
0612: }
0613: }
0614:
0615: public QueryResultContainer fetchRandomAccessQueryResult(
0616: ApplicationContext context, RunningQuery runningQuery,
0617: int index, int fetchAmount) {
0618: try {
0619: JdbcQueryResult res = (JdbcQueryResult) runningQuery;
0620: QueryResultContainer qContainer = new QueryResultContainer(
0621: context, res.getCompiledQuery());
0622:
0623: res.getAbsolute(context, qContainer, index, fetchAmount);
0624: finishRead(qContainer.container,
0625: res.getJdbcCompiledQuery(), res.getParams(), null,
0626: 0);
0627: return qContainer;
0628: } catch (Throwable t) {
0629: finishFailedRead();
0630: throw handleException(t);
0631: }
0632: }
0633:
0634: public int getRandomAccessQueryCount(ApplicationContext context,
0635: RunningQuery runningQuery) {
0636: return ((JdbcQueryResult) runningQuery).getResultCount();
0637: }
0638:
0639: public void closeQuery(RunningQuery runningQuery) {
0640: JdbcQueryResult res = (JdbcQueryResult) runningQuery;
0641: if (!res.isClosed()) {
0642: res.close();
0643: removeQueryResult(res);
0644: finishRead();
0645: }
0646: }
0647:
0648: public Object getDatastoreConnection() {
0649: if (clientCon == null) {
0650: clientCon = new VersantClientJDBCConnection(this, con());
0651: pinned = true;
0652: }
0653: return clientCon;
0654: }
0655:
0656: /**
0657: * This is called when a JDBC Connection previously given to a client
0658: * is closed.
0659: */
0660: public void clientConClosed() {
0661: clientCon = null;
0662: }
0663:
0664: public boolean isNotifyDirty() {
0665: return false;
0666: }
0667:
0668: public void notifyDirty(OID oid) {
0669: throw BindingSupportImpl.getInstance().internal(
0670: "should not be called");
0671: }
0672:
0673: public void reset() {
0674: resetImp();
0675: forUpdateField = false;
0676: lockPolicy = LOCK_POLICY_NONE;
0677: conPolicy = CON_POLICY_RELEASE;
0678: clearChangedClasses();
0679: }
0680:
0681: public void destroy() {
0682: resetImp();
0683: }
0684:
0685: private void resetImp() {
0686: try {
0687: rollbackImp(true);
0688: } catch (Exception e) {
0689: // ignore
0690: }
0691: finishFailedRead();
0692: }
0693:
0694: /**
0695: * Wrap an exception appropriately and return one to be thrown.
0696: */
0697: public RuntimeException handleException(Throwable e) {
0698: return handleException(e.toString(), e, false, null);
0699: }
0700:
0701: /**
0702: * Wrap an exception appropriately and return one to be thrown.
0703: */
0704: public RuntimeException handleException(String msg, Throwable e) {
0705: return handleException(msg, e, false, null);
0706: }
0707:
0708: /**
0709: * Wrap an exception appropriately and return one to be thrown.
0710: */
0711: public RuntimeException handleException(String msg, Throwable e,
0712: boolean convertLockTimeout, Object failed) {
0713: if (convertLockTimeout && isOptimistic()
0714: && sqlDriver.isHandleLockTimeout()
0715: && sqlDriver.isLockTimeout(e)) {
0716:
0717: throw BindingSupportImpl.getInstance().concurrentUpdate(
0718: "Row is locked: " + msg, failed);
0719: }
0720: return sqlDriver.mapException(e, msg, true);
0721: }
0722:
0723: /**
0724: * Get the names of all tables in the database converted to lower case.
0725: * The lower case name is mapped to the real case name.
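* E.g. a hypothetical table ORDER_LINE would be returned as the entry
* "order_line" -> "ORDER_LINE".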
0726: */
0727: public HashMap getDatabaseTableNames(Connection con)
0728: throws SQLException {
0729: ArrayList a = sqlDriver.getTableNames(con);
0730: int n = a.size();
0731: HashMap ans = new HashMap(n * 2);
0732: for (int i = 0; i < a.size(); i++) {
0733: String t = (String) a.get(i);
0734: ans.put(t.toLowerCase(), t);
0735: }
0736: return ans;
0737: }
0738:
0739: /**
0740: * Clear our list of changed classes.
0741: */
0742: private void clearChangedClasses() {
0743: changedClasses = null;
0744: changedClassesFlag = null;
0745: changedClassCount = 0;
0746: }
0747:
0748: /**
0749: * Add a class to the list of changed classes that we keep track of.
0750: */
0751: private void addChangedClass(ClassMetaData cmd) {
0752: if (changedClasses == null) {
0753: changedClasses = new ClassMetaData[jmd.classes.length];
0754: changedClassesFlag = new boolean[jmd.classes.length];
0755: }
0756: if (changedClassesFlag[cmd.index]) {
0757: return;
0758: }
0759: changedClasses[changedClassCount++] = cmd;
0760: changedClassesFlag[cmd.index] = true;
0761: }
0762:
0763: /**
0764: * Add all the classes referenced in toStore and toDelete to our list of
0765: * changed classes.
0766: */
0767: private void addChangedClasses(StatesToStore toStore,
0768: DeletePacket toDelete) {
0769: // add classes for all states.
0770: State[] states = toStore.states;
0771: int n = toStore.size();
0772: for (int i = 0; i < n; i++) {
0773: addChangedClass(states[i].getClassMetaData(jmd));
0774: }
0775: // add classes for all epc classes.
0776: if (toStore.epcClasses != null) {
0777: int[] a = toStore.epcClasses;
0778: for (int i = toStore.epcClassCount - 1; i >= 0; i--) {
0779: addChangedClass(jmd.classes[a[i]]);
0780: }
0781: }
0782: // Make sure the classes for all deleted OIDs are included
0783: OID[] oids = toDelete.oids;
0784: n = toDelete.size();
0785: for (int i = 0; i < n; i++) {
0786: OID oid = oids[i];
0787: addChangedClass(oid.getClassMetaData());
0788: }
0789: // Make sure the classes for all epc OIDs are included
0790: oids = toStore.epcOids;
0791: if (oids != null) {
0792: n = oids.length;
0793: for (int i = 0; i < n; i++) {
0794: addChangedClass(oids[i].getClassMetaData());
0795: }
0796: }
0797: }
0798:
0799: private void checkActiveTx() {
0800: if (!txActive) {
0801: throw BindingSupportImpl.getInstance().internal(
0802: "no active transaction");
0803: }
0804: }
0805:
0806: /**
0807: * This method must be called at the end of all top level read operations
0808: * to maybe commit and maybe release the database connection (depending on
0809: * connection policy). It will add the data in container to the cache
0810: * and also add the query data. Either container or queryData or both
0811: * may be null.
0812: */
0813: private void finishRead(StatesReturned container,
0814: JdbcCompiledQuery cq, Object[] params,
0815: CachedQueryResult queryData, int queryResultCount) {
0816: if (conx == null) {
0817: // no connection so all data must have come
0818: // from the level 2 cache so we have nothing to do
0819: return;
0820: }
0821: boolean commit;
0822: boolean release;
0823: if (optimistic || !txActive) {
0824: if (pinned) {
0825: commit = release = false;
0826: } else {
0827: // if there are open queries then we cannot commit or release
0828: commit = release = queryResultHead == null
0829: && conPolicy == CON_POLICY_RELEASE;
0830: }
0831: } else {
0832: commit = release = false;
0833: }
0834: boolean ok = false;
0835: try {
0836: if (commit) {
0837: conx.commit();
0838: }
0839: if (canUseCache()) {
0840: for (StatesReturned c = container; c != null; c = c.next) {
0841: cache.add(cacheTx(), c);
0842: }
0843: if (cq != null && cq.isCacheble()) {
0844: if (queryData != null) {
0845: cache.add(cacheTx(), cq, params, queryData);
0846: } else if (queryResultCount >= 0) {
0847: cache.add(cacheTx(), cq, params,
0848: queryResultCount);
0849: }
0850: }
0851: }
0852: if (commit && cacheTx != null) {
0853: cache.endTx(cacheTx);
0854: cacheTx = null;
0855: }
0856: if (release) {
0857: conSrc.returnConnection(conx);
0858: conx = null;
0859: }
0860: ok = true;
0861: } catch (SQLException e) {
0862: throw BindingSupportImpl.getInstance().datastore(
0863: e.toString(), e);
0864: } finally {
0865: if (!ok) {
0866: if (release && conx != null) {
0867: try {
0868: conx.rollback();
0869: } catch (Exception e) {
0870: // ignore
0871: }
0872: try {
0873: conSrc.returnConnection(conx);
0874: } catch (SQLException e) {
0875: // ignore
0876: }
0877: conx = null;
0878: }
0879: }
0880: }
0881: }
0882:
0883: private void finishRead() {
0884: finishRead(null, null, null, null, -1);
0885: }
0886:
0887: private void finishRead(StatesReturned container) {
0888: finishRead(container, null, null, null, -1);
0889: }
0890:
0891: /**
0892: * This must be called for all top level read operations that fail.
0893: * It ensures that if the connection should be released it is released.
0894: * Any exceptions are discarded as they would likely hide the original
0895: * exception that caused the read to fail.
0896: */
0897: private void finishFailedRead() {
0898: try {
0899: finishRead(null, null, null, null, -1);
0900: } catch (Exception e) {
0901: // ignore
0902: }
0903: }
0904:
0905: /**
0906: * If we have a connection optionally commit it and then release it if
0907: * the conPolicy allows. If the connection is released then any client
0908: * connection is closed. All open queries are closed.
0909: */
0910: private void commitAndReleaseCon(boolean commit) {
0911: if (conx == null)
0912: return;
0913: closeAllQueries();
0914: try {
0915: if (commit) {
0916: conx.commit();
0917: }
0918: if (conPolicy != CON_POLICY_PIN) {
0919: if (clientCon != null) {
0920: clientCon.close();
0921: }
0922: conSrc.returnConnection(conx);
0923: conx = null;
0924: }
0925: } catch (SQLException e) {
0926: throw handleException(e);
0927: }
0928: }
0929:
0930: /**
0931: * Set flags for lockPolicy as required. This must be called
0932: * when the policy changes or when a new tx starts.
0933: */
0934: private void setFlagsForLockPolicy() {
0935: forUpdateField = lockPolicy != LOCK_POLICY_NONE && !optimistic;
0936: }
0937:
0938: /**
0939: * Get our database connection. This will allocate one if we currently
0940: * have none. It will also start a cache transaction if there is none.
0941: */
0942: public Connection con() {
0943: if (conx == null) {
0944: cacheTx();
0945: try {
0946: conx = conSrc.getConnection(false, false);
0947: } catch (RuntimeException e) {
0948: throw e;
0949: } catch (Exception e) {
0950: throw BindingSupportImpl.getInstance().internal(
0951: e.toString(), e);
0952: }
0953: }
0954: return conx;
0955: }
0956:
0957: /**
0958: * Get our cache transaction. This will begin one if there is none.
0959: */
0960: public Object cacheTx() {
0961: if (cacheTx == null) {
0962: cacheTx = cache.beginTx();
0963: }
0964: return cacheTx;
0965: }
0966:
0967: /**
0968: * Get the meta data.
0969: */
0970: public ModelMetaData getJmd() {
0971: return jmd;
0972: }
0973:
0974: /**
0975: * Return a state for the supplied oid containing at least the fetch
0976: * group specified. Additional states may be supplied to the container.
0977: * The oid must be resolved by this call. The state returned is
0978: * added to the container.
0979: */
0980: public State getState(OID oid, FetchGroup fetchGroup,
0981: StateContainer container) {
0982: ClassMetaData cmd = oid.getBaseClassMetaData();
0983: try {
0984: boolean forUpdate = forUpdateField;
0985: if (forUpdate) {
0986: JdbcClass jdbcClass = (JdbcClass) cmd.storeClass;
0987: if (sqlDriver.getSelectForUpdate() == null) {
0988: // lock the instance with an update
0989: lock((JdbcOID) oid, jdbcClass);
0990: forUpdate = false;
0991: }
0992: }
0993: State s = getStateParColFetch((JdbcOID) oid.getRealOID(),
0994: fetchGroup, forUpdate, container);
0995: if (forUpdateField && lockPolicy == LOCK_POLICY_FIRST) {
0996: forUpdateField = false;
0997: }
0998: container.add(oid, s);
0999: return s;
1000: } catch (SQLException x) {
1001: throw handleException(x);
1002: }
1003: }
1004:
1005: /**
1006: * Get a state and any prefetched states from queryResult.
1007: */
1008: public void getState(ApplicationContext context, OID oid,
1009: FetchGroup fetchGroup, JdbcQueryResult queryResult,
1010: StateContainer container) {
1011: if (!context.isStateRequired(oid, fetchGroup)) {
1012: // todo old code added state to container here
1013: return;
1014: }
1015: State s;
1016: if (canUseCache()) {
1017: s = cache.getState(oid, fetchGroup);
1018: } else {
1019: s = null;
1020: }
1021: if (s == null) {
1022: s = queryResult.getResultState(forUpdateField, container);
1023: }
1024: container.add(oid, s);
1025: }
1026:
1027: /**
1028: * Lock the oid using an update statement.
1029: */
1030: private void lock(JdbcOID oid, JdbcClass jdbcClass) {
1031: PreparedStatement ps = null;
1032: try {
1033: ps = con().prepareStatement(jdbcClass.getLockRowSql());
1034: oid.setParams(ps, 1);
1035: if (ps.executeUpdate() == 0) {
1036: throw BindingSupportImpl.getInstance().objectNotFound(
1037: oid.toSString());
1038: }
1039: } catch (SQLException e) {
1040: throw handleException(e);
1041: } finally {
1042: cleanup(ps);
1043: }
1044: }
1045:
1046: private void cleanup(Statement s) {
1047: if (s != null) {
1048: try {
1049: s.close();
1050: } catch (SQLException x) {
1051: // ignore
1052: }
1053: }
1054: }
1055:
1056: private void cleanup(ResultSet rs) {
1057: if (rs != null) {
1058: try {
1059: rs.close();
1060: } catch (SQLException x) {
1061: // ignore
1062: }
1063: }
1064: }
1065:
1066: /**
1067: * Get the state for an OID with parColFetch. A parallel fetch is not
1068: * done for the first level down.
1069: */
1070: private State getStateParColFetch(JdbcOID oid,
1071: FetchGroup fetchGroup, boolean forUpdate,
1072: StateContainer container) throws SQLException {
1073: boolean doCrossJoin = true;
1074:
1075: fetchGroup = fetchGroup.resolve(oid, jmd);
1076: State state = null;
1077: boolean includeSubclasses = !oid.isResolved();
1078:
1079: ParColFetchUtil parColFetchUtil = new ParColFetchUtil(this,
1080: forUpdate, container, oid.getAvailableClassMetaData(),
1081: oid);
1082:
1083: final FgDs fgDs = ((JdbcFetchGroup) fetchGroup.storeFetchGroup)
1084: .getFgDs(includeSubclasses, false);
1085: String sql = getGetStateSql(fetchGroup, includeSubclasses,
1086: forUpdate, fgDs, doCrossJoin);
1087:
1088: if (sql.length() == 0) {
1089: doCrossJoin = false;
1090: // no main table columns in group but we still need to call
1091: // populate to pick up the pass 2 fields
1092: try {
1093: state = createStateImp(null, oid, fetchGroup,
1094: forUpdate, 1, null, includeSubclasses,
1095: container, fgDs, false, doCrossJoin, null);
1096: container.add(oid, state);
1097: parColFetchUtil.processParallelFetch(fgDs
1098: .getJoinStruct(), 1, true, null, oid, state,
1099: doCrossJoin, null, 0, fgDs);
1100: } finally {
1101: parColFetchUtil.close();
1102: }
1103: } else {
1104: PreparedStatement ps = null;
1105: ResultSet rs = null;
1106: try {
1107: ps = con().prepareStatement(sql);
1108: oid.setParams(ps, 1);
1109: rs = ps.executeQuery();
1110:
1111: //todo return a NullState if we are not supposed to throw an exception
1112: if (!rs.next()) {
1113: Utils.checkToThrowRowNotFound(oid, jmd);
1114: state = NULLState.NULL_STATE;
1115: container.add(oid, state);
1116: } else {
1117: MutableInt nextVal = new MutableInt();
1118: state = createStateImp(rs, oid, fetchGroup,
1119: forUpdate, 1, nextVal, includeSubclasses,
1120: container, fgDs, false, doCrossJoin, null);
1121: container.add(oid, state);
1122: parColFetchUtil
1123: .processParallelFetch(fgDs.getJoinStruct(),
1124: 1, true, null, oid, state,
1125: doCrossJoin, rs, nextVal.value,
1126: fgDs);
1127: }
1128: } finally {
1129: cleanup(rs);
1130: cleanup(ps);
1131: parColFetchUtil.close();
1132: }
1133: }
1134:
1135: if (Debug.DEBUG) {
1136: if (state != NULLState.NULL_STATE
1137: && Modifier
1138: .isAbstract(state.getClassMetaData(jmd).cls
1139: .getModifiers())) {
1140: throw BindingSupportImpl.getInstance().internal(
1141: "The cmd of this state is abstract");
1142: }
1143: }
1144:
1145: // oid.resolve(state);
1146: return state;
1147: }
1148:
1149: /**
1150: * Fetch a pass 2 field (e.g. a collection) and put its value(s) in state.
1151: */
1152: public int fetchPass2Field(OID oid, State state,
1153: FetchGroupField field, boolean forUpdate,
1154: StateContainer container, boolean fetchPass2Fields,
1155: ColFieldHolder colFHolder) throws SQLException {
1156: JdbcField jf = (JdbcField) field.fmd.storeField;
1157: if (jf instanceof JdbcCollectionField) {
1158: JdbcCollectionField jcf = (JdbcCollectionField) jf;
1159: return jcf.fetch(this, oid, state, field, forUpdate,
1160: container, fetchPass2Fields, colFHolder);
1161: }
1162: return 0;
1163: }
1164:
1165: /**
1166: * Get SQL to select the fetch group for the class. This will include
1167: * joins to pick up the next fetch group for referenced fields with useJoin
1168: * set to INNER or OUTER. This returns an empty String if the group does
1169: * not contain any main table fields.
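* The generated statement is roughly of the form (illustrative only, the
* real table and column names come from the mapping):
* <pre>
* SELECT t.col1, t.col2, ... FROM main_table t WHERE t.pk = ?
* </pre>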
1170: */
1171: private String getGetStateSql(FetchGroup group,
1172: boolean includeSubclasses, boolean forUpdate, FgDs fgDs,
1173: boolean crossJoinFirstPass2Field) {
1174: String sql = fgDs.getSql(forUpdate);
1175: if (sql != null) {
1176: return sql;
1177: }
1178:
1179: // generate a join query to get the group
1180: SelectExp root = new SelectExp();
1181: JdbcClass jdbcClass = (JdbcClass) group.classMetaData.storeClass;
1182: root.table = jdbcClass.table;
1183: addSelectFetchGroup(root, group, includeSubclasses, fgDs,
1184: crossJoinFirstPass2Field);
1185: root.whereExp = jdbcClass.table.createPkEqualsParamExp(root);
1186: if (root.selectList == null) {
1187: fgDs.setSql(sql = "", forUpdate);
1188: } else {
1189: root.forUpdate = forUpdate;
1190: fgDs.setSql(sql = generateSql(root).toString(), forUpdate);
1191: }
1192: return sql;
1193: }
1194:
1195: /**
1196: * Generate SQL text for the expression.
1197: */
1198: public CharBuf generateSql(SelectExp root) {
1199: int aliasCount = root.createAlias(0);
1200: if (aliasCount == 1)
1201: root.alias = null;
1202: CharBuf s = new CharBuf();
1203: root.appendSQL(sqlDriver, s, null);
1204: return s;
1205: }
1206:
1207: /**
1208: * Make whatever changes are necessary to root to select group. Root
1209: * must be a SelectExp from the table for the class of group. This will
1210: * append the fields in the group to the select list of root and will
1211: * add joins to pick up references where required. It will reuse
1212: * existing joins where possible.<p>
1213: * <p/>
1214: * If includeSubclasses is true then fields from sub fetch groups will be
1215: * included. Outer joins will be done to pick up the fields for subclasses
1216: * stored in different tables if they have useSubclassJoin true.<p>
1217: *
1218: * @see #getGetStateSql
1219: * @see com.versant.core.jdbc.query.JdbcJDOQLCompiler#compile
1220: */
1221: public SqlExp addSelectFetchGroup(SelectExp root, FetchGroup group,
1222: boolean includeSubclasses, FgDs fgDs,
1223: boolean crossJoinFirstPass2Field) {
1224: SqlExp sqlExp = addSelectFetchGroupImp(root, group, true,
1225: includeSubclasses, fgDs, fgDs.getJoinStruct(), fgDs
1226: .isJoinOk(), root, root.table.pk, new SqlExp(),
1227: false, crossJoinFirstPass2Field, null);
1228: if (!fgDs.isFinished())
1229: fgDs.updateCounts();
1230: return sqlExp;
1231: }
1232:
1233: public SqlExp addSelectFetchGroup(SelectExp root, FetchGroup group,
1234: boolean includeSubclasses, FgDs fgDs, SelectExp s,
1235: JdbcColumn[] lCols, JdbcField fromField) {
1236: SqlExp sqlExp = addSelectFetchGroupImp(root, group, true,
1237: includeSubclasses, fgDs, fgDs.getJoinStruct(), fgDs
1238: .isJoinOk(), s, lCols, new SqlExp(), false,
1239: false, null);
1240: if (!fgDs.isFinished())
1241: fgDs.updateCounts();
1242: return sqlExp;
1243: }
1244:
1245: private SqlExp addSelectFetchGroupImp(SelectExp root,
1246: FetchGroup group, boolean includeSuperGroups,
1247: boolean includeSubclasses, FgDs fgDs, JoinStructure js,
1248: boolean joinAllowed, SelectExp joinFromExp,
1249: JdbcColumn[] joinFromCols, SqlExp subClsIdCols,
1250: boolean addIdCols, boolean crossJoinFirstPass2Field,
1251: JdbcField joinFromField) {
1252: SqlExp list = new SqlExp();
1253: SqlExp pos = list;
1254:
1255: SqlExp tail = addSelectFetchGroupImp(root, pos, group,
1256: includeSuperGroups, includeSubclasses, fgDs, fgDs, js,
1257: joinAllowed, joinFromExp, joinFromCols, subClsIdCols,
1258: joinFromField);
1259: if (includeSubclasses) {
1260: JdbcClass jdbcClass = (JdbcClass) group.classMetaData.storeClass;
1261: if (jdbcClass.classIdCol != null) {
1262: JdbcColumn classIdCol = jdbcClass.classIdCol;
1263: if (classIdCol != null) {
1264: SelectExp se;
1265: if (classIdCol.table != root.table) {
1266: Join j = joinFromExp.findJoin(classIdCol.table,
1267: joinFromField);
1268: if (j == null) {
1269: se = new SelectExp();
1270: se.table = classIdCol.table;
1271: se.outer = root.outer;
1272: j = joinFromExp.addJoin(joinFromCols,
1273: se.table.pk, se);
1274: } else {
1275: se = j.selectExp;
1276: }
1277: } else {
1278: se = root;
1279: }
1280:
1281: if (fgDs != null && !fgDs.isFinished())
1282: fgDs.addClsIdCount();
1283:
1284: // put the expression at the start of the select list
1285: SqlExp e = classIdCol.toSqlExp(se);
1286: SqlExp ee;
1287: for (ee = e; ee.next != null; ee = ee.next)
1288: ;
1289: ee.next = list.next;
1290: list.next = e;
1291: }
1292: } else {
1293: if (group.classMetaData.pcSubclasses != null
1294: && ((JdbcClass) group.classMetaData.storeClass).readAsClass == null) {
1295: int count = 0;
1296: SqlExp e = subClsIdCols.next;
1297: for (;; e = e.next) {
1298: count++;
1299: if (e.next == null)
1300: break;
1301: }
1302: e.next = list.next;
1303: list.next = subClsIdCols.next;
1304: if (fgDs != null && !fgDs.isFinished()) {
1305: fgDs.addClsIdCount(count);
1306: }
1307: }
1308: }
1309: }
1310:
1311: if (crossJoinFirstPass2Field
1312: && group.crossJoinedCollectionField != null) {
1313: ((JdbcCollectionField) group.crossJoinedCollectionField.fmd.storeField)
1314: .getSelectExpFrom(this, root,
1315: group.crossJoinedCollectionField, fgDs);
1316: }
1317:
1318: /**
1319: * Add the pk columns to the select list.
1320: */
1321: if (addIdCols) {
1322: //todo add pk fields for selection. This must also be updated in the fgds for fieldcount.
1323: //We should probably not do this if there is a constraint to the ref, because the ref could
1324: //not be null then.
1325:
1326: if (fgDs != null)
1327: fgDs.addRefIdFields();
1328: JdbcColumn[] cols = root.table.pk;
1329: ColumnExp ans = new ColumnExp(cols[0], root, null);
1330: SqlExp e = ans;
1331: int nc = cols.length;
1332: for (int k = 1; k < nc; k++) {
1333: e = e.next = new ColumnExp(cols[k], root, null);
1334: }
1335:
1336: //add to start of list
1337: e.next = list.next;
1338: list.next = ans;
1339: }
1340:
1341: SqlExp e = root.selectList;
1342: if (e == null) {
1343: root.selectList = list.next;
1344: } else {
1345: for (; e.next != null; e = e.next)
1346: ;
1347: e.next = list.next;
1348: }
1349: return tail;
1350: }
1351:
1352: private ClassMetaData extractType(ResultSet rs, FetchGroup fg,
1353: int index) throws SQLException {
1354: FetchGroup[] subFGs = fg.subFetchGroups;
1355: if (subFGs == null) {
1356: return fg.classMetaData;
1357: }
1358:
1359: ClassMetaData cmd = null;
1360: for (int i = 0; i < subFGs.length; i++) {
1361: cmd = extractTypeImp(rs, subFGs[i], index);
1362: index += subFGs[i].classMetaData.totalNoOfSubClasses + 1;
1363: if (cmd != null) {
1364: return cmd;
1365: }
1366: }
1367: return fg.classMetaData;
1368: }
1369:
1370: private ClassMetaData extractTypeImp(ResultSet rs, FetchGroup fg,
1371: int index) throws SQLException {
1372: rs.getString(index++);
1373: if (!rs.wasNull()) {
1374: FetchGroup[] subFGs = fg.subFetchGroups;
1375: if (subFGs == null) {
1376: return fg.classMetaData;
1377: }
1378:
1379: ClassMetaData cmd = null;
1380: for (int i = 0; i < subFGs.length; i++) {
1381: cmd = extractTypeImp(rs, subFGs[i], index);
1382: index += subFGs[i].classMetaData.totalNoOfSubClasses + 1;
1383: if (cmd != null) {
1384: return cmd;
1385: }
1386: }
1387: return fg.classMetaData;
1388: } else {
1389: return null;
1390: }
1391: }
1392:
1393: private SqlExp addSelectFetchGroupImp(SelectExp root, SqlExp pos,
1394: FetchGroup group, boolean includeSuperGroups,
1395: boolean includeSubclasses, FgDs fgDs, FgDs refFgDs,
1396: JoinStructure js, boolean joinAllowed,
1397: SelectExp joinFromExp, JdbcColumn[] joinFromCols,
1398: SqlExp subClsIdCols, JdbcField joinFromField) {
1399:
1400: pos = addFetchGroupFields(root, group, pos, refFgDs, js,
1401: joinAllowed, joinFromExp, joinFromCols);
1402: if (includeSuperGroups) {
1403: // include the fields from all the superclasses by following the
1404: // superFetchGroup references doing joins for superclasses
1405: // in a different table
1406: FetchGroup subG = group;
1407: for (FetchGroup supg = group.superFetchGroup; supg != null; supg = supg.superFetchGroup) {
1408: JdbcClass sc = (JdbcClass) supg.classMetaData.storeClass;
1409:
1410: FgDs nextFgDs = null;
1411: if (fgDs != null && !fgDs.isFinished()) {
1412: nextFgDs = new FgDs(fgDs, supg, "", JdbcFetchGroup
1413: .createOpts(false, false));
1414: fgDs.addSameTable(nextFgDs);
1415: }
1416: if (sc.table == ((JdbcClass) subG.classMetaData.storeClass).table) {
1417: pos = addFetchGroupFields(root, supg, pos,
1418: nextFgDs, js, joinAllowed, joinFromExp,
1419: joinFromCols);
1420: } else {
1421: Join j = joinFromExp.findJoin(sc.table,
1422: joinFromField);
1423: SelectExp se = null;
1424: if (j == null) {
1425: // different table so do a join
1426: se = new SelectExp();
1427: se.outer = root.outer;
1428: se.table = sc.table;
1429: joinFromExp.addJoin(joinFromCols, se.table.pk,
1430: se);
1431: } else {
1432: se = j.selectExp;
1433: }
1434: pos = addFetchGroupFields(se, supg, pos, nextFgDs,
1435: js, joinAllowed, joinFromExp, joinFromCols);
1436: }
1437: subG = supg;
1438: }
1439: }
1440:
1441: if (includeSubclasses && group.subFetchGroups != null) {
1442: FetchGroup[] subgroups = group.subFetchGroups;
1443: for (int i = 0; i < subgroups.length; i++) {
1444: FetchGroup subg = subgroups[i];
1445: JdbcClass sc = (JdbcClass) subg.classMetaData.storeClass;
1446:
1447: FgDs nextFgDs = null;
1448: if (fgDs != null && !fgDs.isFinished()) {
1449: nextFgDs = new FgDs(fgDs, subg, "", JdbcFetchGroup
1450: .createOpts(false, false));
1451: fgDs.addSameTable(nextFgDs);
1452: }
1453:
1454: if (sc.table == ((JdbcClass) subg.classMetaData.pcSuperMetaData.storeClass).table) {
1455: // same table so just add fields
1456: pos = addSelectFetchGroupImp(root, pos, subg,
1457: false, true, fgDs, nextFgDs, js,
1458: joinAllowed, joinFromExp, joinFromCols,
1459: subClsIdCols, joinFromField);
1460: } else {
1461: /**
1462: * Must add the pk cols first and then the fg
1463: */
1464: SelectExp se = new SelectExp();
1465: se.table = sc.table;
1466: se.outer = true;
1467: joinFromExp.addJoin(joinFromCols, se.table.pk, se);
1468:
1469: if (addIdCol(group.classMetaData)) {
1470: subClsIdCols = subClsIdCols.next = ((JdbcClass) subg.classMetaData.storeClass).table.pk[0]
1471: .toSqlExp(se);
1472: }
1473:
1474: pos = addSelectFetchGroupImp(se, pos, subg, false,
1475: true, fgDs, nextFgDs, js, joinAllowed,
1476: joinFromExp, joinFromCols, subClsIdCols,
1477: joinFromField);
1478:
1479: //advance to tail
1480: for (; subClsIdCols.next != null; subClsIdCols = subClsIdCols.next) {
1481: ;
1482: }
1483: }
1484: }
1485: }
1486: return pos;
1487: }
1488:
1489: /**
1490: * Should we add the id col of the table to determine the type? This is intended for vertical inheritance
1491: * where all classes are mapped to separate tables.
1492: */
1493: private boolean addIdCol(ClassMetaData cmd) {
1494: if (((JdbcClass) cmd.storeClass).classIdCol == null) {
1495: return true;
1496: }
1497: return false;
1498: }
1499:
1500: private SqlExp addFetchGroupFields(SelectExp root,
1501: FetchGroup group, SqlExp pos, FgDs fgDs, JoinStructure js,
1502: boolean prefetchAllowed, SelectExp joinFromExp,
1503: JdbcColumn[] lJoinCols) {
1504: boolean joinok = !root.outer;
1505: FetchGroupField[] fields = group.fields;
1506: int n = fields.length;
1507: for (int i = 0; i < n; i++) {
1508: FetchGroupField field = fields[i];
1509: JdbcField jdbcField = (JdbcField) field.fmd.storeField;
1510: if (field.fmd.isEmbeddedRef()) {
1511: continue;
1512: }
1513: SqlExp ce = jdbcField.toColumnExp(SelectExp
1514: .createJoinToSuperTable(root, joinFromExp,
1515: lJoinCols, jdbcField), false);
1516: if (ce != null) {
1517: pos.next = ce;
1518: for (; pos.next != null; pos = pos.next)
1519: ;
1520: }
1521:
1522: if (field.fmd.secondaryField) {
1523: if (fgDs != null && !fgDs.isFinished()) {
1524: new JoinStructure(js, field);
1525: }
1526: }
1527:
1528: if (joinok && jdbcField instanceof JdbcRefField
1529: && field.jdbcUseJoin != JdbcRefField.USE_JOIN_NO) {
1530: if ((group.name.equals(FetchGroup.DFG_NAME) && !field.fmd
1531: .isDefaultFetchGroupTrue())) {
1532: continue;
1533: }
1534: JdbcRefField rf = (JdbcRefField) jdbcField;
1535: FetchGroup ng = field.nextFetchGroup;
1536:
1537: FgDs nextFgDs = null;
1538: JoinStructure nJs = null;
1539: if (fgDs != null && !fgDs.isFinished()) {
1540: nextFgDs = new FgDs(fgDs, ng, "", JdbcFetchGroup
1541: .createOpts(false, false));
1542: fgDs.addReference(nextFgDs, field.fmd);
1543: nJs = new JoinStructure(js, field);
1544: }
1545:
1546: // see if we have an existing join to this table
1547: SelectExp se;
1548: Join j = root.findJoin(rf);
1549: if (j == null) {
1550: se = new SelectExp();
1551: se.outer = field.jdbcUseJoin == JdbcRefField.USE_JOIN_OUTER;
1552: se.table = ((JdbcClass) ng.classMetaData.storeClass).table;
1553: se.jdbcField = rf;
1554: root.addJoin(rf.cols, se.table.pk, se);
1555: } else {
1556: if (j.next != null) {
1557: if (j == root.joinList) {
1558: root.joinList = j.next;
1559: } else {
1560: Join before = JdbcJDOQLCompiler
1561: .findJoinBefore(j, root.joinList);
1562: if (before != null) {
1563: before.next = j.next;
1564: }
1565: }
1566:
1567: //go to the end of the list
1568: Join lastJoin = getLastJoin(j);
1569: lastJoin.next = j;
1570: j.next = null;
1571: }
1572: se = j.selectExp;
1573: }
1574: addSelectFetchGroupImp(se, ng, true, true, nextFgDs,
1575: nJs, prefetchAllowed, root, rf.cols,
1576: new SqlExp(), true, false, rf);
1577: }
1578: }
1579: return pos;
1580: }
1581:
1582: /**
1583: * Create a state from rs which must contain columns as added by
1584: * addSelectFetchGroup. If there are no main table columns in the group
1585: * then rs may be null. If includeSubclasses is true then fields from sub
1586: * classes will also be read. The nextCol parameter is set to the
1587: * index of the column after the last column read if it is not null.
1588: * The oid parameter will be resolved from the created state.
1589: */
1590: public State createStateImp(ResultSet rs, OID oid,
1591: FetchGroup group, boolean forUpdate, int firstCol,
1592: MutableInt nextCol, boolean includeSubclasses,
1593: StateContainer container, FgDs fgDs,
1594: boolean fetchPass2Fields, boolean crossJoinFirstPass2Field,
1595: JoinStructure js) throws SQLException {
1596: ClassMetaData cmd = group.classMetaData;
1597: if (includeSubclasses) {
1598: /**
1599: * This will only work if the hierarchy is mapped to the same table.
1600: */
1601: if (((JdbcClass) cmd.storeClass).classIdCol != null) {
1602: // read the classId column from rs to decide what the real class is
1603: JdbcClass jdbcClass = (JdbcClass) cmd.storeClass;
1604: JdbcColumn classIdCol = jdbcClass.classIdCol;
1605: Object classId = classIdCol.get(rs, firstCol++);
1606: if (rs.wasNull()) {
1607: throw BindingSupportImpl.getInstance()
1608: .objectNotFound(
1609: "No row for " + cmd.storeClass
1610: + " " + oid.toSString()
1611: + " OR " + classIdCol.name
1612: + " is null for row");
1613: }
1614: cmd = jdbcClass.findClass(classId);
1615: if (cmd == null) {
1616: throw BindingSupportImpl
1617: .getInstance()
1618: .fatalDatastore(
1619: "Row for OID "
1620: + oid.toSString()
1621: + " is not in the heirachy starting at "
1622: + group.classMetaData.storeClass
1623: + " (" + classIdCol.name
1624: + " for row is " + classId
1625: + ")");
1626: }
1627: } else {
1628: if (cmd.pcSubclasses != null) {
1629: if (((JdbcClass) cmd.storeClass).readAsClass != null) {
1630: cmd = ((JdbcClass) cmd.storeClass).readAsClass;
1631: } else {
1632: //must start from end of rs and work up to determine type
1633: cmd = extractType(rs, group, firstCol);
1634: if (cmd == null) {
1635: throw BindingSupportImpl
1636: .getInstance()
1637: .internal(
1638: "The instance type for "
1639: + "this row could not be determined");
1640: }
1641: //must update firstCol with the amount of possible id cols
1642: firstCol += (group.classMetaData.totalNoOfSubClasses);
1643: }
1644: }
1645: }
1646: }
1647:
1648: State state = cmd.createState();
1649: oid.resolve(state);
1650:
1651: state.copyFields(oid);
1652:
1653: //this will read the stuff into the state
1654: firstCol = readFetchGroupFields(rs, oid, state, group, firstCol);
1655: //let the same table fetchGroups read their stuff from rs
1656: final FgDs[] sameTableFgDss = fgDs.getSameTableFgDss();
1657: for (int i = 0; i < sameTableFgDss.length; i++) {
1658: FgDs sameTableFgDs = sameTableFgDss[i];
1659: if (cmd.isAncestorOrSelf(sameTableFgDs.fg.classMetaData)) {
1660: firstCol = readFetchGroupFields(rs, oid, state,
1661: sameTableFgDs.fg, firstCol);
1662: } else {
1663: firstCol = skipFetchGroupFields(sameTableFgDs.fg,
1664: firstCol);
1665: }
1666: }
1667:
1668: //add the state to the receiver so as to avoid recursive calls
1669: container.visited(oid);
1670:
1671: /**
1672: * This follows pass2Fields and joins from fg.
1673: */
1674: firstCol = readFetchGroupPass2Fields(rs, oid, state, group,
1675: forUpdate, firstCol, fgDs, container, fetchPass2Fields,
1676: crossJoinFirstPass2Field);
1677: for (int i = 0; i < sameTableFgDss.length; i++) {
1678: FgDs sameTableFgDs = sameTableFgDss[i];
1679: if (cmd.isAncestorOrSelf(sameTableFgDs.fg.classMetaData)) {
1680: firstCol = readFetchGroupPass2Fields(rs, oid, state,
1681: sameTableFgDs.fg, forUpdate, firstCol,
1682: sameTableFgDs, container, fetchPass2Fields,
1683: false);
1684: } else {
1685: firstCol = skipFetchGroupPass2Fields(firstCol,
1686: sameTableFgDs);
1687: }
1688: }
1689:
1690: if (nextCol != null)
1691: nextCol.value = firstCol;
1692:
1693: if (Debug.DEBUG) {
1694: if (Modifier.isAbstract(state.getClassMetaData(jmd).cls
1695: .getModifiers())) {
1696: throw BindingSupportImpl.getInstance().internal(
1697: "The cmd of this state is abstract");
1698: }
1699: }
1700: return state;
1701: }
1702:
1703: /**
1704: * Read all the fields for the fetch group. This does not pick up reference
1705: * fields joined in.
1706: */
1707: private int readFetchGroupFields(ResultSet rs, OID oid,
1708: State state, FetchGroup group, int firstCol) {
1709: // populate the state with all the fields - this will create OIDs for
1710: // references
1711: try {
1712: ((JdbcState) state).copyPass1Fields(rs, group, firstCol);
1713: // System.out.println("\n\n after copy pass1Fields oid = " + oid + " state = " + state + "\n\n\n");
1714: } catch (Exception e) {
1715: throw handleException(
1716: "Error reading fields from ResultSet: "
1717: + oid.toSString() + ": " + e + "\n"
1718: + getResultSetInfo(rs), e);
1719: }
1720: return firstCol + group.jdbcTotalCols;
1721: }
1722:
1723: private Join getLastJoin(Join j) {
1724: Join lastJoin = j.next;
1725: for (;;) {
1726: if (lastJoin.next == null)
1727: break;
1728: lastJoin = lastJoin.next;
1729: }
1730: return lastJoin;
1731: }
1732:
1733: private static String getResultSetInfo(ResultSet rs) {
1734: if (rs instanceof LoggingResultSet) {
1735: LoggingResultSet lrs = (LoggingResultSet) rs;
1736: return "Row data read: " + lrs.getRowDataString() + "\n"
1737: + lrs.getSql();
1738: } else {
1739: return "(set event logging to all for more info on the data and SQL)";
1740: }
1741: }
1742:
1743: private int skipFetchGroupFields(FetchGroup group, int firstCol) {
1744: return firstCol + group.jdbcTotalCols;
1745: }
1746:
1747: private int skipFetchGroupPass2Fields(int firstCol, FgDs fgDs) {
1748: return firstCol + fgDs.getChildrenColumnCount();
1749: }
1750:
1751: private int readFetchGroupPass2Fields(ResultSet rs, OID oid,
1752: State state, FetchGroup group, boolean forUpdate,
1753: int firstCol, FgDs fgDs, StateContainer container,
1754: boolean fetchPass2Fields, boolean crossJoinFirstPass2Field)
1755: throws SQLException {
1756: FgDs currentFgDs;
1757: int currentFgIndex = 0;
1758: FetchGroupField[] fields = group.fields;
1759: int n = fields.length;
1760: for (int i = 0; i < n; i++) {
1761: FetchGroupField field = fields[i];
1762: FieldMetaData fmd = field.fmd;
1763:
1764: if (fmd.secondaryField) {
1765: if (crossJoinFirstPass2Field
1766: && group.crossJoinedCollectionField == field) {
1767: } else {
1768: if (fetchPass2Fields) {
1769: fetchPass2Field(oid, state, field, forUpdate,
1770: container, true, null);
1771: } else {
1772: ((JdbcCollectionField) field.fmd.storeField)
1773: .fillStateWithEmpty(field, state);
1774: }
1775: }
1776: continue;
1777: }
1778:
1779: if (fmd.category != FieldMetaData.CATEGORY_REF)
1780: continue;
1781: if (field.jdbcUseJoin == JdbcRefField.USE_JOIN_NO)
1782: continue;
1783:
1784: FetchGroup nextFetchGroup = field.nextFetchGroup;
1785: if (fgDs.isEmpty()) {
1786: currentFgDs = null;
1787: } else {
1788: currentFgDs = fgDs.get(currentFgIndex);
1789: }
1790:
1791: /**
1792: * If these fg are not the same then the fg was not included by the sql generation
1793: * so we can skip it.
1794: */
1795: if (currentFgDs != null && currentFgDs.fg == nextFetchGroup
1796: && currentFgDs.refFmd == field.fmd) {
1797: JdbcOID roid = (JdbcOID) state
1798: .getInternalObjectField(fmd.stateFieldNo);
1799: if (roid == null
1800: || !container.isStateRequired(roid,
1801: nextFetchGroup)) {
1802: firstCol += currentFgDs.columnSkipCount;
1803: } else {
1804: State ns = null;
1805: int lColIndex = firstCol;
1806: if (currentFgDs.isRefIdFieldsAdded()) {
1807: if (roid != null
1808: && !roid
1809: .validateKeyFields(rs, firstCol)) {
1810: state.setInternalObjectField(
1811: fmd.stateFieldNo, null);
1812: Utils.checkToThrowRowNotFound(oid, roid,
1813: jmd);
1814: ns = NULLState.NULL_STATE;
1815: container.addState(roid, ns);
1816: }
1817: //update the skip count
1818: firstCol += ((JdbcClass) currentFgDs.fg.classMetaData.storeClass).table.pk.length;
1819: }
1820:
1821: if (ns != NULLState.NULL_STATE) {
1822: MutableInt mut = new MutableInt();
1823: ns = createStateImp(rs, roid, nextFetchGroup,
1824: forUpdate, firstCol, mut, true,
1825: container, currentFgDs,
1826: fetchPass2Fields, false, null);
1827: roid.resolve(ns);
1828: container.addState(roid, ns);
1829: }
1830: firstCol = lColIndex + currentFgDs.columnSkipCount;
1831:
1832: if (Debug.DEBUG) {
1833: if (Modifier.isAbstract(ns
1834: .getClassMetaData(jmd).cls
1835: .getModifiers())) {
1836: throw BindingSupportImpl
1837: .getInstance()
1838: .internal(
1839: "The cmd of this state is abstract");
1840: }
1841: }
1842: }
1843: currentFgIndex++;
1844: } else {
1845: continue;
1846: }
1847: }
1848:
1849: if (Debug.DEBUG) {
1850: if (fgDs.getAmountOfRefFgDs() != currentFgIndex) {
1851: throw BindingSupportImpl.getInstance().internal(
1852: "Not the right amount of refFg was read");
1853: }
1854: }
1855: return firstCol;
1856: }
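
// Illustrative note (not part of the original code): when a joined reference
// fetch group is present in the SELECT but its row is not needed (roid is null
// or the container does not require its state), firstCol simply advances by
// currentFgDs.columnSkipCount. For example, if a hypothetical reference field
// contributes 5 columns to the SELECT, firstCol moves from 4 to 9 without
// reading anything from the ResultSet.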
1857:
1858: private void doUpdates(StatesToStore toStore,
1859: StateContainer container, boolean retainValues)
1860: throws SQLException {
1861: initPersistGraph(toStore.isFullSortRequired(), toStore.size());
1862:
1863: OID oid = null;
1864: int n = toStore.size();
1865: for (int i = 0; i < n; i++) {
1866: oid = toStore.oids[i];
1867:
1868: // check if the state needs to be returned to the client
1869: // i.e. it has autoset fields or is new
1870: ClassMetaData cmd = toStore.states[i].getClassMetaData(jmd);
1871: if (retainValues || cmd.hasAutoSetFields) {
1872: container.add(oid, toStore.states[i]);
1873: } else if (oid.isNew()) {
1874: container.add(oid, null);
1875: }
1876:
1877: checkReqFieldsOnUpdate(oid, cmd, toStore, i);
1878: activePersistGraph.add(oid, toStore.origStates[i],
1879: toStore.states[i]);
1880: }
1881: activePersistGraph.doAutoSets();
1882: activePersistGraph.sort();
1883: if (Debug.DEBUG) {
1884: activePersistGraph.dump();
1885: }
1886: persistPass1(activePersistGraph);
1887: persistPass2(activePersistGraph);
1888: }
1889:
1890: /**
1891: * Make sure required fields (e.g. jdoVersion) are filled in the original state.
1892: * Fetch the data from the database if needed.
1893: */
1894: private void checkReqFieldsOnUpdate(OID oid, ClassMetaData cmd,
1895: StatesToStore toStore, int i) throws SQLException {
1896: if (!oid.isNew()) {
1897: FetchGroup reqFetchGroup = cmd.reqFetchGroup;
1898: if (reqFetchGroup != null) {
1899: State old = toStore.origStates[i];
1900: if (!old.containsFetchGroup(reqFetchGroup)) {
1901: State s = cache.getState(oid, reqFetchGroup);
1902: if (s == null) {
1903: s = getStateParColFetch((JdbcOID) oid,
1904: reqFetchGroup, false,
1905: DummyStateContainer.INSTANCE);
1906: }
1907: old.updateNonFilled(s);
1908: }
1909: }
1910: }
1911: }
1912:
1913: /**
1914: * Select the correct PersistGraph instance to use and make sure it is
1915: * big enough. It is cleared for use.
1916: */
1917: private void initPersistGraph(boolean fullSort, int minSize) {
1918: if (fullSort) {
1919: if (persistGraphFullSort == null
1920: || minSize > persistGraphFullSort.size()) {
1921: persistGraphFullSort = new PersistGraphFullSort(jmd,
1922: minSize * 2);
1923: } else {
1924: persistGraphFullSort.clear();
1925: }
1926: activePersistGraph = persistGraphFullSort;
1927: } else {
1928: if (persistGraphPartialSort == null
1929: || minSize > persistGraphPartialSort.size()) {
1930: persistGraphPartialSort = new PersistGraph(jmd,
1931: minSize * 2);
1932: } else {
1933: persistGraphPartialSort.clear();
1934: }
1935: activePersistGraph = persistGraphPartialSort;
1936: }
1937: activePersistGraph.optimistic = this.optimistic;
1938: }
1939:
1940: /**
1941: * Persist main table fields. All the OID's for new objects are replaced
1942: * with real OID's.
1943: *
1944: * @see #persistPass2
1945: */
1946: public void persistPass1(PersistGraph graph) {
1947: try {
1948: int[] fieldNos = new int[jmd.maxFieldsLength];
1949: Object[] oidData = new Object[((JdbcMetaData) jmd.jdbcMetaData).maxPkSimpleColumns];
1950:
1951: CharBuf s = new CharBuf();
1952: boolean haveNewObjects = false;
1953: int graphSize = graph.size();
1954:
1955: // generate primary keys for all new objects with preinsert keygens
1956: // that do not already have keys and all application identity
1957: // instances not using a post insert keygen
1958: for (int si = 0; si < graphSize; si++) {
1959: OID oid = graph.getOID(si);
1960: if (!oid.isNew()
1961: || ((NewObjectOID) oid).realOID != null)
1962: continue;
1963: haveNewObjects = true;
1964: ClassMetaData cmd = oid.getClassMetaData();
1965: JdbcKeyGenerator keygen = ((JdbcClass) cmd.storeClass).jdbcKeyGenerator;
1966: if (keygen == null) {
1967: if (cmd.identityType == MDStatics.IDENTITY_TYPE_APPLICATION) {
1968: State ns = graph.getNewState(si);
1969: if (Debug.DEBUG) {
1970: if (!ns.containsApplicationIdentityFields()) {
1971: throw BindingSupportImpl.getInstance()
1972: .internal(
1973: "pk fields not filled for appid class\n"
1974: + ns);
1975: }
1976: }
1977: NewObjectOID noid = (NewObjectOID) oid;
1978: ns.copyKeyFields(noid.realOID = cmd
1979: .createOID(true));
1980: continue;
1981: } else {
1982: throw BindingSupportImpl
1983: .getInstance()
1984: .runtime(
1985: "Class "
1986: + cmd.qname
1987: + " has identity-type "
1988: + MDStaticUtils
1989: .toIdentityTypeString(cmd.identityType)
1990: + " but no jdbc-key-generator");
1991: }
1992: } else {
1993: // if it is application identity and the state already contains the key
1994: // fields then we do not use the keygen to generate them
1995: if (cmd.identityType == MDStatics.IDENTITY_TYPE_APPLICATION) {
1996: State ns = graph.getNewState(si);
1997: if (ns.containsValidAppIdFields()) {
1998: ns
1999: .copyKeyFields(((NewObjectOID) oid).realOID = cmd
2000: .createOID(true));
2001: continue;
2002: }
2003: }
2004: }
2005: if (keygen.isPostInsertGenerator())
2006: continue;
2007:
2008: // count how many keys we need
2009: int keyCount = 1;
2010: for (int i = si + 1; i < graphSize; i++) {
2011: OID nextOid = graph.getOID(i);
2012: if (!nextOid.isNew()
2013: || nextOid.getClassIndex() != cmd.index)
2014: break;
2015: if (((NewObjectOID) nextOid).realOID == null)
2016: keyCount++;
2017: }
2018:
2019: Connection kgcon = null;
2020: boolean rollback = true;
2021: try {
2022: OID realOID;
2023: for (; keyCount > 0;) {
2024: NewObjectOID noid = (NewObjectOID) graph
2025: .getOID(si++);
2026: if (noid.realOID != null)
2027: continue;
2028: noid.realOID = realOID = cmd.createOID(true);
2029: boolean needKgcon = keygen
2030: .isRequiresOwnConnection();
2031: if (kgcon == null && needKgcon) {
2032: kgcon = conSrc.getConnection(true, false);
2033: }
2034: keygen.generatePrimaryKeyPre(cmd.qname,
2035: ((JdbcClass) cmd.storeClass).table,
2036: keyCount--, oidData, needKgcon ? kgcon
2037: : con());
2038: realOID.copyKeyFields(oidData);
2039: }
2040: rollback = false;
2041: } finally {
2042: if (kgcon != null) {
2043: if (rollback) {
2044: kgcon.rollback();
2045: } else {
2046: kgcon.commit();
2047: }
2048: conSrc.returnConnection(kgcon);
2049: }
2050: }
2051: si--;
2052: }
2053:
2054: // generate the inserts and updates
2055: IntArray toUpdateIndexes = new IntArray();
2056: for (int i = 0; i < graphSize;) {
2057: OID oid = graph.getOID(i);
2058: ClassMetaData cmd = oid.getClassMetaData();
2059: if (oid.isNew()) {
2060: toUpdateIndexes.clear();
2061: i = generateInserts((NewObjectOID) oid, i, cmd,
2062: graph, fieldNos, s, oidData,
2063: toUpdateIndexes);
2064: if (toUpdateIndexes.size() > 0) {
2065: //must update these
2066: int updateStartIndex = 0;
2067: for (;;) {
2068: updateStartIndex = generateUpdatesOfCircularReferences(
2069: oid, cmd, fieldNos, haveNewObjects,
2070: s, toUpdateIndexes,
2071: updateStartIndex, graph);
2072: if (updateStartIndex == toUpdateIndexes
2073: .size())
2074: break;
2075: }
2076: }
2077: } else {
2078: i = generateUpdates(oid, i, cmd, graph, fieldNos,
2079: haveNewObjects, s);
2080: }
2081: }
2082: } catch (SQLException e) {
2083: throw handleException(e);
2084: }
2085: }
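
// Illustrative sketch of the pre-insert key generation above (hypothetical
// classes): if the sorted graph contains [Order(new), Order(new), Order(new),
// Item(new)] and Order uses a pre-insert key generator, keyCount is counted
// as 3 and the three Order keys are generated in one run (on the key
// generator's own connection if it requires one) before any INSERT is issued.
// Post-insert generators (e.g. autoincrement columns) are skipped here and
// handled in generateInserts after each row is inserted.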
2086:
2087: /**
2088: * Generate insert statement(s) for pass1 changes to one or more new
2089: * objects. The only objects without keys will be those using app identity
2090: * and no keygen and those using postInsert keygens.
2091: *
2092: * @return The index of the last object inserted + 1
2093: */
2094: private int generateInserts(NewObjectOID oid, int index,
2095: ClassMetaData cmd, PersistGraph graph, int[] fieldNos,
2096: CharBuf s, Object[] oidData, IntArray toUpdateIndexes)
2097: throws SQLException {
2098:
2099: int identityType = cmd.identityType;
2100: boolean appIdentity = identityType == MDStatics.IDENTITY_TYPE_APPLICATION;
2101: JdbcClass jdbcClass = (JdbcClass) cmd.storeClass;
2102: int graphSize = graph.size();
2103:
2104: JdbcColumn classIdCol = jdbcClass.classIdCol;
2105: Object classId = classIdCol == null ? null
2106: : jdbcClass.jdbcClassId;
2107:
2108: JdbcKeyGenerator keygen = jdbcClass.jdbcKeyGenerator;
2109:
2110: // decide if we can use statement batching
2111: boolean batchPossible = (keygen == null || !keygen
2112: .isPostInsertGenerator())
2113: && useBatchInsert && !jdbcClass.noBatching;
2114:
2115: // count how many states we have with the same class that are new
2116: int sameClassCount = 1;
2117: for (int i = index + 1; i < graphSize; sameClassCount++, i++) {
2118: OID nextOid = graph.getOID(i);
2119: if (!nextOid.isNew()
2120: || nextOid.getClassIndex() != cmd.index)
2121: break;
2122: }
2123:
2124: Connection con = con();
2125:
2126: for (; sameClassCount > 0;) {
2127:
2128: State ns = graph.getNewState(index);
2129:
2130: // figure out what key generation needs to happen
2131: boolean useKeyGenPre = false;
2132: boolean useKeyGenPost = false;
2133: boolean fillFieldsFromOid = false;
2134: boolean clearAppIdFields = false;
2135: if (appIdentity) {
2136: if (keygen != null) {
2137: useKeyGenPost = keygen.isPostInsertGenerator();
2138: useKeyGenPre = !useKeyGenPost;
2139: fillFieldsFromOid = true;
2140: clearAppIdFields = true;
2141: ns.clearApplicationIdentityFields();
2142: }
2143: } else {
2144: useKeyGenPost = keygen.isPostInsertGenerator();
2145: useKeyGenPre = !useKeyGenPost;
2146: }
2147:
2148: // count how many states we can insert with the same SQL
2149: int count = 1;
2150: for (int i = index + 1; count < sameClassCount; count++, i++) {
2151: // make sure the next state has the same field numbers
2152: State nextState = graph.getNewState(i);
2153: if (clearAppIdFields)
2154: nextState.clearApplicationIdentityFields();
2155: if (nextState.compareToPass1(ns) != 0)
2156: break;
2157: }
2158:
2159: boolean batch = batchPossible && count > 1;
2160: int numFieldNos = ns.getPass1FieldNos(fieldNos);
2161:
2162: // do a stripe of inserts for each table for the class
2163: int startIndex = index;
2164: int startSameClassCount = sameClassCount;
2165: int startCount = count;
2166: for (int tableNo = 0; tableNo < jdbcClass.allTables.length; tableNo++) {
2167: JdbcTable table = jdbcClass.allTables[tableNo];
2168: if (tableNo > 0) { // reset after first table
2169: classIdCol = null;
2170: useKeyGenPre = true;
2171: useKeyGenPost = false;
2172: fillFieldsFromOid = false;
2173: ns = graph.getNewState(index = startIndex);
2174: oid = (NewObjectOID) graph.getOID(index);
2175: sameClassCount = startSameClassCount;
2176: count = startCount;
2177: }
2178:
2179: // create PreparedStatement and do count inserts for each
2180: PreparedStatement ps = null;
2181: try {
2182:
2183: // create ps for the insert
2184: boolean lobColFound = createInsertSql(jdbcClass,
2185: table, useKeyGenPre, classIdCol, fieldNos,
2186: numFieldNos, s, ns);
2187: if (useKeyGenPost) {
2188: String suffix = keygen
2189: .getPostInsertSQLSuffix(table);
2190: if (suffix != null)
2191: s.append(suffix);
2192: }
2193: String sql = s.toString();
2194: ps = con.prepareStatement(sql);
2195:
2196: for (;;) {
2197: boolean alreadyHaveRealOID = oid.realOID != null;
2198: JdbcOID realOID;
2199: if (alreadyHaveRealOID) {
2200: realOID = (JdbcOID) oid.realOID;
2201: } else {
2202: realOID = (JdbcOID) (oid.realOID = cmd
2203: .createOID(true));
2204: }
2205:
2206: int pos = useKeyGenPre ? realOID.setParams(ps,
2207: 1) : 1;
2208:
2209: if (classIdCol != null) {
2210: classIdCol.set(ps, pos++, classId);
2211: }
2212:
2213: // do the insert
2214: if (tableNo == 0) {
2215: if (ns.replaceNewObjectOIDs(fieldNos,
2216: numFieldNos)) {
2217: toUpdateIndexes.add(index);
2218: }
2219: }
2220: try {
2221: ((JdbcState) ns).setParams(ps, fieldNos, 0,
2222: numFieldNos, pos, graph, tableNo);
2223: } catch (Exception e) {
2224: throw handleException(
2225: "Error setting parameters on PreparedStatement "
2226: + "for insert of '"
2227: + Utils.toString(realOID)
2228: + "':\n"
2229: + JdbcUtils.toString(e)
2230: + "\n"
2231: + JdbcUtils
2232: .getPreparedStatementInfo(
2233: sql, ps), e);
2234: }
2235: if (batch) {
2236: ps.addBatch();
2237: } else {
2238: try {
2239: ps.execute();
2240: } catch (Exception e) {
2241: throw handleException(
2242: "Insert of '"
2243: + Utils
2244: .toString(realOID)
2245: + "' failed: "
2246: + JdbcUtils.toString(e)
2247: + "\n"
2248: + JdbcUtils
2249: .getPreparedStatementInfo(
2250: sql, ps),
2251: e);
2252: }
2253: }
2254:
2255: // do post insert key generation if required
2256: if (useKeyGenPost) {
2257: keygen.generatePrimaryKeyPost(cmd.qname,
2258: table, oidData, con,
2259: ((PooledPreparedStatement) ps)
2260: .getStatement());
2261: realOID.copyKeyFields(oidData);
2262: }
2263:
2264: if (fillFieldsFromOid)
2265: ns.copyFields(realOID);
2266:
2267: ++index;
2268: if (--sameClassCount == 0)
2269: break;
2270: oid = (NewObjectOID) graph.getOID(index);
2271: if (--count == 0)
2272: break;
2273:
2274: ns = graph.getNewState(index);
2275: }
2276: if (batch) {
2277: try {
2278: ps.executeBatch();
2279: } catch (Exception e) {
2280: throw handleException(
2281: "Batch insert failed: "
2282: + JdbcUtils.toString(e)
2283: + "\n"
2284: + JdbcUtils
2285: .getPreparedStatementInfo(
2286: sql, ps), e);
2287: }
2288: }
2289:
2290: // If there was at least one Oracle style LOB col we have to
2291: // select all of the non null LOB cols back to give their
2292: // converters a chance to set the data in the LOB.
2293: int lobNumFieldNos;
2294: if (lobColFound
2295: && (lobNumFieldNos = removeNullLOBFields(
2296: fieldNos, numFieldNos, ns)) > 0) {
2297: selectAndUpdateOracleLOBCols(s, startIndex,
2298: index, jdbcClass, table, fieldNos,
2299: lobNumFieldNos, graph);
2300: }
2301:
2302: } finally {
2303: cleanup(ps);
2304: }
2305: }
2306: }
2307:
2308: return index;
2309: }
2310:
2311: /**
2312: * Look at all the negative entries in fieldNos, make any that are for
2313: * non-null fields in state positive and copy them to the beginning of
2314: * fieldNos. Return the number of non-null negative entries found and
2315: * converted. This effectively compresses the array of fieldNos for
2316: * faster LOB processing.
2317: */
2318: private int removeNullLOBFields(int[] fieldNos, int numFieldNos,
2319: State state) {
2320: int pos = 0;
2321: for (int i = 0; i < numFieldNos; i++) {
2322: int fieldNo = fieldNos[i];
2323: if (fieldNo < 0) {
2324: fieldNo = -(fieldNo + 1);
2325: if (!state.isNull(fieldNo))
2326: fieldNos[pos++] = fieldNo;
2327: }
2328: }
2329: return pos;
2330: }
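
// Worked example (hypothetical values): with fieldNos = {2, -4, 7, -9} and
// numFieldNos = 4, the negative entries decode to field numbers 3 and 8.
// If state field 3 is non-null and field 8 is null, the array becomes
// {3, -4, 7, -9} and the method returns 1, i.e. only fieldNos[0] is a LOB
// field that still needs to be selected back.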
2331:
2332: /**
2333: * Generate SQL to insert a row into the table for a class hierarchy. Any
2334: * entries in fieldNos for fields whose appendInsertValueList returns true
2335: * (i.e. they did not add a replaceable param) are made negative. This is
2336: * used to handle Oracle LOB columns. Returns true if there was at least
2337: * one such column and false otherwise.
2338: */
2339: private boolean createInsertSql(JdbcClass jdbcClass,
2340: JdbcTable table, boolean useKeyGenPre,
2341: JdbcColumn classIdCol, int[] fieldNos, int numFieldNos,
2342: CharBuf s, State state) {
2343: JdbcField[] stateFields = jdbcClass.stateFields;
2344: s.clear();
2345: s.append("INSERT INTO ");
2346: s.append(table.name);
2347: s.append(" (");
2348: if (useKeyGenPre)
2349: table.appendInsertPKColumnList(s);
2350: boolean first = !useKeyGenPre;
2351: if (classIdCol != null) {
2352: if (first) {
2353: first = false;
2354: } else {
2355: s.append(',');
2356: s.append(' ');
2357: }
2358: classIdCol.appendNames(s);
2359: }
2360: for (int i = 0; i < numFieldNos; i++) {
2361: int fieldNo = fieldNos[i];
2362: JdbcField f = stateFields[fieldNo];
2363: if (f.mainTableColsForUpdate == null
2364: || f.mainTable != table)
2365: continue;
2366: if (first) {
2367: first = false;
2368: } else {
2369: s.append(',');
2370: s.append(' ');
2371: }
2372: f.appendInsertColumnList(s);
2373: }
2374: s.append(") VALUES (");
2375: if (useKeyGenPre)
2376: table.appendInsertPKValueList(s);
2377: first = !useKeyGenPre;
2378: if (classIdCol != null) {
2379: if (first) {
2380: first = false;
2381: } else {
2382: s.append(',');
2383: s.append(' ');
2384: }
2385: classIdCol.appendParams(s);
2386: }
2387: boolean lobColFound = false;
2388: for (int i = 0; i < numFieldNos; i++) {
2389: int fieldNo = fieldNos[i];
2390: JdbcField f = stateFields[fieldNo];
2391: if (f.mainTableColsForUpdate == null
2392: || f.mainTable != table)
2393: continue;
2394: if (first) {
2395: first = false;
2396: } else {
2397: s.append(',');
2398: s.append(' ');
2399: }
2400: if (f.appendInsertValueList(s, state)) {
2401: // no replaceable param so skip field when params are set
2402: fieldNos[i] = -(fieldNo + 1);
2403: lobColFound = true;
2404: }
2405: }
2406: s.append(")");
2407: return lobColFound;
2408: }
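
// Illustrative example of the generated SQL (table and column names are
// hypothetical; the exact text depends on the mapping, driver and key
// generator): with a pre-insert key generator and a class-id column the
// statement looks roughly like
//   INSERT INTO EMPLOYEE (EMP_ID, JDO_CLASS, NAME, AGE) VALUES (?, ?, ?, ?)
// Oracle style LOB columns append a literal value instead of a '?'
// (appendInsertValueList returns true for them), so their fieldNos entries
// are made negative and the LOB data is written afterwards by
// selectAndUpdateOracleLOBCols.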
2409:
2410: /**
2411: * Select all the LOB cols for the OIDs in the graph and update them.
2412: * All the states will have the same LOB fields and only the LOB fields
2413: * for the given table will be in the fieldNos array.
2414: */
2415: private void selectAndUpdateOracleLOBCols(CharBuf s,
2416: int startIndex, int index, JdbcClass jdbcClass,
2417: JdbcTable table, int[] fieldNos, int numFieldNos,
2418: PersistGraph graph) throws SQLException {
2419: Connection con = con();
2420: ResultSet rs = null;
2421: PreparedStatement ps = null;
2422: try {
2423: int oidCount = index - startIndex;
2424: int maxOIDsForIN = jdbcClass.getMaxOIDsForIN(sqlDriver);
2425:
2426: if (oidCount > 1 && maxOIDsForIN > 1) {
2427:
2428: // process OIDs in blocks using IN (?, .., ?)
2429: Map map = new HashMap(maxOIDsForIN * 2);
2430: JdbcOID key = (JdbcOID) jdbcClass.cmd.createOID(true);
2431:
2432: int fullBlocks = oidCount / maxOIDsForIN;
2433: if (fullBlocks > 0) {
2434: s.clear();
2435: createSelectLOBsSql(jdbcClass, table, fieldNos,
2436: numFieldNos, s, maxOIDsForIN);
2437: ps = con.prepareStatement(s.toString());
2438: for (int i = 0; i < fullBlocks; i++) {
2439: for (int j = 0; j < maxOIDsForIN;) {
2440: OID oid = graph.getOID(startIndex);
2441: if (oid instanceof NewObjectOID) {
2442: oid = ((NewObjectOID) oid).realOID;
2443: }
2444: map.put(oid, graph
2445: .getNewState(startIndex++));
2446: ((JdbcOID) oid).setParams(ps, ++j);
2447: }
2448: rs = ps.executeQuery();
2449: for (int j = 0; j < maxOIDsForIN; j++) {
2450: rs.next();
2451: key.copyKeyFields(rs, 1);
2452: State ns = (State) map.get(key);
2453: ((JdbcState) ns).setOracleStyleLOBs(rs,
2454: fieldNos, numFieldNos, 2);
2455: }
2456: rs.close();
2457: }
2458: rs = null;
2459: ps.close();
2460: ps = null;
2461: }
2462:
2463: oidCount = oidCount % maxOIDsForIN;
2464: if (oidCount > 1) {
2465: // process partial block
2466: s.clear();
2467: createSelectLOBsSql(jdbcClass, table, fieldNos,
2468: numFieldNos, s, oidCount);
2469: ps = con.prepareStatement(s.toString());
2470: map.clear();
2471: for (int j = 0; j < oidCount;) {
2472: OID oid = graph.getOID(startIndex);
2473: if (oid instanceof NewObjectOID) {
2474: oid = ((NewObjectOID) oid).realOID;
2475: }
2476: map.put(oid, graph.getNewState(startIndex++));
2477: ((JdbcOID) oid).setParams(ps, ++j);
2478: }
2479: rs = ps.executeQuery();
2480: for (int j = 0; j < oidCount; j++) {
2481: rs.next();
2482: key.copyKeyFields(rs, 1);
2483: State ns = (State) map.get(key);
2484: ((JdbcState) ns).setOracleStyleLOBs(rs,
2485: fieldNos, numFieldNos, 2);
2486: }
2487: rs.close();
2488: rs = null;
2489: ps.close();
2490: ps = null;
2491: oidCount = 0;
2492: }
2493: }
2494:
2495: if (oidCount == 1 || maxOIDsForIN <= 1) {
2496: // process OIDs one at a time
2497: s.clear();
2498: createSelectLOBsSql(jdbcClass, table, fieldNos,
2499: numFieldNos, s, 1);
2500: ps = con.prepareStatement(s.toString());
2501: for (int i = startIndex; i < index; i++) {
2502: OID oid = graph.getOID(i);
2503: if (oid instanceof NewObjectOID) {
2504: oid = ((NewObjectOID) oid).realOID;
2505: }
2506: ((JdbcOID) oid).setParams(ps, 1);
2507: rs = ps.executeQuery();
2508: if (!rs.next()) {
2509: throw BindingSupportImpl.getInstance()
2510: .fatalDatastore(
2511: "Row not found: "
2512: + oid.toSString());
2513: }
2514: State ns = graph.getNewState(i);
2515: ((JdbcState) ns).setOracleStyleLOBs(rs, fieldNos,
2516: numFieldNos, 1);
2517: rs.close();
2518: }
2519: rs = null;
2520: ps.close();
2521: ps = null;
2522: }
2523: } finally {
2524: cleanup(rs);
2525: cleanup(ps);
2526: }
2527: }
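
// Illustrative sketch of the block splitting above (hypothetical numbers):
// with 8 newly inserted rows and getMaxOIDsForIN(sqlDriver) == 3 the method
// issues two SELECT ... IN (?,?,?) queries for the first 6 OIDs and one
// SELECT ... IN (?,?) query for the remaining 2. With 7 rows the remainder
// is 1, so the last OID is fetched with the single-row WHERE-on-primary-key
// form instead.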
2528:
2529: /**
2530: * Generate SQL to select Oracle style LOB columns for a table in a
2531: * class hierarchy. The SQL must provide for blocksz OID parameters for
2532: * the query. If there is more than one then an IN must be used. The
2533: * blocksz will always be 1 if the table uses a composite primary key.
2534: *
2535: * @see #createInsertSql
2536: */
2537: private void createSelectLOBsSql(JdbcClass jdbcClass,
2538: JdbcTable table, int[] fieldNos, int numFieldNos,
2539: CharBuf s, int blocksz) {
2540: JdbcField[] stateFields = jdbcClass.stateFields;
2541: s.clear();
2542: s.append("SELECT ");
2543: if (blocksz > 1) {
2544: table.appendInsertPKColumnList(s);
2545: s.append(',');
2546: s.append(' ');
2547: }
2548: stateFields[fieldNos[0]].appendInsertColumnList(s);
2549: for (int i = 1; i < numFieldNos; i++) {
2550: s.append(',');
2551: s.append(' ');
2552: stateFields[fieldNos[i]].appendInsertColumnList(s);
2553: }
2554: s.append(" FROM ");
2555: s.append(table.name);
2556: s.append(" WHERE ");
2557: if (blocksz == 1) {
2558: table.appendWherePK(s);
2559: } else {
2560: s.append(table.pk[0].name); // will never be composite pk
2561: s.append(" IN (");
2562: s.append('?');
2563: for (int i = 1; i < blocksz; i++) {
2564: s.append(',');
2565: s.append('?');
2566: }
2567: s.append(')');
2568: }
2569: s.append(" FOR UPDATE");
2570: }
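
// Illustrative example of the generated SQL (hypothetical table/columns):
// for blocksz == 3 on a single-column primary key the statement is roughly
//   SELECT EMP_ID, PHOTO, NOTES FROM EMPLOYEE WHERE EMP_ID IN (?,?,?) FOR UPDATE
// and for blocksz == 1 (always the case for a composite primary key) it is
//   SELECT PHOTO, NOTES FROM EMPLOYEE WHERE EMP_ID = ? FOR UPDATE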
2571:
2572: /**
2573: * Generate update statement(s) for pass1 changes to one or more objects.
2574: *
2575: * @return The index of the last object updated + 1
2576: */
2577: private int generateUpdates(OID oid, int index, ClassMetaData cmd,
2578: PersistGraph graph, int[] fieldNos, boolean haveNewObjects,
2579: CharBuf s) throws SQLException {
2580:
2581: State ns = graph.getNewState(index);
2582: if (!ns.containsPass1Fields())
2583: return ++index;
2584:
2585: JdbcClass jdbcClass = (JdbcClass) cmd.storeClass;
2586: State os = graph.getOldState(index);
2587: boolean usingChanged = jdbcClass.optimisticLocking == JdbcClass.OPTIMISTIC_LOCKING_CHANGED;
2588: JdbcSimpleField optimisticLockingField = jdbcClass.optimisticLockingField;
2589: boolean usingOLF = optimisticLockingField != null;
2590:
2591: Connection con = con();
2592:
2593: // count how many states we can update with the same SQL
2594: int graphSize = graph.size();
2595: // the amount of states with the same fields for update
2596: int count = 1;
2597: for (int i = index + 1; i < graphSize; count++, i++) {
2598: // make sure the next object is not new and has the same class
2599: OID nextOid = graph.getOID(i);
2600: if (Debug.DEBUG) {
2601: if (!nextOid.isNew() && !nextOid.isResolved()) {
2602: throw BindingSupportImpl.getInstance().internal(
2603: "OID is not resolved: " + oid.toSString());
2604: }
2605: }
2606: if (nextOid.isNew() || nextOid.getClassIndex() != cmd.index)
2607: break;
2608:
2609: // make sure the next state has the same field numbers
2610: State nextState = graph.getNewState(i);
2611: if (nextState.compareToPass1(ns) != 0)
2612: break;
2613:
2614: // if we are using changed optimistic locking make sure the next
2615: // old state has the same null fields as the current old state
2616: if (usingChanged) {
2617: if (!((JdbcState) os).hasSameNullFields(graph
2618: .getOldState(i), ns))
2619: break;
2620: }
2621: }
2622:
2623: // decide if we will use statement batching
2624: boolean batch = count > 1 && useBatchUpdate
2625: && !jdbcClass.noBatching;
2626:
2627: // check if the OID must be updated from the state after the update
2628: boolean updateOIDFromState = ns
2629: .containsApplicationIdentityFields();
2630:
2631: // deny update of app identity for inheritance hierarchies
2632: if (updateOIDFromState && cmd.isInHeirachy()) {
2633: throw BindingSupportImpl.getInstance().runtime(
2634: "Application identity change for inheritance heirachies "
2635: + "is not supported: " + oid.toSString());
2636: }
2637:
2638: final int numFieldNos = ns.getPass1FieldNos(fieldNos);
2639:
2640: // do a stripe of inserts for each table for the class
2641: int startIndex = index;
2642: int startCount = count;
2643: for (int tableNo = 0; tableNo < jdbcClass.allTables.length; tableNo++) {
2644: JdbcTable table = jdbcClass.allTables[tableNo];
2645: if (tableNo > 0) { // reset after first table
2646: ns = graph.getNewState(index = startIndex);
2647: os = graph.getOldState(index);
2648: oid = graph.getOID(index);
2649: count = startCount;
2650: }
2651:
2652: // create PreparedStatement(s) and do count updates for each
2653: PreparedStatement ps = null;
2654: try {
2655: // generate the SQL for the table
2656: boolean lobColFound = createUpdateSql(jdbcClass, table,
2657: numFieldNos, fieldNos,
2658: (usingOLF && tableNo == 0),
2659: optimisticLockingField, usingChanged, os, ns, s);
2660: String sql = s.toString();
2661: if (sql.length() == 0) {
2662: index = index + startCount;
2663: continue;
2664: }
2665:
2666: ps = con.prepareStatement(sql);
2667:
2668: for (;;) {
2669: if (haveNewObjects) {
2670: ns.replaceNewObjectOIDs(fieldNos, numFieldNos);
2671: }
2672:
2673: // set parameters on ps
2674: try {
2675: int pos = ((JdbcState) ns).setParams(ps,
2676: fieldNos, 0, numFieldNos, 1, graph,
2677: tableNo);
2678: pos = ((JdbcOID) oid).setParams(ps, pos);
2679: if (usingOLF && tableNo == 0) {
2680: //this is only needed on the base table
2681: ((JdbcState) os)
2682: .setOptimisticLockingParams(ps, pos);
2683: } else if (usingChanged) {
2684: ((JdbcState) os)
2685: .setParamsChangedAndNotNull(ps,
2686: fieldNos, 0, numFieldNos,
2687: pos, graph, tableNo);
2688: }
2689: } catch (SQLException e) {
2690: throw handleException(
2691: "Error setting parameters on PreparedStatement for "
2692: + "update of '"
2693: + Utils.toString(oid)
2694: + "':\n"
2695: + JdbcUtils.toString(e)
2696: + "\n"
2697: + JdbcUtils
2698: .getPreparedStatementInfo(
2699: sql, ps), e);
2700: }
2701:
2702: // do the update
2703: if (batch) {
2704: ps.addBatch();
2705: } else {
2706: int uc;
2707: try {
2708: uc = ps.executeUpdate();
2709: } catch (Exception e) {
2710: throw handleException("Update failed: "
2711: + JdbcUtils.toString(e)
2712: + "\n"
2713: + "Row: "
2714: + oid.toSString()
2715: + "\n"
2716: + JdbcUtils
2717: .getPreparedStatementInfo(
2718: sql, ps), e, true,
2719: oid);
2720: }
2721: if (uc == 0) {
2722: throw BindingSupportImpl
2723: .getInstance()
2724: .concurrentUpdate(
2725: "Row not found: "
2726: + oid.toSString()
2727: + "\n"
2728: + JdbcUtils
2729: .getPreparedStatementInfo(
2730: sql,
2731: ps),
2732: oid);
2733: }
2734: }
2735: if (updateOIDFromState)
2736: ns.copyKeyFieldsUpdate(oid);
2737:
2738: index = index + 1;
2739: if (--count == 0)
2740: break;
2741: oid = graph.getOID(index);
2742: ns = graph.getNewState(index);
2743: os = graph.getOldState(index);
2744: }
2745:
2746: // if batching then exec the batch and check all the update counts
2747: if (batch) {
2748: int[] a;
2749: try {
2750: a = ps.executeBatch();
2751: } catch (Exception e) {
2752: throw handleException("Batch update failed: "
2753: + JdbcUtils.toString(e)
2754: + "\n"
2755: + "Row: "
2756: + oid.toSString()
2757: + "\n"
2758: + JdbcUtils.getPreparedStatementInfo(
2759: sql, ps), e, true, oid);
2760: }
2761: for (int j = 0; j < count; j++) {
2762: int c = a[j];
2763: if (c <= 0) {
2764: String psi = JdbcUtils
2765: .getPreparedStatementInfo(sql, ps,
2766: j);
2767: oid = graph.getOID(startIndex + j);
2768: if (c == 0) {
2769: throw BindingSupportImpl
2770: .getInstance()
2771: .concurrentUpdate(
2772: "Row not found: "
2773: + oid
2774: .toSString()
2775: + "\n" + psi,
2776: oid);
2777: }
2778: throw BindingSupportImpl.getInstance()
2779: .datastore(
2780: "Unexpected update count "
2781: + c + " for row: "
2782: + oid.toSString()
2783: + "\n" + psi);
2784: }
2785: }
2786: }
2787:
2788: // If there was at least one Oracle style LOB col we have to
2789: // select all of the non null LOB cols back to give their
2790: // converters a chance to set the data in the LOB.
2791: int lobNumFieldNos;
2792: if (lobColFound
2793: && (lobNumFieldNos = removeNullLOBFields(
2794: fieldNos, numFieldNos, ns)) > 0) {
2795: selectAndUpdateOracleLOBCols(s, startIndex, index,
2796: jdbcClass, table, fieldNos, lobNumFieldNos,
2797: graph);
2798: }
2799: } finally {
2800: cleanup(ps);
2801: }
2802: }
2803: return index;
2804: }
2805:
2806: /**
2807: * This method is used as part of the insert operation to update all new OIDs
2808: * whose realOID was still null at the time of the insert. This only happens if there is a
2809: * circular dependency on the creation of ids (e.g. two new application identity instances that use autoinc pk columns).
2810: */
2811: private int generateUpdatesOfCircularReferences(OID oid,
2812: ClassMetaData cmd, int[] fieldNos, boolean haveNewObjects,
2813: CharBuf s, IntArray toUpdateIndexes, int indexInIntArray,
2814: PersistGraph graph) throws SQLException {
2815:
2816: State ns = graph.getNewState(toUpdateIndexes
2817: .get(indexInIntArray));
2818: if (!ns.containsPass1Fields())
2819: return ++indexInIntArray;
2820:
2821: JdbcClass jdbcClass = (JdbcClass) cmd.storeClass;
2822: State os = graph.getOldState(toUpdateIndexes
2823: .get(indexInIntArray));
2824: boolean usingChanged = jdbcClass.optimisticLocking == JdbcClass.OPTIMISTIC_LOCKING_CHANGED;
2825: JdbcSimpleField optimisticLockingField = jdbcClass.optimisticLockingField;
2826: boolean usingOLF = optimisticLockingField != null;
2827:
2828: // the amount of states with the same fields for update
2829: int count = 1;
2830: for (int i = indexInIntArray + 1; i < toUpdateIndexes.size(); count++, i++) {
2831: // make sure the next object is not new and has the same class
2832: OID nextOid = graph.getOID(toUpdateIndexes.get(i));
2833: if (Debug.DEBUG) {
2834: if (!nextOid.isNew() && !nextOid.isResolved()) {
2835: throw BindingSupportImpl.getInstance().internal(
2836: "OID is not resolved: " + oid.toSString());
2837: }
2838: }
2839: if (nextOid.isNew() || nextOid.getClassIndex() != cmd.index)
2840: break;
2841:
2842: // make sure the next state has the same field numbers
2843: State nextState = graph.getNewState(toUpdateIndexes.get(i));
2844: if (nextState.compareToPass1(ns) != 0)
2845: break;
2846:
2847: // if we are using changed optimistic locking make sure the next
2848: // old state has the same null fields as the current old state
2849: if (usingChanged) {
2850: if (!((JdbcState) os).hasSameNullFields(graph
2851: .getOldState(toUpdateIndexes.get(i)), ns))
2852: break;
2853: }
2854: }
2855:
2856: // decide if we will use statement batching
2857: boolean batch = count > 1 && useBatchUpdate
2858: && !jdbcClass.noBatching;
2859:
2860: // check if the OID must be updated from the state after the update
2861: // boolean updateOIDFromState = ns.containsApplicationIdentityFields();
2862:
2863: // deny update of app identity for inheritance hierarchies
2864: // if (updateOIDFromState && cmd.isInHeirachy()) {
2865: // throw BindingSupportImpl.getInstance().runtime("Application identity change for inheritance heirachies " +
2866: // "is not supported: " + oid.toSString());
2867: // }
2868:
2869: final int numFieldNos = ns
2870: .getPass1FieldRefFieldNosWithNewOids(fieldNos);
2871:
2872: // do a stripe of inserts for each table for the class
2873: Connection con = con();
2874: int startIndex = indexInIntArray;
2875: int startCount = count;
2876: for (int tableNo = 0; tableNo < jdbcClass.allTables.length; tableNo++) {
2877: JdbcTable table = jdbcClass.allTables[tableNo];
2878: if (tableNo > 0) { // reset after first table
2879: ns = graph.getNewState(toUpdateIndexes
2880: .get(indexInIntArray = startIndex));
2881: os = graph.getOldState(toUpdateIndexes
2882: .get(indexInIntArray));
2883: oid = graph
2884: .getOID(toUpdateIndexes.get(indexInIntArray));
2885: count = startCount;
2886: }
2887:
2888: // create PreparedStatement(s) and do count updates for each
2889: PreparedStatement ps = null;
2890: try {
2891: // generate the SQL for the table
2892: createUpdateSql(jdbcClass, table, numFieldNos,
2893: fieldNos, false, optimisticLockingField, false,
2894: os, ns, s);
2895: String sql = s.toString();
2896: if (sql.length() == 0) {
2897: indexInIntArray = indexInIntArray + startCount;
2898: continue;
2899: }
2900:
2901: ps = con.prepareStatement(sql);
2902:
2903: for (;;) {
2904: if (haveNewObjects) {
2905: ns.replaceNewObjectOIDs(fieldNos, numFieldNos);
2906: }
2907:
2908: // set parameters on ps
2909: try {
2910: int pos = ((JdbcState) ns).setParams(ps,
2911: fieldNos, 0, numFieldNos, 1, graph,
2912: tableNo);
2913: pos = ((JdbcOID) oid).setParams(ps, pos);
2914: if (os != null && usingOLF && tableNo == 0) {
2915: //this is only needed on the base table
2916: ((JdbcState) os)
2917: .setOptimisticLockingParams(ps, pos);
2918: } else if (usingChanged) {
2919: ((JdbcState) os)
2920: .setParamsChangedAndNotNull(ps,
2921: fieldNos, 0, numFieldNos,
2922: pos, graph, tableNo);
2923: }
2924: } catch (SQLException e) {
2925: throw handleException(
2926: "Error setting parameters on PreparedStatement for "
2927: + "update of '"
2928: + Utils.toString(oid)
2929: + "':\n"
2930: + JdbcUtils.toString(e)
2931: + "\n"
2932: + JdbcUtils
2933: .getPreparedStatementInfo(
2934: sql, ps), e);
2935: }
2936:
2937: // do the update
2938: if (batch) {
2939: ps.addBatch();
2940: } else {
2941: int uc;
2942: try {
2943: uc = ps.executeUpdate();
2944: } catch (Exception e) {
2945: throw handleException("Update failed: "
2946: + JdbcUtils.toString(e)
2947: + "\n"
2948: + "Row: "
2949: + oid.toSString()
2950: + "\n"
2951: + JdbcUtils
2952: .getPreparedStatementInfo(
2953: sql, ps), e, true,
2954: oid);
2955: }
2956: if (uc == 0) {
2957: throw BindingSupportImpl
2958: .getInstance()
2959: .concurrentUpdate(
2960: "Row not found: "
2961: + oid.toSString()
2962: + "\n"
2963: + JdbcUtils
2964: .getPreparedStatementInfo(
2965: sql,
2966: ps),
2967: oid);
2968: }
2969: }
2970:
2971: indexInIntArray = indexInIntArray + 1;
2972: if (--count == 0)
2973: break;
2974: oid = graph.getOID(toUpdateIndexes
2975: .get(indexInIntArray));
2976: ns = graph.getNewState(toUpdateIndexes
2977: .get(indexInIntArray));
2978: os = graph.getOldState(toUpdateIndexes
2979: .get(indexInIntArray));
2980: }
2981:
2982: // if batching then exec the batch and check all the update counts
2983: if (batch) {
2984: int[] a;
2985: try {
2986: a = ps.executeBatch();
2987: } catch (Exception e) {
2988: throw handleException("Batch update failed: "
2989: + JdbcUtils.toString(e)
2990: + "\n"
2991: + "Row: "
2992: + oid.toSString()
2993: + "\n"
2994: + JdbcUtils.getPreparedStatementInfo(
2995: sql, ps), e, true, oid);
2996: }
2997: for (int j = 0; j < count; j++) {
2998: int c = a[j];
2999: if (c <= 0) {
3000: String psi = JdbcUtils
3001: .getPreparedStatementInfo(sql, ps,
3002: j);
3003: oid = graph.getOID(startIndex + j);
3004: if (c == 0) {
3005: throw BindingSupportImpl
3006: .getInstance()
3007: .concurrentUpdate(
3008: "Row not found: "
3009: + oid
3010: .toSString()
3011: + "\n" + psi,
3012: oid);
3013: }
3014: throw BindingSupportImpl.getInstance()
3015: .datastore(
3016: "Unexpected update count "
3017: + c + " for row: "
3018: + oid.toSString()
3019: + "\n" + psi);
3020: }
3021: }
3022: }
3023: } finally {
3024: cleanup(ps);
3025: }
3026: }
3027: return indexInIntArray;
3028: }
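
// Illustrative scenario (hypothetical classes): two new application identity
// instances A and B with autoincrement primary keys reference each other.
// Whichever row is inserted first cannot yet store the other's generated key
// (its realOID is still null), so its index is recorded in toUpdateIndexes by
// generateInserts. After the inserts this method runs an UPDATE over just the
// reference fields (getPass1FieldRefFieldNosWithNewOids) so both rows end up
// pointing at the real keys.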
3029:
3030: /**
3031: * Generate SQL to update a row in the base table for a class hierarchy.
3032: * Any entries in fieldNos for fields whose appendUpdate returns true
3033: * (i.e. they did not add a replaceable param) are made negative. This is
3034: * used to handle Oracle LOB columns. Returns true if there was at least
3035: * one such column and false otherwise.
3036: */
3037: private boolean createUpdateSql(JdbcClass jdbcClass,
3038: JdbcTable table, int numFieldNos, int[] fieldNos,
3039: boolean usingOLF, JdbcSimpleField optimisticLockingField,
3040: boolean usingChanged, State os, State ns, CharBuf s) {
3041: JdbcField[] fields = jdbcClass.stateFields;
3042: boolean lobColFound = false;
3043: s.clear();
3044: s.append("UPDATE ");
3045: s.append(table.name);
3046: s.append(" SET ");
3047: boolean first = true;
3048: for (int i = 0; i < numFieldNos; i++) {
3049: int fieldNo = fieldNos[i];
3050: JdbcField f = fields[fieldNo];
3051: if (f.mainTableColsForUpdate == null
3052: || f.mainTable != table)
3053: continue;
3054: if (first) {
3055: first = false;
3056: } else {
3057: s.append(',');
3058: s.append(' ');
3059: }
3060: if (f.appendUpdate(s, ns)) {
3061: // no replaceable param so skip field when params are set
3062: fieldNos[i] = -(fieldNo + 1);
3063: lobColFound = true;
3064: }
3065: }
3066: if (first) { // no columns to update
3067: s.clear();
3068: } else {
3069: s.append(" WHERE ");
3070: table.appendWherePK(s);
3071: if (usingOLF) {
3072: s.append(" AND ");
3073: optimisticLockingField.appendWhere(s, sqlDriver);
3074: } else if (usingChanged) {
3075: for (int i = 0; i < numFieldNos; i++) {
3076: int fieldNo = fieldNos[i];
3077: if (fieldNo < 0)
3078: continue;
3079: JdbcField f = fields[fieldNo];
3080: if (f.mainTableColsForUpdate == null
3081: || !f.includeForChangedLocking
3082: || f.mainTable != table) {
3083: continue;
3084: }
3085: s.append(" AND ");
3086: if (os.isNull(fieldNo)) {
3087: f.appendWhereIsNull(s, sqlDriver);
3088: } else {
3089: f.appendWhere(s, sqlDriver);
3090: }
3091: }
3092: }
3093: }
3094: return lobColFound;
3095: }
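
// Illustrative examples of the generated SQL (hypothetical table/columns):
// with a version column used for optimistic locking (usingOLF) the statement
// is roughly
//   UPDATE EMPLOYEE SET NAME = ?, AGE = ? WHERE EMP_ID = ? AND JDO_VERSION = ?
// and with "changed" optimistic locking the WHERE clause instead repeats the
// updated columns, using IS NULL for fields that were null in the old state:
//   UPDATE EMPLOYEE SET NAME = ?, AGE = ? WHERE EMP_ID = ? AND NAME = ? AND AGE = ?
// If none of the changed fields are stored in this table the buffer is
// cleared and the caller skips the statement entirely.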
3096:
3097: /**
3098: * Persist fields stored in link tables and so on.
3099: *
3100: * @see #persistPass1
3101: */
3102: private void persistPass2(PersistGraph graph) {
3103: try {
3104: int[] fieldNos = new int[jmd.maxFieldsLength];
3105: CharBuf s = new CharBuf();
3106: int graphSize = graph.size();
3107:
3108: // process blocks of the same class together
3109: int startIndex = 0;
3110: for (; startIndex < graphSize;) {
3111: State ns = graph.getNewState(startIndex);
3112: if (!ns.containsPass2Fields()) {
3113: startIndex++;
3114: continue;
3115: }
3116:
3117: // count entries in the graph with the same class
3118: int classIndex = ns.getClassIndex();
3119: int blockEnd = startIndex;
3120: for (; ++blockEnd < graphSize
3121: && graph.getNewState(blockEnd).getClassIndex() == classIndex;) {
3122: ;
3123: }
3124:
3125: // get info common to all the entries in the block
3126: ClassMetaData cmd = ns.getClassMetaData(jmd);
3127: Connection con = con();
3128:
3129: // find the fields we need to check for each block entry
3130: int[] fna;
3131: int nf;
3132: if (blockEnd - startIndex == 1) {
3133: // only one entry so check only its pass 2 fields
3134: nf = ns.getPass2FieldNos(fieldNos);
3135: fna = fieldNos;
3136: } else {
3137: // multiple entries so check all pass 2 fields
3138: fna = cmd.pass2Fields;
3139: nf = fna.length;
3140: }
3141:
3142: // process block for each fieldNo so we can make best use
3143: // of PreparedStatement's and batching
3144: for (int fpos = 0; fpos < nf; fpos++) {
3145: int fieldNo = fna[fpos];
3146: FieldMetaData fmd = cmd.stateFields[fieldNo];
3147: ((JdbcField) fmd.storeField).persistPass2Block(
3148: graph, startIndex, blockEnd, s, con,
3149: useBatchInsert, useBatchUpdate);
3150: }
3151:
3152: startIndex = blockEnd;
3153: }
3154: } catch (SQLException e) {
3155: throw handleException(e);
3156: }
3157: }
3158:
3159: private void doDeletes(DeletePacket toDelete) {
3160: if (!toDelete.isKeepStates()) {
3161: toDelete.sortOIDs(new OIDRefGraphIndexComparator());
3162: }
3163: deletePass1(toDelete);
3164: deletePass2(toDelete);
3165: }
3166:
3167: /**
3168: * Delete rows from link tables.
3169: *
3170: * @see #deletePass2
3171: */
3172: private void deletePass1(DeletePacket graph) {
3173: try {
3174: CharBuf s = new CharBuf();
3175: int graphSize = graph.size();
3176:
3177: for (int startIndex = 0; startIndex < graphSize;) {
3178:
3179: OID oid = graph.oids[startIndex];
3180: int classIndex = oid.getClassIndex();
3181: ClassMetaData cmd = jmd.classes[classIndex];
3182:
3183: // count entries in the graph with the same class
3184: int blockEnd = startIndex;
3185: for (; ++blockEnd < graphSize
3186: && graph.oids[blockEnd].getClassIndex() == classIndex;) {
3187: ;
3188: }
3189:
3190: // get info common to all the entries in the block
3191: Connection con = con();
3192:
3193: // find the fields we need to check for each block entry
3194: int[] fna = cmd.pass2Fields;
3195: int nf = fna.length;
3196:
3197: // process block for each fieldNo so we can make best use
3198: // of PreparedStatement's and batching
3199: for (int fpos = 0; fpos < nf; fpos++) {
3200: int fieldNo = fna[fpos];
3201: FieldMetaData fmd = cmd.stateFields[fieldNo];
3202: ((JdbcField) fmd.storeField).deletePass2Block(
3203: graph, startIndex, blockEnd, s, con,
3204: useBatchUpdate);
3205: }
3206:
3207: startIndex = blockEnd;
3208: }
3209: } catch (SQLException e) {
3210: throw handleException(e);
3211: }
3212: }
3213:
3214: /**
3215: * Delete main table rows.
3216: *
3217: * @see #deletePass1
3218: */
3219: public void deletePass2(DeletePacket graph) {
3220: try {
3221: CharBuf s = new CharBuf();
3222: int graphSize = graph.size();
3223:
3224: int count;
3225: for (int startIndex = 0; startIndex < graphSize; startIndex += count) {
3226: OID oid = graph.oids[startIndex];
3227: int classIndex = oid.getClassIndex();
3228: ClassMetaData cmd = jmd.classes[classIndex];
3229: JdbcClass jdbcClass = (JdbcClass) cmd.storeClass;
3230: Connection con = con();
3231:
3232: boolean batch = useBatchUpdate;
3233: boolean useInList = jdbcClass.table.pkSimpleCols.length == 1;
3234:
3235: count = 1;
3236: for (int index = startIndex + 1; index < graphSize; count++, index++) {
3237: if (graph.oids[startIndex + count]
3238: .getClassMetaData() != cmd)
3239: break;
3240: }
3241: if (count == 1) {
3242: useInList = false;
3243: }
3244:
3245: PreparedStatement ps = null;
3246: try {
3247: if (!batch && !useInList) {
3248: // delete hierarchies one by one
3249: int n = jdbcClass.allTables.length;
3250: for (int tableNo = n - 1; tableNo >= 0; tableNo--) {
3251: //must create ps now
3252: String sql = getDeleteRowSql(
3253: jdbcClass.allTables[tableNo], s);
3254: ps = con.prepareStatement(sql);
3255:
3256: for (int i = 0; i < count; i++) {
3257: deleteRow(ps,
3258: (JdbcOID) graph.oids[startIndex
3259: + i], sql);
3260: }
3261: }
3262: } else if (useInList) {
3263: //use 'IN' list
3264: final int maxInOps = sqlDriver
3265: .getMaxInOperands();
3266: final char[] whereParam = sqlDriver
3267: .getSqlParamStringChars(jdbcClass.table.pkSimpleCols[0].jdbcType);
3268:
3269: if (count <= maxInOps) {
3270: final char[] totalWhereParams = createInParamArray(
3271: whereParam, count);
3272: int n = jdbcClass.allTables.length;
3273: for (int tableNo = n - 1; tableNo >= 0; tableNo--) {
3274: getDeleteRowSqlWithInList(
3275: jdbcClass.allTables[tableNo], s);
3276: s.append(totalWhereParams);
3277: String sql = s.toString();
3278: ps = con.prepareStatement(sql);
3279: for (int i = 0; i < count; i++) {
3280: ((JdbcOID) graph.oids[startIndex
3281: + i])
3282: .setParams(ps, (i + 1));
3283: }
3284: try {
3285: ps.executeUpdate();
3286: } catch (Exception e) {
3287: throw handleException(
3288: "Delete with IN list failed: "
3289: + JdbcUtils
3290: .toString(e)
3291: + "\n"
3292: + JdbcUtils
3293: .getPreparedStatementInfo(
3294: sql,
3295: ps),
3296: e);
3297: }
3298: }
3299: } else {
3300: int n = jdbcClass.allTables.length;
3301: int amountLeft = count
3302: % sqlDriver.getMaxInOperands();
3303: int amountOfFullRuns = count / maxInOps;
3304:
3305: char[] totalWhereParams1 = null;
3306: if (amountLeft > 0) {
3307: totalWhereParams1 = createInParamArray(
3308: whereParam, amountLeft);
3309: }
3310: char[] totalWhereParams2 = createInParamArray(
3311: whereParam, maxInOps);
3312:
3313: for (int tableNo = n - 1; tableNo >= 0; tableNo--) {
3314: String sql = null;
3315: int pos = startIndex;
3316: if (amountLeft > 0) {
3317: //do the smaller amount first and once
3318: getDeleteRowSqlWithInList(
3319: jdbcClass.allTables[tableNo],
3320: s);
3321: s.append(totalWhereParams1);
3322:
3323: sql = s.toString();
3324: ps = con.prepareStatement(sql);
3325: for (int i = 0; i < amountLeft; i++) {
3326: ((JdbcOID) graph.oids[pos++])
3327: .setParams(ps, (i + 1));
3328: }
3329: try {
3330: ps.executeUpdate();
3331: } catch (Exception e) {
3332: throw handleException(
3333: "Delete with IN list failed: "
3334: + JdbcUtils
3335: .toString(e)
3336: + "\n"
3337: + JdbcUtils
3338: .getPreparedStatementInfo(
3339: sql,
3340: ps),
3341: e);
3342: }
3343: }
3344:
3345: getDeleteRowSqlWithInList(
3346: jdbcClass.allTables[tableNo], s);
3347: s.append(totalWhereParams2);
3348: sql = s.toString();
3349: ps = con.prepareStatement(sql);
3350:
3351: //do the full runs
3352: for (int i = 0; i < amountOfFullRuns; i++) {
3353: for (int j = 0; j < maxInOps; j++) {
3354: ((JdbcOID) graph.oids[pos++])
3355: .setParams(ps, (j + 1));
3356: }
3357: try {
3358: ps.executeUpdate();
3359: } catch (Exception e) {
3360: throw handleException(
3361: "Delete with IN list failed: "
3362: + JdbcUtils
3363: .toString(e)
3364: + "\n"
3365: + JdbcUtils
3366: .getPreparedStatementInfo(
3367: sql,
3368: ps),
3369: e);
3370: }
3371: }
3372: }
3373: }
3374: } else {
3375: //use batching
3376: int n = jdbcClass.allTables.length;
3377: for (int tableNo = n - 1; tableNo >= 0; tableNo--) {
3378: String sql = getDeleteRowSql(
3379: jdbcClass.allTables[tableNo], s);
3380: ps = con.prepareStatement(sql);
3381: for (int i = 0; i < count; i++) {
3382: ((JdbcOID) graph.oids[startIndex + i])
3383: .setParams(ps, 1);
3384: ps.addBatch();
3385: }
3386:
3387: int[] a;
3388: try {
3389: a = ps.executeBatch();
3390: } catch (Exception e) {
3391: throw handleException(
3392: "Batch delete failed: "
3393: + JdbcUtils.toString(e)
3394: + "\n"
3395: + "Row: "
3396: + graph.oids[startIndex]
3397: .toSString()
3398: + "\n"
3399: + JdbcUtils
3400: .getPreparedStatementInfo(
3401: sql, ps),
3402: e, true, graph.oids[startIndex]);
3403: }
3404: for (int j = 0; j < count; j++) {
3405: int c = a[j];
3406: if (c <= 0) {
3407: String psi = JdbcUtils
3408: .getPreparedStatementInfo(
3409: sql, ps, j);
3410: if (c == 0) {
3411: throw BindingSupportImpl
3412: .getInstance()
3413: .concurrentUpdate(
3414: "Row not found: "
3415: + graph.oids[startIndex
3416: + j]
3417: .toSString()
3418: + "\n"
3419: + psi,
3420: graph.oids[startIndex
3421: + j]);
3422: }
3423: throw BindingSupportImpl
3424: .getInstance()
3425: .datastore(
3426: "Unexpected update count "
3427: + c
3428: + " for row: "
3429: + graph.oids[startIndex
3430: + j]
3431: .toSString()
3432: + "\n"
3433: + psi);
3434: }
3435: }
3436: }
3437: }
3438: } finally {
3439: cleanup(ps);
3440: }
3441: }
3442: } catch (SQLException e) {
3443: throw handleException(e);
3444: }
3445: }
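
// Illustrative sketch of the IN list splitting above (hypothetical numbers):
// deleting 10 rows of one class with getMaxInOperands() == 4 and a single
// column primary key issues, per table and in reverse table order, one
// DELETE ... IN (?,?) statement for the remainder of 2 followed by two
// DELETE ... IN (?,?,?,?) statements for the remaining 8 rows. When the
// primary key is composite (so no IN list is possible) and batching is
// enabled, a single DELETE ... WHERE <pk cols> statement is reused with
// addBatch()/executeBatch() and every update count is checked.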
3446:
3447: private char[] createInParamArray(final char[] whereParam, int count) {
3448: int pos = 0;
3449: char[] totalWhereParams;
3450: if (count == 1) {
3451: totalWhereParams = new char[whereParam.length * count + 1];
3452: } else {
3453: totalWhereParams = new char[whereParam.length * count
3454: + (count - 1) + 1];
3455: }
3456: for (int i = 0; i < count; i++) {
3457: if (i != 0)
3458: totalWhereParams[pos++] = ',';
3459: for (int j = 0; j < whereParam.length; j++) {
3460: totalWhereParams[pos++] = whereParam[j];
3461: }
3462: }
3463: totalWhereParams[pos] = ')';
3464: return totalWhereParams;
3465: }
3466:
3467: private void deleteRow(PreparedStatement ps, JdbcOID oid, String sql) {
3468: int uc;
3469: try {
3470: oid.setParams(ps, 1);
3471: uc = ps.executeUpdate();
3472: } catch (Exception e) {
3473: throw handleException("Delete failed: "
3474: + JdbcUtils.toString(e) + "\n" + "Row: "
3475: + oid.toSString() + "\n"
3476: + JdbcUtils.getPreparedStatementInfo(sql, ps), e,
3477: true, oid);
3478: }
3479: if (uc == 0) {
3480: throw BindingSupportImpl.getInstance().concurrentUpdate(
3481: "Row not found: "
3482: + oid.toSString()
3483: + "\n"
3484: + JdbcUtils.getPreparedStatementInfo(sql,
3485: ps), oid);
3486: }
3487: }
3488:
3489: private String getDeleteRowSql(JdbcTable table, CharBuf s) {
3490: String sql = table.deleteRowSql;
3491: if (sql != null)
3492: return sql;
3493: s.clear();
3494: s.append("DELETE FROM ");
3495: s.append(table.name);
3496: s.append(" WHERE ");
3497: table.appendWherePK(s);
3498: return table.deleteRowSql = s.toString();
3499: }
3500:
3501: private void getDeleteRowSqlWithInList(JdbcTable table, CharBuf s) {
3502: s.clear();
3503: s.append("DELETE FROM ");
3504: s.append(table.name);
3505: s.append(" WHERE ");
3506: s.append(table.pkSimpleCols[0].name);
3507: s.append(" IN (");
3508: }
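
// Illustrative example (hypothetical table/column): getDeleteRowSqlWithInList
// leaves the statement open ended, e.g. "DELETE FROM EMPLOYEE WHERE EMP_ID IN (",
// and createInParamArray supplies the closing parameter list; with a single
// '?' parameter marker and count == 3 it appends "?,?,?)", giving
//   DELETE FROM EMPLOYEE WHERE EMP_ID IN (?,?,?)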
3509:
3510: /**
3511: * Close all open queries.
3512: */
3513: private final void closeAllQueries() {
3514: for (JdbcQueryResult res = queryResultHead; res != null;) {
3515: JdbcQueryResult n = res.prev;
3516: res.close();
3517: res.next = null;
3518: res.prev = null;
3519: if (n == null)
3520: break;
3521: res = n;
3522: }
3523: queryResultHead = null;
3524: queryResultTail = null;
3525: }
3526:
3527: private JdbcCompiledQuery compile(QueryDetails q) {
3528: JdbcCompiledQuery cq = null;
3529: int language = q.getLanguage();
3530: if (language == QueryDetails.LANGUAGE_EJBQL) {
3531:
3532: cq = new JdbcEJBQLCompiler(this).compile(q);
3533:
3534: } else if (language == QueryDetails.LANGUAGE_SQL) {
3535: ClassMetaData cmd = null;
3536: if (q.getCandidateClass() != null) {
3537: cmd = jmd.getClassMetaData(q.getCandidateClass());
3538: }
3539:
3540: //unset everything not used for sql queries
3541: q.setOrdering(null);
3542: q.setGrouping(null);
3543: q.setVariables(null);
3544: q.setCol(null);
3545: q.setImports(null);
3546: q.setResult(null);
3547:
3548: cq = new JdbcCompiledQuery(cmd, q);
3549: cq.setCacheable(false); // SQL queries not cached by default
3550:
3551: CmdBitSet bits = new CmdBitSet(jmd);
3552: if (cmd != null) {
3553: bits.addPlus(cmd);
3554: }
3555: int[] a = q.getExtraEvictClasses();
3556: if (a != null) {
3557: for (int i = a.length - 1; i >= 0; i--) {
3558: bits.add(jmd.classes[a[i]]);
3559: }
3560: }
3561: cq.setFilterClsIndexs(bits.toArray());
3562: cq.setEvictionClassBits(bits.getBits());
3563: cq.setEvictionClassIndexes(bits.getIndexes());
3564: } else {
3565: cq = new JdbcJDOQLCompiler(this).compile(q);
3566: }
3567: return cq;
3568: }
3569:
3570: public LogEventStore getPerfEventStore() {
3571: return pes;
3572: }
3573:
3574: private void addQueryResult(JdbcQueryResult res) {
3575: if (res.next != null || res.prev != null) {
3576: throw BindingSupportImpl
3577: .getInstance()
3578: .internal(
3579: "Adding a duplicate queryResult to query linked list");
3580: }
3581:
3582: res.prev = queryResultHead;
3583: if (queryResultHead != null)
3584: queryResultHead.next = res;
3585: queryResultHead = res;
3586: if (queryResultTail == null)
3587: queryResultTail = res;
3588: }
3589:
3590: private void removeQueryResult(JdbcQueryResult res) {
3591: if (res.prev != null) {
3592: res.prev.next = res.next;
3593: } else {
3594: queryResultTail = res.next;
3595: }
3596: if (res.next != null) {
3597: res.next.prev = res.prev;
3598: } else {
3599: queryResultHead = res.prev;
3600: }
3601: res.next = null;
3602: res.prev = null;
3603: }
3604:
3605: private void fillContainerWithAll(ApplicationContext context,
3606: JdbcCompiledQuery cq, Object[] params,
3607: QueryResultContainer container) {
3608: JdbcQueryResult res = null;
3609: if (cq.isEJBQLHack()) {
3610:
3611: res = new JdbcQueryResultEJBQL(this, cq, params,
3612: canUseCache());
3613:
3614: } else {
3615: res = new JdbcQueryResult(this, cq, params, canUseCache());
3616: }
3617: res.getAllResults(context, container, forUpdateField);
3618: }
3619:
3620: private int executeCount(JdbcCompiledQuery cq, Object[] params) {
3621: PreparedStatement ps = null;
3622: ResultSet rs = null;
3623: try {
3624: Connection con = con();
3625:
3626: // this sync block can be removed when compiledQuery is no longer shared
3627: String sql;
3628: synchronized (cq) {
3629: cq.updateSql(sqlDriver, params, false, true);
3630: ps = con.prepareStatement(sql = cq.getSql());
3631: cq.setParamsOnPS(jmd, sqlDriver, ps, params, sql);
3632: }
3633: try {
3634: rs = ps.executeQuery();
3635: } catch (Exception e) {
3636: throw sqlDriver.mapException(e,
3637: "Count(*) query failed: "
3638: + JdbcUtils.toString(e)
3639: + "\n"
3640: + JdbcUtils.getPreparedStatementInfo(
3641: sql, ps), true);
3642: }
3643: if (!rs.next()) {
3644: throw BindingSupportImpl.getInstance().fatalDatastore(
3645: "No row returned by count(*) query:\n"
3646: + JdbcUtils.getPreparedStatementInfo(
3647: sql, ps));
3648: }
3649: return rs.getInt(1);
3650: } catch (SQLException x) {
3651: handleException(x);
3652: return 0; // keep compiler happy
3653: } finally {
3654: cleanup(rs);
3655: cleanup(ps);
3656: }
3657: }
3658:
3659: private VersantQueryPlan executePlan(JdbcCompiledQuery cq,
3660: Object[] params) {
3661: VersantQueryPlan qp = new VersantQueryPlan();
3662: // this sync block can go when compiledQuery is no longer shared
3663: synchronized (cq) {
3664: cq.updateSql(sqlDriver, params, forUpdateField, false);
3665: qp.setDatastoreQuery(cq.getSql());
3666: if (params == null) { // query plans can only be done when there are no params
3667: try {
3668: Connection con = con();
3669: PreparedStatement ps;
3670: String sql;
3671: try {
3672: sql = sqlDriver.prepareForGetQueryPlan(con, cq
3673: .getSql());
3674: ps = con.prepareStatement(sql);
3675: qp.setDatastorePlan(sqlDriver.getQueryPlan(con,
3676: ps));
3677: } finally {
3678: sqlDriver.cleanupForGetQueryPlan(con);
3679: }
3680: cq.setParamsOnPS(jmd, sqlDriver, ps, params, sql);
3681: } catch (SQLException e) {
3682: qp.setDatastorePlan(e.getMessage());
3683: }
3684: } else {
3685: qp.setDatastorePlan(
3686: "Query plan can only be done when there are no parameters.");
3687: }
3688: }
3689: return qp;
3690: }
3691:
3692: /**
3693: * Look for cached query results and add them to the container if there
3694: * are some. Returns true if results were found and false otherwise.
3695: */
3696: private boolean checkCacheForQuery(JdbcCompiledQuery cq,
3697: Object[] params, QueryResultContainer qContainer) {
3698: CachedQueryResult data = cache.getQueryResult(cq, params);
3699: if (data == null) {
3700: return false;
3701: }
3702: StatesReturned container = qContainer.container;
3703:
3704: // add all the results to the qContainer cloning all mutable stuff
3705: if (data.results != null) {
3706: qContainer.fillFrom(data);
3707: // update the container with the oid-state pairs
3708: if (cq.isDefaultResult()) {
3709: // this is a query that only contains oids
3710: ArrayList res = data.results;
3711: int n = res.size();
3712: for (int i = 0; i < n; i++) {
3713: OID oid = (OID) res.get(i);
3714: if (oid == null)
3715: break;
3716: State s = cache.getState(oid, null);
3717: if (s == null) {
3718: cache.evict(cacheTx(), cq, params);
3719: qContainer.reset();
3720: return false;
3721: }
3722: container.add(oid, s);
3723: }
3724: } else {
3725: final int firstThisCol = cq.getFirstThisIndex();
3726: if (firstThisCol >= 0 && data.results != null) {
3727: // first put in all the primary query results in correct order
3728: ArrayList res = data.results;
3729: int n = res.size();
3730: for (int i = 0; i < n; i++) {
3731: Object[] row = (Object[]) res.get(i);
3732: OID oid = (OID) row[firstThisCol];
3733: State s = cache.getState(oid, null);
3734: if (s == null) {
3735: cache.evict(cacheTx(), cq, params);
3736: qContainer.reset();
3737: return false;
3738: }
3739: container.add(oid, s);
3740: }
3741: }
3742: }
3743: }
3744:
3745: // process indirect oids
3746: if (data.indirectOIDs != null) {
3747: OID[] indirectOIDs = data.indirectOIDs.oids;
3748: int n = indirectOIDs.length;
3749: for (int i = 0; i < n; i++) {
3750: OID oid = indirectOIDs[i];
3751: if (oid == null)
3752: break;
3753: State s = cache.getState(oid, null);
3754: if (s == null) {
3755: cache.evict(cacheTx(), cq, params);
3756: container.clear();
3757: qContainer.reset();
3758: return false;
3759: }
3760: container.addIndirect(oid, s);
3761: }
3762: }
3763:
3764: qContainer.allResults = true;
3765: return true;
3766: }
3767:
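/**
 * Get the SqlDriver used by this storage manager.
 */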
3768: public SqlDriver getSqlDriver() {
3769: return sqlDriver;
3770: }
3771:
3772: /**
3773: * Skip over states not needed from a select. This follows the same
3774: * recursive algorithm as populateStateFromSelect but does not read
3775: * anything. It is useful if some states have to be skipped but
3776: * others need to be read from other columns in the ResultSet.
3777: */
3778: public int skipState(int firstCol, FgDs fgds) {
3779: return firstCol + fgds.columnSkipCount;
3780: }
3781:
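/**
 * Have changes been flushed to the datastore connection in the current
 * transaction?
 */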
3782: public boolean isFlushed() {
3783: return flushed;
3784: }
3785:
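/**
 * Get the level 2 StorageCache.
 */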
3786: public StorageCache getCache() {
3787: return cache;
3788: }
3789:
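/**
 * Are JDBC batch inserts enabled?
 */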
3790: public boolean isUseBatchInsert() {
3791: return useBatchInsert;
3792: }
3793:
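/**
 * Are JDBC batch updates enabled?
 */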
3794: public boolean isUseBatchUpdate() {
3795: return useBatchUpdate;
3796: }
3797:
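/**
 * Get the JDBC meta data for the model.
 */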
3798: public JdbcMetaData getJdbcMetaData() {
3799: return (JdbcMetaData) jmd.jdbcMetaData;
3800: }
3801:
3802: /**
3803: * Get all of the states for the class and others pulled in by the fetch group.
3804: */
3805: private void getAllStates(ApplicationContext context,
3806: ClassMetaData cmd, FetchGroup fg, StatesReturned all) {
3807: fg = fg.resolve(cmd);
3808: QueryDetails qd = new QueryDetails();
3809: qd.setBounded(true);
3810: qd.setCandidateClass(cmd.cls);
3811: qd.setFilter(null);
3812: qd.setFetchGroupIndex(fg.index);
3813: qd.updateCounts();
3814: JdbcCompiledQuery cq = (JdbcCompiledQuery) compileQuery(qd);
3815:
3816: QueryResultContainer qrc = new QueryResultContainer(all);
3817: qrc.init(cq);
3818:
3819: JdbcQueryResult res = null;
3820: if (cq.isEJBQLHack()) {
3821:
3822: res = new JdbcQueryResultEJBQL(this, cq, new Object[0],
3823: canUseCache());
3824:
3825: } else {
3826: res = new JdbcQueryResult(this, cq, new Object[0], false);
3827: }
3828:
3829: res.getAllResults(context, qrc, forUpdateField);
3830: }
3831:
3832: /**
3833: * Add all of the cached query results to the container. This returns
3834: * false if any of the states are no longer in the level 2 cache.
3835: */
3836: private boolean addToContainer(JdbcCompiledQuery cq,
3837: Object[] params, CachedQueryResult data,
3838: QueryResultContainer qContainer) {
3839: if (data.results != null) {
3840: qContainer.fillFrom(data);
3841: // this is to update the container with the oid-state pairs
3842: if (cq.isDefaultResult()) {
3843: //this is a query that only contains oids
3844: ArrayList res = data.results;
3845: int n = res.size();
3846: for (int i = 0; i < n; i++) {
3847: OID oid = (OID) res.get(i);
3848: if (oid == null)
3849: break;
3850: State s = cache.getState(oid, null);
3851: if (s == null) {
3852: cache.evict(cacheTx, cq, params);
3853: qContainer.reset();
3854: return false;
3855: }
3856: qContainer.container.add(oid, s);
3857: }
3858: } else {
3859: final int firstThisCol = cq.getFirstThisIndex();
3860: if (firstThisCol >= 0 && data.results != null) {
3861: // first put in all the primary query results in correct order
3862: ArrayList res = data.results;
3863: int n = res.size();
3864: for (int i = 0; i < n; i++) {
3865: Object[] row = (Object[]) res.get(i);
3866: OID oid = (OID) row[firstThisCol];
3867: State s = cache.getState(oid, null);
3868: if (s == null) {
3869: cache.evict(cacheTx, cq, params);
3870: qContainer.reset();
3871: return false;
3872: }
3873: qContainer.container.add(oid, s);
3874: }
3875: }
3876: }
3877: }
3878:
3879: // process indirect oids
3880: if (data.indirectOIDs != null) {
3881: OID[] indirectOIDs = data.indirectOIDs.oids;
3882: int n = indirectOIDs.length;
3883: for (int i = 0; i < n; i++) {
3884: OID oid = indirectOIDs[i];
3885: if (oid == null)
3886: break;
3887: State s = cache.getState(oid, null);
3888: if (s == null) {
3889: cache.evict(cacheTx, cq, params);
3890: qContainer.container.clear();
3891: qContainer.reset();
3892: return false;
3893: }
3894: qContainer.container.addIndirect(oid, s);
3895: }
3896: }
3897:
3898: qContainer.allResults = true;
3899: return true;
3900: }
3901:
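/**
 * Get the source of JDBC connections used by this storage manager.
 */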
3902: public JdbcConnectionSource getJdbcConnectionSource() {
3903: return conSrc;
3904: }
3905:
3906: /**
3907: * Count the number of open query results.
3908: */
3909: private int getOpenQueryResultCount() {
3910: int c = 0;
3911: for (JdbcQueryResult i = queryResultTail; i != null; i = i.next) c++;
3913: return c;
3914: }
3915:
3916: /**
3917: * Do we have a JDBC Connection?
3918: */
3919: public boolean hasDatastoreConnection() {
3920: return conx != null;
3921: }
3922:
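/**
 * Get status information for this storage manager as a Map. Currently only
 * the number of open query results is reported, under the
 * STATUS_OPEN_QUERY_RESULT_COUNT key. Illustrative usage (sketch only,
 * 'sm' is assumed to be a JdbcStorageManager instance):
 * <pre>
 * int open = ((Integer)sm.getStatus().get(
 *         JdbcStorageManager.STATUS_OPEN_QUERY_RESULT_COUNT)).intValue();
 * </pre>
 */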
3923: public Map getStatus() {
3924: Map m = new HashMap();
3925: m.put(STATUS_OPEN_QUERY_RESULT_COUNT, new Integer(
3926: getOpenQueryResultCount()));
3927: return m;
3928: }
3929:
3930: /**
3931: * Compares OIDs based on the referenceGraphIndex of their base class
3932: * meta data, using the class index to break ties.
3933: */
3934: public class OIDRefGraphIndexComparator implements Comparator {
3935:
3936: public int compare(Object o1, Object o2) {
3937: ClassMetaData ca = ((OID) o1).getAvailableClassMetaData();
3938: ClassMetaData cb = ((OID) o2).getAvailableClassMetaData();
3939: int diff = ca.referenceGraphIndex - cb.referenceGraphIndex;
3940: if (diff != 0)
3941: return diff;
3942: return ca.index - cb.index;
3943: }
3944:
3945: }
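
/*
 * Illustrative sketch (not from the original source): since
 * OIDRefGraphIndexComparator is a non-static inner class, it must be
 * created through a JdbcStorageManager instance. Assuming 'sm' is such an
 * instance and 'oids' is a List of OIDs, they can be ordered by reference
 * graph index like this:
 *
 *   Collections.sort(oids, sm.new OIDRefGraphIndexComparator());
 */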
3946:
3947: /**
3948: * Clear all non-auto set fields on states that are returned to the client.
3949: * This is done to save network traffic.
3950: */
3951: private void clearNonAutoSetFields(StatesReturned container) {
3952: for (Iterator i = container.iterator(); i.hasNext();) {
3953: EntrySet.Entry e = (EntrySet.Entry) i.next();
3954: State state = (State) e.getValue();
3955: if (state != null) {
3956: state.clearNonAutoSetFields();
3957: }
3958: }
3959: }
3960:
3961: public void setUserObject(Object o) {
3962: // ignore
3963: }
3964:
3965: }