Source Code Cross Referenced: BaseDataFileFactory.java (org.apache.derby.impl.store.raw.data, db-derby-10.2)



0001:        /*
0002:
0003:           Derby - Class org.apache.derby.impl.store.raw.data.BaseDataFileFactory
0004:
0005:           Licensed to the Apache Software Foundation (ASF) under one or more
0006:           contributor license agreements.  See the NOTICE file distributed with
0007:           this work for additional information regarding copyright ownership.
0008:           The ASF licenses this file to you under the Apache License, Version 2.0
0009:           (the "License"); you may not use this file except in compliance with
0010:           the License.  You may obtain a copy of the License at
0011:
0012:              http://www.apache.org/licenses/LICENSE-2.0
0013:
0014:           Unless required by applicable law or agreed to in writing, software
0015:           distributed under the License is distributed on an "AS IS" BASIS,
0016:           WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
0017:           See the License for the specific language governing permissions and
0018:           limitations under the License.
0019:
0020:         */
0021:
0022:        package org.apache.derby.impl.store.raw.data;
0023:
0024:        import org.apache.derby.iapi.reference.SQLState;
0025:        import org.apache.derby.iapi.reference.MessageId;
0026:
0027:        import org.apache.derby.impl.store.raw.data.AllocationActions;
0028:        import org.apache.derby.impl.store.raw.data.BaseContainerHandle;
0029:        import org.apache.derby.impl.store.raw.data.BasePage;
0030:        import org.apache.derby.impl.store.raw.data.DirectActions;
0031:        import org.apache.derby.impl.store.raw.data.LoggableActions;
0032:        import org.apache.derby.impl.store.raw.data.PageActions;
0033:        import org.apache.derby.impl.store.raw.data.RecordId;
0034:        import org.apache.derby.impl.store.raw.data.ReclaimSpace;
0035:
0036:        import org.apache.derby.iapi.services.info.ProductVersionHolder;
0037:        import org.apache.derby.iapi.services.info.ProductGenusNames;
0038:
0039:        import org.apache.derby.iapi.services.cache.CacheFactory;
0040:        import org.apache.derby.iapi.services.cache.CacheManager;
0041:        import org.apache.derby.iapi.services.cache.Cacheable;
0042:        import org.apache.derby.iapi.services.cache.CacheableFactory;
0043:        import org.apache.derby.iapi.services.context.ContextManager;
0044:        import org.apache.derby.iapi.services.daemon.DaemonService;
0045:        import org.apache.derby.iapi.services.daemon.Serviceable;
0046:        import org.apache.derby.iapi.services.monitor.ModuleControl;
0047:        import org.apache.derby.iapi.services.monitor.ModuleSupportable;
0048:        import org.apache.derby.iapi.services.monitor.Monitor;
0049:        import org.apache.derby.iapi.services.monitor.PersistentService;
0050:        import org.apache.derby.iapi.services.diag.Performance;
0051:        import org.apache.derby.iapi.services.sanity.SanityManager;
0052:        import org.apache.derby.iapi.services.io.FormatIdUtil;
0053:        import org.apache.derby.iapi.services.stream.HeaderPrintWriter;
0054:
0055:        import org.apache.derby.iapi.error.StandardException;
0056:        import org.apache.derby.iapi.services.i18n.MessageService;
0057:        import org.apache.derby.iapi.store.access.AccessFactoryGlobals;
0058:        import org.apache.derby.iapi.store.access.FileResource;
0059:        import org.apache.derby.iapi.store.access.TransactionController;
0060:        import org.apache.derby.iapi.store.raw.data.DataFactory;
0061:        import org.apache.derby.iapi.store.raw.data.RawContainerHandle;
0062:        import org.apache.derby.iapi.store.raw.log.LogFactory;
0063:        import org.apache.derby.iapi.store.raw.log.LogInstant;
0064:        import org.apache.derby.iapi.store.raw.ContainerHandle;
0065:        import org.apache.derby.iapi.store.raw.ContainerKey;
0066:        import org.apache.derby.iapi.store.raw.LockingPolicy;
0067:        import org.apache.derby.iapi.store.raw.Page;
0068:        import org.apache.derby.iapi.store.raw.RawStoreFactory;
0069:        import org.apache.derby.iapi.store.raw.RecordHandle;
0070:        import org.apache.derby.iapi.store.raw.StreamContainerHandle;
0071:        import org.apache.derby.iapi.store.raw.Transaction;
0072:        import org.apache.derby.iapi.store.raw.xact.RawTransaction;
0073:
0074:        import org.apache.derby.iapi.store.access.RowSource;
0075:
0076:        import org.apache.derby.io.StorageFactory;
0077:        import org.apache.derby.io.WritableStorageFactory;
0078:        import org.apache.derby.io.StorageFile;
0079:        import org.apache.derby.io.StorageRandomAccessFile;
0080:        import org.apache.derby.iapi.services.uuid.UUIDFactory;
0081:        import org.apache.derby.catalog.UUID;
0082:        import org.apache.derby.iapi.reference.Attribute;
0083:        import org.apache.derby.iapi.reference.Property;
0084:        import org.apache.derby.iapi.reference.SQLState;
0085:        import org.apache.derby.iapi.util.ByteArray;
0086:        import org.apache.derby.iapi.services.io.FileUtil;
0087:        import org.apache.derby.iapi.util.CheapDateFormatter;
0088:        import org.apache.derby.iapi.util.ReuseFactory;
0089:        import org.apache.derby.iapi.services.property.PropertyUtil;
0090:
0091:        import java.util.Properties;
0092:        import java.util.Hashtable;
0093:        import java.util.Enumeration;
0094:
0095:        import java.io.File;
0096:        import java.io.FilePermission;
0097:        import java.io.OutputStream;
0098:        import java.io.IOException;
0099:
0100:        import java.security.AccessController;
0101:        import java.security.PrivilegedAction;
0102:        import java.security.PrivilegedExceptionAction;
0103:        import java.security.PrivilegedActionException;
0104:
0105:        /**
0106:
0107:         Provides the abstract class with most of the implementation of DataFactory and
0108:         ModuleControl shared by all the different filesystem implementations.
0109:         <p>
0110:         RESOLVE (mikem - 2/19/98) -
0111:         Currently only getContainerClass() is abstract, there are probably more 
0112:         routines which should be abstract.  Also the other implementations should 
0113:         probably inherit from the abstract class, rather than from the DataFileFactory
0114:         class.  Also there probably should be a generic directory and the rest of the
0115:         filesystem implementations parallel to it.
0116:         I wanted to limit the changes going into the branch and then fix 
0117:         inheritance stuff in main.
0118:         <p>
0119:         The code in this class was moved over from DataFileFactory.java and then
0120:         that file was made to inherit from this one.
0121:
0122:         **/
0123:
0124:        public final class BaseDataFileFactory implements  DataFactory,
0125:                CacheableFactory, ModuleControl, ModuleSupportable,
0126:                PrivilegedExceptionAction {
0127:
0128:            private String subSubProtocol;
0129:            StorageFactory storageFactory;
0130:
0131:            /* writableStorageFactory == (WritableStorageFactory) storageFactory if 
0132:             * storageFactory also implements WritableStorageFactory, null if the 
0133:             * storageFactory is read-only.
0134:             */
0135:            WritableStorageFactory writableStorageFactory;
0136:
0137:            private long nextContainerId = System.currentTimeMillis();
0138:            private boolean databaseEncrypted;
0139:
0140:            private CacheManager pageCache;
0141:            private CacheManager containerCache;
0142:
0143:            private LogFactory logFactory;
0144:
0145:            private ProductVersionHolder jbmsVersion;
0146:
0147:            private RawStoreFactory rawStoreFactory; // associated raw store factory
0148:
0149:            private String dataDirectory; // root directory of files.
0150:
0151:            private boolean throwDBlckException; // if true throw db.lck
0152:            // exception, even on systems
0153:            // where lock file is not
0154:            // guaranteed.
0155:
0156:            private UUID identifier; // unique id for locking 
0157:
0158:            private Object freezeSemaphore;
0159:
0160:            // is the data store frozen - protected by freezeSemaphore
0161:            private boolean isFrozen;
0162:
0163:            // how many writers are currently active in the data store - 
0164:            // protected by freezeSemaphore
0165:            private int writersInProgress;
0166:
0167:            private boolean removeStubsOK;
0168:            private boolean isCorrupt;
0169:
0170:            // the database is being created, no logging
0171:            private boolean inCreateNoLog;
0172:
0173:            // lock against other JBMS opening the same database
0174:            private StorageRandomAccessFile fileLockOnDB;
0175:            private StorageFile exFileLock; //file handle to get exclusive lock
0176:            private HeaderPrintWriter istream;
0177:            private static final String LINE = "----------------------------------------------------------------";
0178:
0179:            // disable syncing of data during page allocation.  DERBY-888 changes
0180:            // the system to not require data syncing at allocation.  
0181:            boolean dataNotSyncedAtAllocation = true;
0182:
0183:            // disable syncing of data during checkpoint.
0184:            boolean dataNotSyncedAtCheckpoint = false;
0185:
0186:            // these fields can be accessed directly by subclasses if they need a
0187:            // different set of actions
0188:            private PageActions loggablePageActions;
0189:            private AllocationActions loggableAllocActions;
0190:
0191:            private boolean readOnly; // is this a read only data store
0192:            private boolean supportsRandomAccess;
0193:            private FileResource fileHandler; // my file handler, set by a 
0194:            // sub-class in its boot method.
0195:
0196:            // hash table to keep track of information about dropped container stubs
0197:            private Hashtable droppedTableStubInfo;
0198:
0199:            private Hashtable postRecoveryRemovedFiles;
0200:
0201:            private EncryptData containerEncrypter;
0202:
0203:            // PrivilegedAction actions
0204:            private int actionCode;
0205:            private static final int GET_TEMP_DIRECTORY_ACTION = 1;
0206:            private static final int REMOVE_TEMP_DIRECTORY_ACTION = 2;
0207:            private static final int GET_CONTAINER_PATH_ACTION = 3;
0208:            private static final int GET_ALTERNATE_CONTAINER_PATH_ACTION = 4;
0209:            private static final int FIND_MAX_CONTAINER_ID_ACTION = 5;
0210:            private static final int DELETE_IF_EXISTS_ACTION = 6;
0211:            private static final int GET_PATH_ACTION = 7;
0212:            private static final int POST_RECOVERY_REMOVE_ACTION = 8;
0213:            private static final int REMOVE_STUBS_ACTION = 9;
0214:            private static final int BOOT_ACTION = 10;
0215:            private static final int GET_LOCK_ON_DB_ACTION = 11;
0216:            private static final int RELEASE_LOCK_ON_DB_ACTION = 12;
0217:            private static final int RESTORE_DATA_DIRECTORY_ACTION = 13;
0218:            private static final int GET_CONTAINER_NAMES_ACTION = 14;
0219:
0220:            private ContainerKey containerId;
0221:            private boolean stub;
0222:            private StorageFile actionFile;
0223:            private UUID myUUID;
0224:            private UUIDFactory uuidFactory;
0225:            private String databaseDirectory;
0226:
0227:            private String backupPath;
0228:            private File backupRoot;
0229:            private String[] bfilelist;
0230:
0231:            /*
0232:             ** Constructor
0233:             */
0234:
0235:            public BaseDataFileFactory() {
0236:            }
0237:
0238:            /*
0239:             ** Methods of ModuleControl
0240:             */
0241:
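             /**
                 Check whether this data factory can support the service described
                 by the start parameters: the service type must be one this
                 factory handles and a root directory must be specified.
              */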
0242:            public boolean canSupport(Properties startParams) {
0243:
0244:                String serviceType = startParams
0245:                        .getProperty(PersistentService.TYPE);
0246:                if (serviceType == null)
0247:                    return false;
0248:
0249:                if (!handleServiceType(serviceType))
0250:                    return false;
0251:
0252:                if (startParams.getProperty(PersistentService.ROOT) == null)
0253:                    return false;
0254:
0255:                return true;
0256:            }
0257:
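             /**
                 Boot this data factory: resolve the data directory, create the
                 storage factory, lock the database (unless it is read-only),
                 optionally restore the data directory from a backup, create the
                 page and container caches, honor the derby.system.durability
                 test mode, and create the file handler.
              */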
0258:            public void boot(boolean create, Properties startParams)
0259:                    throws StandardException {
0260:
0261:                jbmsVersion = Monitor.getMonitor().getEngineVersion();
0262:
0263:                dataDirectory = startParams.getProperty(PersistentService.ROOT);
0264:
0265:                UUIDFactory uf = Monitor.getMonitor().getUUIDFactory();
0266:
0267:                identifier = uf.createUUID();
0268:
0269:                PersistentService ps = Monitor.getMonitor()
0270:                        .getServiceType(this);
0271:
0272:                try {
0273:                    storageFactory = ps
0274:                            .getStorageFactoryInstance(
0275:                                    true,
0276:                                    dataDirectory,
0277:                                    startParams
0278:                                            .getProperty(
0279:                                                    Property.STORAGE_TEMP_DIRECTORY,
0280:                                                    PropertyUtil
0281:                                                            .getSystemProperty(Property.STORAGE_TEMP_DIRECTORY)),
0282:                                    identifier.toANSIidentifier());
0283:                } catch (IOException ioe) {
0284:                    if (create) {
0285:                        throw StandardException.newException(
0286:                                SQLState.SERVICE_DIRECTORY_CREATE_ERROR, ioe,
0287:                                dataDirectory);
0288:                    } else {
0289:                        throw StandardException
0290:                                .newException(SQLState.DATABASE_NOT_FOUND, ioe,
0291:                                        dataDirectory);
0292:                    }
0293:                }
0294:
0295:                if (storageFactory instanceof WritableStorageFactory)
0296:                    writableStorageFactory = (WritableStorageFactory) storageFactory;
0297:
0298:                actionCode = BOOT_ACTION;
0299:
0300:                try {
0301:                    AccessController.doPrivileged(this);
0302:                } catch (PrivilegedActionException pae) {
0303:                    // BOOT_ACTION does not throw any exceptions.
0304:                }
0305:
0306:                String value = startParams
0307:                        .getProperty(
0308:                                Property.FORCE_DATABASE_LOCK,
0309:                                PropertyUtil
0310:                                        .getSystemProperty(Property.FORCE_DATABASE_LOCK));
0311:                throwDBlckException = Boolean.valueOf(
0312:                        (value != null ? value.trim() : value)).booleanValue();
0313:
0314:                if (!isReadOnly()) // read only db, not interested in filelock
0315:                    getJBMSLockOnDB(identifier, uf, dataDirectory);
0316:
0317:                // If the database is being restored/created from a backup,
0318:                // then restore the data directory (seg*) from the backup.
0319:                String restoreFrom = null;
0320:                restoreFrom = startParams.getProperty(Attribute.CREATE_FROM);
0321:                if (restoreFrom == null)
0322:                    restoreFrom = startParams
0323:                            .getProperty(Attribute.RESTORE_FROM);
0324:                if (restoreFrom == null)
0325:                    restoreFrom = startParams
0326:                            .getProperty(Attribute.ROLL_FORWARD_RECOVERY_FROM);
0327:
0328:                if (restoreFrom != null) {
0329:                    try {
0330:                        // restoreFrom and createFrom operations also need to know if database 
0331:                        // is encrypted
0332:                        String dataEncryption = startParams
0333:                                .getProperty(Attribute.DATA_ENCRYPTION);
0334:                        databaseEncrypted = Boolean.valueOf(dataEncryption)
0335:                                .booleanValue();
0336:                        restoreDataDirectory(restoreFrom);
0337:                    } catch (StandardException se) {
0338:                        releaseJBMSLockOnDB();
0339:                        throw se;
0340:                    }
0341:                }
0342:
0343:                logMsg(LINE);
0344:                long bootTime = System.currentTimeMillis();
0345:                String readOnlyMsg = (isReadOnly()) ? MessageService
0346:                        .getTextMessage(MessageId.STORE_BOOT_READONLY_MSG) : "";
0347:
0348:                logMsg(CheapDateFormatter.formatDate(bootTime)
0349:                        + MessageService.getTextMessage(
0350:                                MessageId.STORE_BOOT_MSG, jbmsVersion,
0351:                                identifier, dataDirectory, readOnlyMsg));
0352:
0353:                uf = null;
0354:
0355:                CacheFactory cf = (CacheFactory) Monitor
0356:                        .startSystemModule(org.apache.derby.iapi.reference.Module.CacheFactory);
0357:
0358:                int pageCacheSize = getIntParameter(
0359:                        RawStoreFactory.PAGE_CACHE_SIZE_PARAMETER, null,
0360:                        RawStoreFactory.PAGE_CACHE_SIZE_DEFAULT,
0361:                        RawStoreFactory.PAGE_CACHE_SIZE_MINIMUM,
0362:                        RawStoreFactory.PAGE_CACHE_SIZE_MAXIMUM);
0363:
0364:                pageCache = cf.newCacheManager(this, "PageCache",
0365:                        pageCacheSize / 2, pageCacheSize);
0366:
0367:                int fileCacheSize = getIntParameter(
0368:                        "derby.storage.fileCacheSize", null, 100, 2, 100);
0369:
0370:                containerCache = cf.newCacheManager(this, "ContainerCache",
0371:                        fileCacheSize / 2, fileCacheSize);
0372:
0373:                if (create) {
0374:                    String noLog = startParams
0375:                            .getProperty(Property.CREATE_WITH_NO_LOG);
0376:
0377:                    inCreateNoLog = (noLog != null && Boolean.valueOf(noLog)
0378:                            .booleanValue());
0379:
0380:                }
0381:
0382:                freezeSemaphore = new Object();
0383:
0384:                droppedTableStubInfo = new Hashtable();
0385:
0386:                // If derby.system.durability=test then set flags to disable sync of
0387:                // data pages at allocation when file is grown, disable sync of data
0388:                // writes during checkpoint
0389:                if (Property.DURABILITY_TESTMODE_NO_SYNC
0390:                        .equalsIgnoreCase(PropertyUtil
0391:                                .getSystemProperty(Property.DURABILITY_PROPERTY))) {
0392:                    // - disable syncing of data during checkpoint.
0393:                    dataNotSyncedAtCheckpoint = true;
0394:
0395:                    // log a message stating that derby.system.durability
0396:                    // is set to a mode where syncs won't be forced, and note the
0397:                    // possible consequences of setting this mode
0398:                    Monitor.logMessage(MessageService.getTextMessage(
0399:                            MessageId.STORE_DURABILITY_TESTMODE_NO_SYNC,
0400:                            Property.DURABILITY_PROPERTY,
0401:                            Property.DURABILITY_TESTMODE_NO_SYNC));
0402:                } else if (Performance.MEASURE) {
0403:                    // development-build-only feature: the Performance.MEASURE
0404:                    // variable must be set by hand and the code rebuilt.  Useful
0405:                    // during development to compare/contrast the effect of syncing;
0406:                    // release users can use the above relaxed durability option to
0407:                    // disable all syncing.
0408:
0409:                    // debug only flag - disable syncing of data during checkpoint.
0410:                    dataNotSyncedAtCheckpoint = PropertyUtil
0411:                            .getSystemBoolean(Property.STORAGE_DATA_NOT_SYNCED_AT_CHECKPOINT);
0412:
0413:                    if (dataNotSyncedAtCheckpoint)
0414:                        Monitor
0415:                                .logMessage("Warning: "
0416:                                        + Property.STORAGE_DATA_NOT_SYNCED_AT_CHECKPOINT
0417:                                        + " set to true.");
0418:                }
0419:
0420:                fileHandler = new RFResource(this);
0421:            } // end of boot
0422:
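             /**
                 Stop this data factory: stop the raw store daemon, log the
                 shutdown message, shut down the page and container caches when
                 the store is not corrupt, remove the temporary directory and,
                 for writable stores, remove dropped-container stubs and release
                 the lock on the database.
              */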
0423:            public void stop() {
0424:                boolean OK = false;
0425:
0426:                if (rawStoreFactory != null) {
0427:                    DaemonService rawStoreDaemon = rawStoreFactory.getDaemon();
0428:                    if (rawStoreDaemon != null)
0429:                        rawStoreDaemon.stop();
0430:                }
0431:
0432:                long shutdownTime = System.currentTimeMillis();
0433:                logMsg("\n"
0434:                        + CheapDateFormatter.formatDate(shutdownTime)
0435:                        + MessageService.getTextMessage(
0436:                                MessageId.STORE_SHUTDOWN_MSG, getIdentifier()));
0437:                istream.println(LINE);
0438:
0439:                if (!isCorrupt) {
0440:                    try {
0441:                        if (pageCache != null && containerCache != null) {
0442:                            pageCache.shutdown();
0443:                            containerCache.shutdown();
0444:
0445:                            OK = true;
0446:                        }
0447:
0448:                    } catch (StandardException se) {
0449:                        se.printStackTrace(istream.getPrintWriter());
0450:                    }
0451:                }
0452:
0453:                removeTempDirectory();
0454:
0455:                if (isReadOnly()) // do enough to close all files, then return 
0456:                {
0457:                    return;
0458:                }
0459:
0460:                // re-enable stub removal until a better method can be found.
0461:                // only remove stub if caches are cleaned
0462:                if (removeStubsOK && OK)
0463:                    removeStubs();
0464:
0465:                releaseJBMSLockOnDB();
0466:            } // end of stop
0467:
0468:            /*
0469:             ** CacheableFactory
0470:             */
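             /**
                 Create a new Cacheable for the given cache manager: a
                 StoredPage for the page cache, otherwise a container object
                 for the container cache.
              */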
0471:            public Cacheable newCacheable(CacheManager cm) {
0472:                if (cm == pageCache) {
0473:                    StoredPage sp = new StoredPage();
0474:                    sp.setFactory(this);
0475:                    return sp;
0476:                }
0477:
0478:                // container cache
0479:                return newContainerObject();
0480:            }
0481:
0482:            /**
0483:            	Database creation finished
0484:
0485:            	@exception StandardException Standard cloudscape exception policy.
0486:             */
0487:            public void createFinished() throws StandardException {
0488:                if (!inCreateNoLog) {
0489:                    throw StandardException
0490:                            .newException(SQLState.FILE_DATABASE_NOT_IN_CREATE);
0491:                }
0492:
0493:                // the changes in cache are not logged, they have to be flushed to disk
0494:                checkpoint();
0495:                inCreateNoLog = false;
0496:            }
0497:
0498:            /*
0499:             ** Methods of DataFactory
0500:             */
0501:
0502:            public ContainerHandle openContainer(RawTransaction t,
0503:                    ContainerKey containerId, LockingPolicy locking, int mode)
0504:                    throws StandardException {
0505:                return openContainer(t, containerId, locking, mode, false /* is not dropped */);
0506:            }
0507:
0508:            /**
0509:            	@see DataFactory#openDroppedContainer
0510:            	@exception StandardException Standard Cloudscape error policy
0511:             */
0512:            public RawContainerHandle openDroppedContainer(RawTransaction t,
0513:                    ContainerKey containerId, LockingPolicy locking, int mode)
0514:                    throws StandardException {
0515:                // since we are opening a possible dropped container
0516:                // let's not add any actions that will take place on a commit.
0517:                mode |= ContainerHandle.MODE_NO_ACTIONS_ON_COMMIT;
0518:
0519:                return openContainer(t, containerId, locking, mode, true /* droppedOK */);
0520:            }
0521:
0522:            /**
0523:            	@see DataFactory#openContainer
0524:            	@exception StandardException Standard Cloudscape error policy
0525:             */
0526:            private RawContainerHandle openContainer(RawTransaction t,
0527:                    ContainerKey identity, LockingPolicy locking, int mode,
0528:                    boolean droppedOK) throws StandardException {
0529:
0530:                if (SanityManager.DEBUG) {
0531:
0532:                    if ((mode & (ContainerHandle.MODE_READONLY | ContainerHandle.MODE_FORUPDATE)) == (ContainerHandle.MODE_READONLY | ContainerHandle.MODE_FORUPDATE)) {
0533:                        SanityManager
0534:                                .THROWASSERT("update and readonly mode specified");
0535:                    }
0536:
0537:                }
0538:
0539:                boolean waitForLock = ((mode & ContainerHandle.MODE_LOCK_NOWAIT) == 0);
0540:
0541:                if ((mode & ContainerHandle.MODE_OPEN_FOR_LOCK_ONLY) != 0) {
0542:                    // Open a container for lock only, we don't care if it exists, is 
0543:                    // deleted or anything about it. The container handle we return is
0544:                    // closed and cannot be used for fetch or update etc.
0545:                    BaseContainerHandle lockOnlyHandle = new BaseContainerHandle(
0546:                            getIdentifier(), t, identity, locking, mode);
0547:
0548:                    if (lockOnlyHandle.useContainer(true, waitForLock))
0549:                        return lockOnlyHandle;
0550:                    else
0551:                        return null;
0552:                }
0553:
0554:                BaseContainerHandle c;
0555:
0556:                // see if the container exists	
0557:                FileContainer container = (FileContainer) containerCache
0558:                        .find(identity);
0559:                if (container == null)
0560:                    return null;
0561:
0562:                if (identity.getSegmentId() == ContainerHandle.TEMPORARY_SEGMENT) {
0563:
0564:                    if (SanityManager.DEBUG) {
0565:                        SanityManager
0566:                                .ASSERT(container instanceof TempRAFContainer);
0567:                    }
0568:
0569:                    if ((mode & ContainerHandle.MODE_TEMP_IS_KEPT) == ContainerHandle.MODE_TEMP_IS_KEPT) {
0570:                        // if the mode is kept, then we do not want to truncate
0571:                        mode |= ContainerHandle.MODE_UNLOGGED;
0572:                    } else {
0573:                        // this should be OK even if the table was opened read-only
0574:                        mode |= (ContainerHandle.MODE_UNLOGGED | ContainerHandle.MODE_TRUNCATE_ON_ROLLBACK);
0575:                    }
0576:
0577:                    locking = t.newLockingPolicy(LockingPolicy.MODE_NONE,
0578:                            TransactionController.ISOLATION_NOLOCK, true);
0579:                } else {
0580:                    // real tables
0581:                    if (inCreateNoLog) {
0582:                        mode |= (ContainerHandle.MODE_UNLOGGED | ContainerHandle.MODE_CREATE_UNLOGGED);
0583:                    } else {
0584:
0585:                        // make sure everything is logged if logArchived is turned on
0586:                        // clear all UNLOGGED flag
0587:                        if (logFactory.logArchived()) {
0588:                            mode &= ~(ContainerHandle.MODE_UNLOGGED | ContainerHandle.MODE_CREATE_UNLOGGED);
0589:
0590:                        } else {
0591:
0592:                            // block the online backup if the container is being 
0593:                            // opened in unlogged mode, if the backup is already 
0594:                            // running then convert all unlogged opens to logged ones,
0595:                            // otherwise online backup copy will be inconsistent.
0596:
0597:                            if (((mode & ContainerHandle.MODE_UNLOGGED) == ContainerHandle.MODE_UNLOGGED)
0598:                                    || ((mode & ContainerHandle.MODE_CREATE_UNLOGGED) == ContainerHandle.MODE_CREATE_UNLOGGED)) {
0599:                                if (!t.blockBackup(false)) {
0600:                                    // when a backup is in progress transaction can not
0601:                                    // block the backup, so convert  unlogged opens 
0602:                                    // to logged mode.
0603:                                    mode &= ~(ContainerHandle.MODE_UNLOGGED | ContainerHandle.MODE_CREATE_UNLOGGED);
0604:                                }
0605:                            }
0606:
0607:                        }
0608:
0609:                    }
0610:
0611:                    // if mode is UNLOGGED but not CREATE_UNLOGGED, then force the
0612:                    // container from cache when the transaction commits.  For
0613:                    // CREATE_UNLOGGED, client has the responsibility of forcing the
0614:                    // cache. 
0615:                    if (((mode & ContainerHandle.MODE_UNLOGGED) == ContainerHandle.MODE_UNLOGGED)
0616:                            && ((mode & ContainerHandle.MODE_CREATE_UNLOGGED) == 0)) {
0617:                        mode |= ContainerHandle.MODE_FLUSH_ON_COMMIT;
0618:                    }
0619:                }
0620:
0621:                PageActions pageActions = null;
0622:                AllocationActions allocActions = null;
0623:
0624:                if ((mode & ContainerHandle.MODE_FORUPDATE) == ContainerHandle.MODE_FORUPDATE) {
0625:
0626:                    if ((mode & ContainerHandle.MODE_UNLOGGED) == 0) {
0627:                        // get the current loggable actions
0628:                        pageActions = getLoggablePageActions();
0629:                        allocActions = getLoggableAllocationActions();
0630:
0631:                    } else {
0632:                        // unlogged
0633:                        pageActions = new DirectActions();
0634:                        allocActions = new DirectAllocActions();
0635:                    }
0636:                }
0637:
0638:                c = new BaseContainerHandle(getIdentifier(), t, pageActions,
0639:                        allocActions, locking, container, mode);
0640:
0641:                // see if we can use the container
0642:                try {
0643:                    if (!c.useContainer(droppedOK, waitForLock)) {
0644:                        containerCache.release(container);
0645:                        return null;
0646:                    }
0647:                } catch (StandardException se) {
0648:                    containerCache.release(container);
0649:                    throw se;
0650:                }
0651:
0652:                return c;
0653:            }
0654:
0655:            /** Add a container with a specified page size to a segment.
0656:            	@exception StandardException Standard Cloudscape error policy
0657:             */
0658:            public long addContainer(RawTransaction t, long segmentId,
0659:                    long input_containerid, int mode,
0660:                    Properties tableProperties, int temporaryFlag)
0661:                    throws StandardException {
0662:                if (SanityManager.DEBUG) {
0663:                    if ((mode & ContainerHandle.MODE_CREATE_UNLOGGED) != 0)
0664:                        SanityManager
0665:                                .ASSERT(
0666:                                        (mode & ContainerHandle.MODE_UNLOGGED) != 0,
0667:                                        "cannot have CREATE_UNLOGGED set but UNLOGGED not set");
0668:                }
0669:
0670:                // If client has provided a containerid then use it, else use the 
0671:                // internally generated one from getNextId().
0672:                long containerId = ((input_containerid != ContainerHandle.DEFAULT_ASSIGN_ID) ? input_containerid
0673:                        : getNextId());
0674:
0675:                ContainerKey identity = new ContainerKey(segmentId, containerId);
0676:
0677:                boolean tmpContainer = (segmentId == ContainerHandle.TEMPORARY_SEGMENT);
0678:
0679:                ContainerHandle ch = null;
0680:                LockingPolicy cl = null;
0681:
0682:                if (!tmpContainer) {
0683:                    // lock the container before we create it.
0684:
0685:                    if (isReadOnly()) {
0686:                        throw StandardException
0687:                                .newException(SQLState.DATA_CONTAINER_READ_ONLY);
0688:                    }
0689:
0690:                    cl = t.newLockingPolicy(LockingPolicy.MODE_CONTAINER,
0691:                            TransactionController.ISOLATION_SERIALIZABLE, true);
0692:
0693:                    if (SanityManager.DEBUG)
0694:                        SanityManager.ASSERT(cl != null);
0695:
0696:                    ch = t
0697:                            .openContainer(
0698:                                    identity,
0699:                                    cl,
0700:                                    (ContainerHandle.MODE_FORUPDATE | ContainerHandle.MODE_OPEN_FOR_LOCK_ONLY));
0701:                }
0702:
0703:                FileContainer container = (FileContainer) containerCache
0704:                        .create(identity, tableProperties);
0705:
0706:                // create the first alloc page and the first user page, 
0707:                // if this fails for any reason the transaction
0708:                // will roll back and the container will be dropped (removed)
0709:                ContainerHandle containerHdl = null;
0710:                Page firstPage = null;
0711:
0712:                try {
0713:                    // if opening a temporary container with IS_KEPT flag set,
0714:                    // make sure to open it with IS_KEPT too.
0715:                    if (tmpContainer
0716:                            && ((temporaryFlag & TransactionController.IS_KEPT) == TransactionController.IS_KEPT)) {
0717:
0718:                        mode |= ContainerHandle.MODE_TEMP_IS_KEPT;
0719:                    }
0720:
0721:                    // open no-locking as we already have the container locked
0722:                    containerHdl = t.openContainer(identity, null,
0723:                            (ContainerHandle.MODE_FORUPDATE | mode));
0724:
0725:                    // we just added it, containerHdl should not be null
0726:                    if (SanityManager.DEBUG)
0727:                        SanityManager.ASSERT(containerHdl != null);
0728:
0729:                    if (!tmpContainer) {
0730:                        // make it persistent (in concept if not in reality)
0731:                        RawContainerHandle rch = (RawContainerHandle) containerHdl;
0732:
0733:                        ContainerOperation lop = new ContainerOperation(rch,
0734:                                ContainerOperation.CREATE);
0735:
0736:                        // mark the container as pre-dirtied so that if a checkpoint
0737:                        // happens after the log record is sent to the log stream, the
0738:                        // cache cleaning will wait for this change.
0739:                        rch.preDirty(true);
0740:                        try {
0741:                            t.logAndDo(lop);
0742:
0743:                            // flush the log to reduce the window between where
0744:                            // the container is created & synced and the log record
0745:                            // for it makes it to disk. If we fail in this
0746:                            // window we will leave a stranded container file.
0747:                            flush(t.getLastLogInstant());
0748:                        } finally {
0749:                            // in case logAndDo fail, make sure the container is not
0750:                            // stuck in preDirty state.
0751:                            rch.preDirty(false);
0752:                        }
0753:                    }
0754:
0755:                    firstPage = containerHdl.addPage();
0756:
0757:                } finally {
0758:
0759:                    if (firstPage != null) {
0760:                        firstPage.unlatch();
0761:                        firstPage = null;
0762:                    }
0763:
0764:                    containerCache.release(container);
0765:
0766:                    if (containerHdl != null) {
0767:                        containerHdl.close();
0768:                        containerHdl = null;
0769:                    }
0770:
0771:                    if (!tmpContainer) {
0772:                        // this should do nothing, since we requested isolation 3
0773:                        // but we can't assume that, so call the policy correctly.
0774:
0775:                        cl.unlockContainer(t, ch);
0776:                    }
0777:                }
0778:
0779:                return containerId;
0780:            }
0781:
0782:            /** Add and load a stream container
0783:            	@exception StandardException Standard Cloudscape error policy
0784:             */
0785:            public long addAndLoadStreamContainer(RawTransaction t,
0786:                    long segmentId, Properties tableProperties,
0787:                    RowSource rowSource) throws StandardException {
0788:                long containerId = getNextId();
0789:
0790:                ContainerKey identity = new ContainerKey(segmentId, containerId);
0791:
0792:                // create and load the stream container
0793:                StreamFileContainer sContainer = new StreamFileContainer(
0794:                        identity, this, tableProperties);
0795:                sContainer.load(rowSource);
0796:
0797:                return containerId;
0798:            }
0799:
0800:            /**
0801:            	Open an existing streamContainer
0802:
0803:            	@see DataFactory#openStreamContainer
0804:            	@exception StandardException Standard Cloudscape error policy
0805:             */
0806:            public StreamContainerHandle openStreamContainer(RawTransaction t,
0807:                    long segmentId, long containerId, boolean hold)
0808:                    throws StandardException {
0809:
0810:                ContainerKey identity = new ContainerKey(segmentId, containerId);
0811:
0812:                StreamFileContainerHandle c;
0813:
0814:                // open the container with the identity
0815:                StreamFileContainer container = new StreamFileContainer(
0816:                        identity, this);
0817:                container = container.open(false);
0818:                if (container == null)
0819:                    return null;
0820:
0821:                c = new StreamFileContainerHandle(getIdentifier(), t,
0822:                        container, hold);
0823:
0824:                // see if we can use the container
0825:                if (c.useContainer())
0826:                    return c;
0827:                else
0828:                    return null;
0829:            }
0830:
0831:            /**
0832:            	Drop a stream container.
0833:
0834:                <P><B>Synchronisation</B>
0835:            	<P>
0836:            	This call will remove the container.
0837:
0838:            	@exception StandardException Standard Cloudscape error policy
0839:             */
0840:            public void dropStreamContainer(RawTransaction t, long segmentId,
0841:                    long containerId) throws StandardException {
0842:
0843:                boolean tmpContainer = (segmentId == ContainerHandle.TEMPORARY_SEGMENT);
0844:
0845:                StreamContainerHandle containerHdl = null;
0846:
0847:                try {
0848:                    ContainerKey ckey = new ContainerKey(segmentId, containerId);
0849:
0850:                    // close all open containers and 'onCommit' objects of the container
0851:                    t.notifyObservers(ckey);
0852:
0853:                    containerHdl = t.openStreamContainer(segmentId,
0854:                            containerId, false);
0855:                    if (tmpContainer && (containerHdl != null)) {
0856:                        containerHdl.removeContainer();
0857:                        return;
0858:                    }
0859:                } finally {
0860:                    if (containerHdl != null)
0861:                        containerHdl.close();
0862:                }
0863:            }
0864:
0865:            /**
0866:            	Re-create a container during redo recovery.
0867:
0868:            	Called ONLY during recovery load tran.
0869:
0870:            	@exception StandardException Standard Cloudscape Error policy
0871:             */
0872:            public void reCreateContainerForRedoRecovery(RawTransaction t,
0873:                    long segmentId, long containerId, ByteArray containerInfo)
0874:                    throws StandardException {
0875:                if (SanityManager.DEBUG)
0876:                    SanityManager.ASSERT(
0877:                            segmentId != ContainerHandle.TEMPORARY_SEGMENT,
0878:                            "Cannot recreate temp container during load tran");
0879:
0880:                ContainerKey identity = new ContainerKey(segmentId, containerId);
0881:
0882:                // no need to lock container during load tran
0883:                // no need to create any page for the container, they will be created
0884:                // as their log records are encountered later in load tran
0885:
0886:                FileContainer container = (FileContainer) containerCache
0887:                        .create(identity, containerInfo);
0888:
0889:                containerCache.release(container);
0890:            }
0891:
0892:            /**
0893:            	Drop a container.
0894:
0895:                <P><B>Synchronisation</B>
0896:            	<P>
0897:            	This call will mark the container as dropped and then obtain a CX lock
0898:            	(table level exclusive lock) on the container. Once a container has 
0899:                been marked as dropped it cannot be retrieved by an openContainer() 
0900:                call unless explicitly with droppedOK.
0901:            	<P>
0902:            	Once the exclusive lock has been obtained the container is removed
0903:            	and all its pages deallocated. The container will be fully removed
0904:            	at the commit time of the transaction.
0905:
0906:            	@exception StandardException Standard Cloudscape error policy
0907:             */
0908:            public void dropContainer(RawTransaction t, ContainerKey ckey)
0909:                    throws StandardException {
0910:                boolean tmpContainer = (ckey.getSegmentId() == ContainerHandle.TEMPORARY_SEGMENT);
0911:
0912:                LockingPolicy cl = null;
0913:
0914:                if (!tmpContainer) {
0915:                    if (isReadOnly()) {
0916:                        throw StandardException
0917:                                .newException(SQLState.DATA_CONTAINER_READ_ONLY);
0918:                    }
0919:
0920:                    cl = t.newLockingPolicy(LockingPolicy.MODE_CONTAINER,
0921:                            TransactionController.ISOLATION_SERIALIZABLE, true);
0922:
0923:                    if (SanityManager.DEBUG)
0924:                        SanityManager.ASSERT(cl != null);
0925:                }
0926:
0927:                // close all open containers and 'onCommit' objects of this container
0928:                t.notifyObservers(ckey);
0929:
0930:                RawContainerHandle containerHdl = (RawContainerHandle) t
0931:                        .openContainer(ckey, cl, ContainerHandle.MODE_FORUPDATE);
0932:
0933:                // If container is already dropped or is no longer there, throw
0934:                // containerVanished exception unless container is temporary, in that
0935:                // case just return.  Upper layer is supposed to prevent such from
0936:                // happening through some means other than the lock we are getting here.
0937:                try {
0938:                    if (containerHdl == null
0939:                            || containerHdl.getContainerStatus() != RawContainerHandle.NORMAL) {
0940:                        // If we are a temp container, don't worry about it.
0941:                        if (tmpContainer) {
0942:                            if (containerHdl != null)
0943:                                containerHdl.removeContainer((LogInstant) null);
0944:                            return;
0945:                        } else {
0946:                            throw StandardException.newException(
0947:                                    SQLState.DATA_CONTAINER_VANISHED, ckey);
0948:                        }
0949:                    }
0950:
0951:                    // Container exist, is updatable and we got the lock.
0952:                    if (tmpContainer) {
0953:                        containerHdl.dropContainer((LogInstant) null, true);
0954:                        containerHdl.removeContainer((LogInstant) null);
0955:                    } else {
0956:                        ContainerOperation lop = new ContainerOperation(
0957:                                containerHdl, ContainerOperation.DROP);
0958:
0959:                        // mark the container as pre-dirtied so that if a checkpoint
0960:                        // happens after the log record is sent to the log stream, the
0961:                        // cache cleaning will wait for this change.
0962:                        containerHdl.preDirty(true);
0963:                        try {
0964:                            t.logAndDo(lop);
0965:                        } finally {
0966:                            // in case logAndDo fail, make sure the container is not
0967:                            // stuck in preDirty state.
0968:                            containerHdl.preDirty(false);
0969:                        }
0970:
0971:                        // remember this as a post commit work item
0972:                        Serviceable p = new ReclaimSpace(
0973:                                ReclaimSpace.CONTAINER, ckey, this, true /* service ASAP */);
0974:
0975:                        if (SanityManager.DEBUG) {
0976:                            if (SanityManager
0977:                                    .DEBUG_ON(DaemonService.DaemonTrace)) {
0978:                                SanityManager.DEBUG(DaemonService.DaemonTrace,
0979:                                        "Add post commit work " + p);
0980:                            }
0981:                        }
0982:
0983:                        t.addPostCommitWork(p);
0984:                    }
0985:
0986:                } finally {
0987:                    if (containerHdl != null)
0988:                        containerHdl.close();
0989:                }
0990:
0991:            }
0992:
0993:            /**
0994:             * Implement checkpoint operation, write/sync all pages in cache.
0995:             * <p>
0996:             * The derby write ahead log algorithm uses checkpoint of the data
0997:             * cache to determine points of the log no longer required by
0998:             * restart recovery.  
0999:             * <p>
1000:             * This implementation uses the 2 cache interfaces to force all dirty
1001:             * pages to disk:
1002:             *
1003:             * WRITE DIRTY PAGES TO OS:
1004:             * In the first step all pages in the page cache
1005:             * are written, but not synced (pagecache.cleanAll).  The cachemanager
1006:             * cleanAll() interface guarantees that every dirty page that exists
1007:             * when this call is first made will have its clean() method called.
1008:             * The data cache (CachedPage.clean()) will call writePage but not
1009:             * sync the page.
1010:             * By using the Java write-then-sync pattern, the checkpoint is
1011:             * usually doing async I/O, allowing the OS to schedule multiple I/O's
1012:             * to the file as efficiently as it can.
1013:             * Note that it has been observed that checkpoints
1014:             * can flood the I/O system because these writes are not synced, see
1015:             * DERBY-799 - checkpoint should probably somehow restrict the rate
1016:             * it sends out those I/O's - it was observed a simple sleep every
1017:             * N writes fixed most of the problem.  
1018:             *
1019:             * FORCE THOSE DIRTY WRITES TO DISK:
1020:             * To force the I/O's to disk, the system calls each open dirty file
1021:             * and uses the java interface to sync any outstanding dirty pages to
1022:             * disk (containerCache.cleanAll()).  The open container cache does
1023:             * this work in RAFContainer.clean() by writing its header out and
1024:             * syncing the file.  (Note if any change is made to checkpoint to
1025:             * sync the writes vs. syncing the file, one probably still needs to 
1026:             * write the container header out and sync it).
1027:             *
1028:             * @exception  StandardException  Standard exception policy.
1029:             **/
1030:            public void checkpoint() throws StandardException {
1031:                pageCache.cleanAll();
1032:                containerCache.cleanAll();
1033:            }
1034:
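             /**
                 Called when the system is idle: age out unused entries from the
                 page and container caches.
              */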
1035:            public void idle() throws StandardException {
1036:                pageCache.ageOut();
1037:                containerCache.ageOut();
1038:            }
1039:
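             /**
                 Remember the raw store factory that owns this data factory and
                 boot the log factory, since different data factory
                 implementations want different types of log factory.
              */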
1040:            public void setRawStoreFactory(RawStoreFactory rsf, boolean create,
1041:                    Properties startParams) throws StandardException {
1042:
1043:                rawStoreFactory = rsf;
1044:
1045:                /*
1046:                 * boot the log factory here because different implementation of the
1047:                 * data	factory wants different types of log factory
1048:                 */
1049:                bootLogFactory(create, startParams);
1050:
1051:            }
1052:
1053:            /**
1054:            	Return my unique identifier
1055:
1056:            	@see DataFactory#getIdentifier
1057:             */
1058:            public UUID getIdentifier() {
1059:                return identifier;
1060:            }
1061:
1062:            /*
1063:             ** Called by post commit daemon, calling ReclaimSpace.performWork()
1064:             */
1065:            public int reclaimSpace(Serviceable work, ContextManager contextMgr)
1066:                    throws StandardException {
1067:                if (work == null)
1068:                    return Serviceable.DONE;
1069:
1070:                Transaction tran = rawStoreFactory.findUserTransaction(
1071:                        contextMgr, AccessFactoryGlobals.SYS_TRANS_NAME);
1072:
1073:                if (SanityManager.DEBUG) {
1074:                    SanityManager.ASSERT(tran != null, "null transaction");
1075:
1076:                    if (SanityManager.DEBUG_ON(DaemonService.DaemonTrace))
1077:                        SanityManager.DEBUG(DaemonService.DaemonTrace,
1078:                                "Performing post commit work " + work);
1079:                }
1080:
1081:                return ReclaimSpaceHelper.reclaimSpace(this,
1082:                        (RawTransaction) tran, (ReclaimSpace) work);
1083:            }
1084:
1085:            /**
1086:            	Really this is just a convenience routine for callers that might not
1087:            	have access to a log factory.
1088:             */
1089:            public StandardException markCorrupt(StandardException originalError) {
1090:                boolean firsttime = !isCorrupt;
1091:
1092:                isCorrupt = true;
1093:                if (getLogFactory() != null)
1094:                    getLogFactory().markCorrupt(originalError);
1095:
1096:                // if firsttime markCorrupt is called, release the JBMS lock so user
1097:                // can move the database if so desired.
1098:                if (firsttime) {
1099:                    // get rid of everything from the cache without first cleaning them
1100:                    if (pageCache != null)
1101:                        pageCache.discard(null);
1102:
1103:                    if (containerCache != null)
1104:                        containerCache.discard(null);
1105:
1106:                    // don't read in any more pages 
1107:                    pageCache = null;
1108:                    containerCache = null;
1109:
1110:                    releaseJBMSLockOnDB();
1111:                }
1112:
1113:                return originalError;
1114:            }
1115:
1116:            public FileResource getFileHandler() {
1117:                return fileHandler;
1118:            }
1119:
1120:            public void removeStubsOK() {
1121:                removeStubsOK = true;
1122:            }
1123:
1124:            /*
1125:             ** Implementation specific methods
1126:             */
1127:
1128:            public int getIntParameter(String parameterName,
1129:                    Properties properties, int defaultValue, int minimumValue,
1130:                    int maximumValue) {
1131:
1132:                int newValue;
1133:
1134:                String parameter = null;
1135:
1136:                if (properties != null)
1137:                    parameter = properties.getProperty(parameterName);
1138:
1139:                if (parameter == null)
1140:                    parameter = PropertyUtil.getSystemProperty(parameterName);
1141:
1142:                if (parameter != null) {
1143:                    try {
1144:                        newValue = Integer.parseInt(parameter);
1145:
1146:                        if ((newValue >= minimumValue)
1147:                                && (newValue <= maximumValue))
1148:                            return newValue;
1149:                    } catch (NumberFormatException nfe) {
1150:                        // just leave the size at the default.				
1151:                    }
1152:                }
1153:
1154:                return defaultValue;
1155:            }
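            /* Illustrative usage sketch (not part of the original BaseDataFileFactory.java):
             * shows how a caller might use getIntParameter() above to read a bounded
             * integer configuration value.  The property name and the bounds below are
             * assumptions chosen for the example only.
             */
            private int sketchReadBoundedIntProperty(Properties startParams) {
                return getIntParameter(
                        "derby.storage.exampleCacheSize", // hypothetical property name
                        startParams,        // boot-time Properties; may be null
                        1000,               // default, used if unset, malformed, or out of range
                        40,                 // minimum accepted value
                        Integer.MAX_VALUE); // maximum accepted value
            }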
1156:
1157:            CacheManager getContainerCache() {
1158:                return containerCache;
1159:            }
1160:
1161:            CacheManager getPageCache() {
1162:                return pageCache;
1163:            }
1164:
1165:            public long[] getCacheStats(String cacheName) {
1166:
1167:                if (cacheName == null) {
1168:                    // cache name is not specified, return the default.
1169:                    return getPageCache().getCacheStats();
1170:                }
1171:
1172:                if (cacheName.equals("pageCache")) {
1173:                    return getPageCache().getCacheStats();
1174:                } else {
1175:                    // return the default set of cache statistics.
1176:                    return getPageCache().getCacheStats();
1177:                }
1178:            }
1179:
1180:            public void resetCacheStats(String cacheName) {
1181:                if (cacheName == null) {
1182:                    // cache name is not specified, reset the default.
1183:                    getPageCache().resetCacheStats();
1184:                    return;
1185:                }
1186:
1187:                if (cacheName.equals("pageCache")) {
1188:                    getPageCache().resetCacheStats();
1189:                } else {
1190:                    // default
1191:                    getPageCache().resetCacheStats();
1192:                }
1193:            }
1194:
1195:            /**
1196:            	Ask the log factory to flush up to this log instant.
1197:
1198:            	@exception StandardException cannot sync log file
1199:             */
1200:            void flush(LogInstant instant) throws StandardException {
1201:                getLogFactory().flush(instant);
1202:            }
1203:
1204:            /**
1205:            	Ask the log factory to flush the side log up to this bip location
1206:            	Not implemented in this class - a subclass that deals with a side log
1207:            	must override this.
1208:
1209:            	@exception StandardException Cloudscape Standard Error Policy
1210:             */
1211:            private void syncSideLog(long bipLocation) throws StandardException {
1212:                return;
1213:            }
1214:
1215:            LogFactory getLogFactory() {
1216:                return logFactory;
1217:            }
1218:
1219:            RawStoreFactory getRawStoreFactory() {
1220:                return rawStoreFactory;
1221:            }
1222:
1223:            /**
1224:            	Get the root directory of the data storage area. Is always guaranteed 
1225:                to be an absolute path.
1226:             */
1227:            public String getRootDirectory() {
1228:                return dataDirectory;
1229:            }
1230:
1231:            /**
1232:             * Return the Class of the Containers to be produced by this factory.
1233:             * <p>
1234:             * Concrete implementations of a DataFactory must implement this routine
1235:             * to indicate what kind of containers are produced.  For instance
1236:             * the DataFileFactory produces RAFContainers.
1237:             * <p>
1238:             * It is expected that this routine is called only once, and thus does
1239:             * not worry about the overhead of repeated Class.forName() lookups.
1240:             *
1241:             * @return The Class object for the Container class.
1242:             *
1243:             **/
1244:            Cacheable newContainerObject() {
1245:                if (supportsRandomAccess)
1246:                    return new RAFContainer(this );
1247:                else
1248:                    return new InputStreamContainer(this );
1249:            }
1250:
1251:            /**
1252:             *	This page is going from clean to dirty; this is a chance for the
1253:             *	subclass to do something if so desired.
1254:             *
1255:             * @exception StandardException Standard Cloudscape Error Policy
1256:             */
1257:            private void pageToDirty(RawTransaction t, StoredPage page)
1258:                    throws StandardException {
1259:                return; // this implementation does nothing
1260:            }
1261:
1262:            /*
1263:             * Get the loggable page action that is associated with this implementation
1264:             *
1265:             * @return the PageActions
1266:             * @exception StandardException Standard Cloudscape Error Policy
1267:             */
1268:            private PageActions getLoggablePageActions()
1269:                    throws StandardException {
1270:                if (loggablePageActions == null)
1271:                    loggablePageActions = new LoggableActions();
1272:                return loggablePageActions;
1273:            }
1274:
1275:            /**
1276:             * Get the loggable allocation action associated with this implementation
1277:             *
1278:             * @return the PageActions
1279:             */
1280:            private AllocationActions getLoggableAllocationActions() {
1281:                if (loggableAllocActions == null)
1282:                    loggableAllocActions = new LoggableAllocActions();
1283:                return loggableAllocActions;
1284:            }
1285:
1286:            synchronized StorageFile getTempDirectory() {
1287:                actionCode = GET_TEMP_DIRECTORY_ACTION;
1288:                try {
1289:                    return (StorageFile) AccessController.doPrivileged(this );
1290:                } catch (PrivilegedActionException pae) {
1291:                    // getTempDirectory does not actually throw an exception
1292:                    return null;
1293:                }
1294:            }
1295:
1296:            private synchronized void removeTempDirectory() {
1297:                if (storageFactory != null) {
1298:                    actionCode = REMOVE_TEMP_DIRECTORY_ACTION;
1299:                    try {
1300:                        AccessController.doPrivileged(this );
1301:                    } catch (PrivilegedActionException pae) {
1302:                        // removeTempDirectory does not throw an exception
1303:                    }
1304:                }
1305:            }
1306:
1307:            /**
1308:             * Return the path to a container file.
1309:             * <p>
1310:             * Return the path to a container file that is relative to the root 
1311:             * directory.
1312:             * <p>
1313:             * The format of the name of an existing container file is:
1314:             *     segNNN/cXXX.dat
1315:             * The format of the name of a stub describing a dropped container file is:
1316:             *     segNNN/dXXX.dat
1317:             *
1318:             * NNN = segment number, currently 0 is where normal db files are found.
1319:             * XXX = The hex representation of the container number
1320:             *
1321:             * The store will always create containers with this format name, but 
1322:             * the store will also recognize the following two formats when attempting
1323:             * to open files - as some copy tools have uppercased our filenames when
1324:             * moving across operating systems:
1325:             *
1326:             * The format of the name of an existing container file is:
1327:             *     segNNN/CXXX.DAT
1328:             * The format of the name of a stub describing a dropped container file is:
1329:             *     segNNN/DXXX.DAT
1330:             * <p>
1331:             *
1332:             *
1333:             * @param containerId The container being opened/created
1334:             * @param stub        True if the file name for the stub is requested, 
1335:             *                      otherwise the file name for the data file
1336:             *
1337:             * @return The StorageFile representing path to container relative to root.
1338:             *
1339:             **/
1340:            public StorageFile getContainerPath(ContainerKey containerId,
1341:                    boolean stub) {
1342:                return getContainerPath(containerId, stub,
1343:                        GET_CONTAINER_PATH_ACTION);
1344:            }
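            /* Illustrative sketch (not part of the original file): builds the relative
             * container file name described in the javadoc above, mirroring the
             * GET_CONTAINER_PATH_ACTION case of run() further below.  For example,
             * container 16 in segment 0 maps to "seg0/c10.dat" and its dropped stub to
             * "seg0/d10.dat".  The '/' separator is an assumption for the example; the
             * real code asks the StorageFactory for its separator.
             */
            private String sketchContainerFileName(long segmentId, long containerId,
                    boolean stub) {
                return "seg" + segmentId + '/' + (stub ? 'd' : 'c')
                        + Long.toHexString(containerId) + ".dat";
            }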
1345:
1346:            private synchronized StorageFile getContainerPath(
1347:                    ContainerKey containerId, boolean stub, int code) {
1348:                actionCode = code;
1349:                try {
1350:                    this .containerId = containerId;
1351:                    this .stub = stub;
1352:                    try {
1353:                        return (StorageFile) AccessController
1354:                                .doPrivileged(this );
1355:                    } catch (PrivilegedActionException pae) {
1356:                        // getContainerPath does not throw an exception
1357:                        return null;
1358:                    }
1359:                } finally {
1360:                    this .containerId = null;
1361:                }
1362:            }
1363:
1364:            /**
1365:            	Return an alternate path to container file relative to the root directory.
1366:                The alternate path uses upper case 'C','D', and 'DAT' instead of 
1367:                lower case - there have been cases of people copying the database and
1368:                somehow upper casing all the file names.
1369:
1370:                The intended use is as a bug fix for track 3444.
1371:
1372:            	@param containerId The container being opened/created
1373:            	@param stub True if the file name for the stub is requested, otherwise the file name for the data file
1374:
1375:             */
1376:            public StorageFile getAlternateContainerPath(
1377:                    ContainerKey containerId, boolean stub) {
1378:                return getContainerPath(containerId, stub,
1379:                        GET_ALTERNATE_CONTAINER_PATH_ACTION);
1380:            }
1381:
1382:            /**
1383:            	Remove stubs in this database.  Stubs are committed deleted containers
1384:             */
1385:            private synchronized void removeStubs() {
1386:                actionCode = REMOVE_STUBS_ACTION;
1387:                try {
1388:                    AccessController.doPrivileged(this );
1389:                } catch (PrivilegedActionException pae) {
1390:                    // removeStubs does not throw an exception
1391:                }
1392:            }
1393:
1394:            /**
1395:             * Keeps track of information about the stub files of the committed deleted
1396:             * containers. We use the info to delete them at checkpoints.
1397:             * In addition to the file info, we also keep track of the identity of the
1398:             * container, which helps us remove the entry from the cache, and the log
1399:             * instant when the stub was created, which helps us figure out whether we
1400:             * still require the stub file for crash recovery.
1401:             * We maintain the information in a hashtable:
1402:             * key (LOG INSTANT), values: file handle and container identity.
1403:             **/
1404:            public void stubFileToRemoveAfterCheckPoint(StorageFile file,
1405:                    LogInstant logInstant, Object identity) {
1406:                if (droppedTableStubInfo != null) {
1407:                    Object[] removeInfo = new Object[2];
1408:                    removeInfo[0] = file;
1409:                    removeInfo[1] = identity;
1410:                    droppedTableStubInfo.put(logInstant, removeInfo);
1411:                }
1412:            }
1413:
1414:            /**
1415:             * Delete the stub files that are not required for recovery. A stub file
1416:             * is not required to be around if the recovery is not going to see
1417:             * any log record that belongs to that container. Since the stub files
1418:             * are created as a post commit operation, they are not necessary during
1419:             * undo operation of the recovery.
1420:             *
1421:             * To remove a stub file we have to be sure that it was created before the
1422:             * redoLWM in the check point record. We can be sure that the stub is not
1423:             * required if the log instant when it was created is less than the redoLWM.
1424:             */
1425:            public void removeDroppedContainerFileStubs(LogInstant redoLWM)
1426:                    throws StandardException {
1427:
1428:                if (droppedTableStubInfo != null) {
1429:                    synchronized (droppedTableStubInfo) {
1430:                        for (Enumeration e = droppedTableStubInfo.keys(); e
1431:                                .hasMoreElements();) {
1432:                            LogInstant logInstant = (LogInstant) e
1433:                                    .nextElement();
1434:                            if (logInstant.lessThan(redoLWM)) {
1435:
1436:                                Object[] removeInfo = (Object[]) droppedTableStubInfo
1437:                                        .get(logInstant);
1438:                                Object identity = removeInfo[1];
1439:                                //delete the entry in the container cache.
1440:                                Cacheable ccentry = containerCache
1441:                                        .findCached(identity);
1442:                                if (ccentry != null)
1443:                                    containerCache.remove(ccentry);
1444:
1445:                                // delete the stub; we don't require it during recovery
1446:                                synchronized (this ) {
1447:                                    actionFile = (StorageFile) removeInfo[0];
1448:                                    actionCode = DELETE_IF_EXISTS_ACTION;
1449:                                    try {
1450:                                        if (AccessController.doPrivileged(this ) != null) {
1451:                                            // if we successfully delete the file, remove
1452:                                            // it from the hash table.
1453:                                            droppedTableStubInfo
1454:                                                    .remove(logInstant);
1455:                                        }
1456:                                    } catch (PrivilegedActionException pae) {
1457:                                        // DELETE_IF_EXISTS does not throw an exception
1458:                                    }
1459:                                }
1460:                            }
1461:                        }
1462:                    }
1463:                }
1464:            }
1465:
1466:            /**
1467:             * Find the largest containerid in seg0.
1468:             * <p>
1469:             * Do a file list of the files in seg0 and return the highest numbered
1470:             * file found.
1471:             * <p>
1472:             * Until I figure out some reliable place to store this information across
1473:             * a boot of the system, this is what is used following a boot to assign
1474:             * the next conglomerate id when a new conglomerate is created.  It is
1475:             * only called at most once, and then the value is cached by the calling
1476:             * store code.
1477:             * <p>
1478:             *
1479:             * @return The largest containerid in seg0.
1480:             **/
1481:            private synchronized long findMaxContainerId() {
1482:                actionCode = FIND_MAX_CONTAINER_ID_ACTION;
1483:                try {
1484:                    return ((Long) AccessController.doPrivileged(this ))
1485:                            .longValue();
1486:                } catch (PrivilegedActionException pae) {
1487:                    // findMaxContainerId does not throw an exception
1488:                    return 0;
1489:                }
1490:            }
1491:
1492:            private void bootLogFactory(boolean create, Properties startParams)
1493:                    throws StandardException {
1494:
1495:                if (isReadOnly()) {
1496:                    startParams.put(LogFactory.RUNTIME_ATTRIBUTES,
1497:                            LogFactory.RT_READONLY);
1498:                }
1499:
1500:                logFactory = (LogFactory) Monitor.bootServiceModule(create,
1501:                        this , rawStoreFactory.getLogFactoryModule(),
1502:                        startParams);
1503:            }
1504:
1505:            /**
1506:            	Does this factory support this service type.
1507:             */
1508:            private boolean handleServiceType(String type) {
1509:                try {
1510:                    PersistentService ps = Monitor.getMonitor()
1511:                            .getServiceProvider(type);
1512:                    return ps != null && ps.hasStorageFactory();
1513:                } catch (StandardException se) {
1514:                    return false;
1515:                }
1516:            }
1517:
1518:            /**
1519:            	Check to see if we are the only JBMS opened against this database.
1520:
1521:            	<BR>This method does nothing if this database is read only or we cannot
1522:            	access files directly on the database directory.
1523:
1524:            	<BR>We first see if a file named db.lck exists on the top database
1525:            	directory (i.e., the directory where service.properties lives).  If it
1526:            	doesn't exist, we create it and write to it our identity which is
1527:            	generated per boot of the JBMS.
1528:
1529:            	<BR>If the db.lck file already exists when we boot this database, we
1530:            	try to delete it first, assuming that an opened RandomAccessFile can
1531:            	act as a file lock against delete.  If that succeeds, we may hold a
1532:            	file lock against any subsequent JBMS that tries to attach to this
1533:            	database before we exit.
1534:
1535:            	<BR>We test to see if we think an opened file will prevent it from
1536:            	being deleted; if so, we will hold on to the open file descriptor and
1537:            	use it as a file lock.  If not, and we started out deleting an existing
1538:            	db.lck file, we issue a warning message to the info stream that we are
1539:            	about to attach to a database which may already have another JBMS
1540:            	attached to it. Then we overwrite that db.lck file with our identity.
1541:
1542:            	<BR>Upon shutdown, we delete the db.lck file.  If the system crashes
1543:            	instead of shutting down cleanly, the file will be cleaned up the next
1544:            	time the system boots.
1545:
1546:            	@exception StandardException another JBMS is already attached to the
1547:            	database at this directory
1548:             */
1549:            private void getJBMSLockOnDB(UUID myUUID, UUIDFactory uuidFactory,
1550:                    String databaseDirectory) throws StandardException {
1551:                if (fileLockOnDB != null) // I already got the lock!
1552:                    return;
1553:
1554:                if (isReadOnly())
1555:                    return;
1556:                if (SanityManager.DEBUG) {
1557:                    if (myUUID == null)
1558:                        SanityManager.THROWASSERT("myUUID == null");
1559:                }
1560:
1561:                synchronized (this ) {
1562:                    actionCode = GET_LOCK_ON_DB_ACTION;
1563:                    this .myUUID = myUUID;
1564:                    this .uuidFactory = uuidFactory;
1565:                    this .databaseDirectory = databaseDirectory;
1566:
1567:                    try {
1568:                        AccessController.doPrivileged(this );
1569:                    } catch (PrivilegedActionException pae) {
1570:                        throw (StandardException) pae.getException();
1571:                    } finally {
1572:                        this .myUUID = null;
1573:                        this .uuidFactory = null;
1574:                        this .databaseDirectory = null;
1575:                    }
1576:                }
1577:
1578:                // OK file lock is reliable, we think... keep the fileLockOnDB file
1579:                // descriptor open to prevent other JBMS from booting
1580:                // fileLockOnDB is not null in this case
1581:            }
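            /* Sketch only (an assumption, not the Derby implementation): the
             * "exclusive file lock" approach referred to in privGetJBMSLockOnDB below
             * is typically built on java.nio.channels.FileChannel.tryLock(), which
             * returns null when another JVM already holds the lock.  The lock-file
             * argument here is hypothetical.
             */
            private java.nio.channels.FileLock sketchTryExclusiveLock(java.io.File lockFile)
                    throws java.io.IOException {
                java.io.RandomAccessFile raf =
                        new java.io.RandomAccessFile(lockFile, "rw");
                java.nio.channels.FileLock lock = raf.getChannel().tryLock();
                if (lock == null) {
                    // another JVM holds the lock; this corresponds to
                    // StorageFile.EXCLUSIVE_FILE_LOCK_NOT_AVAILABLE in the code below
                    raf.close();
                }
                return lock;
            }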
1582:
1583:            // Called from within a privilege block
1584:            private void privGetJBMSLockOnDB() throws StandardException {
1585:                boolean fileLockExisted = false;
1586:                String blownUUID = null;
1587:
1588:                StorageFile fileLock = storageFactory
1589:                        .newStorageFile(DB_LOCKFILE_NAME);
1590:
1591:                try {
1592:                    // assume we are not read only
1593:                    // SECURITY PERMISSION MP1
1594:                    if (fileLock.exists()) {
1595:                        fileLockExisted = true;
1596:
1597:                        // see what it says in case we cannot count on delete failing
1598:                        // when someone else has an opened file descriptor.
1599:                        // I may be blowing this JBMS's lock away
1600:                        // SECURITY PERMISSION MP1
1601:                        // SECURITY PERMISSION OP4
1602:                        fileLockOnDB = fileLock.getRandomAccessFile("rw");
1603:                        try {
1604:                            blownUUID = fileLockOnDB.readUTF();
1605:                        } catch (IOException ioe) {
1606:                            // The previous owner of the lock may have died before
1607:                            // it finished writing its UUID down.
1608:                            fileLockExisted = false;
1609:                        }
1610:
1611:                        fileLockOnDB.close();
1612:                        fileLockOnDB = null;
1613:
1614:                        // SECURITY PERMISSION OP5
1615:                        if (!fileLock.delete()) {
1616:                            throw StandardException.newException(
1617:                                    SQLState.DATA_MULTIPLE_JBMS_ON_DB,
1618:                                    databaseDirectory);
1619:                        }
1620:                    }
1621:
1622:                    // if the file does not exist, we grab it immediately - there is a
1623:                    // possibility that some other JBMS got to it sooner than we did;
1624:                    // check the UUID after we write it to make sure
1625:                    // SECURITY PERMISSION MP1
1626:                    // SECURITY PERMISSION OP5
1627:                    fileLockOnDB = fileLock.getRandomAccessFile("rw");
1628:
1629:                    // write it out for future reference
1630:                    fileLockOnDB.writeUTF(myUUID.toString());
1631:
1632:                    fileLockOnDB.sync(false);
1633:                    fileLockOnDB.seek(0);
1634:                    // check the UUID
1635:                    UUID checkUUID = uuidFactory.recreateUUID(fileLockOnDB
1636:                            .readUTF());
1637:                    if (!checkUUID.equals(myUUID)) {
1638:                        throw StandardException.newException(
1639:                                SQLState.DATA_MULTIPLE_JBMS_ON_DB,
1640:                                databaseDirectory);
1641:                    }
1642:                } catch (IOException ioe) {
1643:                    // probably a read only db, don't do anything more
1644:                    readOnly = true;
1645:                    try {
1646:                        if (fileLockOnDB != null)
1647:                            fileLockOnDB.close();
1648:                    } catch (IOException ioe2) { /* did the best I could */
1649:                    }
1650:                    fileLockOnDB = null;
1651:
1652:                    return;
1653:                }
1654:
1655:                if (fileLock.delete()) {
1656:                    // if I can delete it while I am holding an opened file descriptor,
1657:                    // then the file lock is unreliable - send out a warning if I
1658:                    // have blown off another JBMS's lock on the DB
1659:
1660:                    Object[] args = new Object[3];
1661:                    args[0] = myUUID;
1662:                    args[1] = databaseDirectory;
1663:                    args[2] = blownUUID;
1664:
1665:                    // Try the exclusive file lock approach available in jdk1.4 or
1666:                    // later JVMs, where the delete mechanism does not reliably prevent
1667:                    // double booting of derby databases. If we still don't get a
1668:                    // reliable exclusive lock, we send out a warning.
1669:
1670:                    int exLockStatus = StorageFile.NO_FILE_LOCK_SUPPORT;
1671:                    // If the user has chosen the force lock option, don't bother
1672:                    // applying the exclusive file lock mechanism
1673:                    if (!throwDBlckException) {
1674:                        exFileLock = storageFactory
1675:                                .newStorageFile(DB_EX_LOCKFILE_NAME);
1676:                        exLockStatus = exFileLock.getExclusiveFileLock();
1677:                    }
1678:
1679:                    if (exLockStatus == StorageFile.NO_FILE_LOCK_SUPPORT) {
1680:                        if (fileLockExisted && !throwDBlckException) {
1681:
1682:                            StandardException multipleJBMSWarning = StandardException
1683:                                    .newException(
1684:                                            SQLState.DATA_MULTIPLE_JBMS_WARNING,
1685:                                            args);
1686:
1687:                            String warningMsg = MessageService
1688:                                    .getCompleteMessage(
1689:                                            SQLState.DATA_MULTIPLE_JBMS_WARNING,
1690:                                            args);
1691:
1692:                            logMsg(warningMsg);
1693:
1694:                            // RESOLVE - need warning support.  Output to
1695:                            // system.err.println rather than just send warning 
1696:                            // message to derby.log.
1697:                            System.err.println(warningMsg);
1698:
1699:                        }
1700:                    }
1701:
1702:                    // filelock is unreliable, but we should at least leave a file
1703:                    // there to warn the next person
1704:                    try {
1705:                        // the existing fileLockOnDB file descriptor may already be
1706:                        // deleted by the delete call, close it and create the file 
1707:                        // again
1708:                        if (fileLockOnDB != null)
1709:                            fileLockOnDB.close();
1710:                        fileLockOnDB = fileLock.getRandomAccessFile("rw");
1711:
1712:                        // write it out for future reference
1713:                        fileLockOnDB.writeUTF(myUUID.toString());
1714:
1715:                        fileLockOnDB.sync(false);
1716:                        fileLockOnDB.close();
1717:                    } catch (IOException ioe) {
1718:                        try {
1719:                            fileLockOnDB.close();
1720:                        } catch (IOException ioe2) {
1721:                            /* did the best I could */
1722:                        }
1723:                    } finally {
1724:                        fileLockOnDB = null;
1725:                    }
1726:
1727:                    if (fileLockExisted && throwDBlckException) {
1728:                        // user has chosen that we always throw exception, throw it
1729:                        // now that we have reinstated the lock file.
1730:                        throw StandardException.newException(
1731:                                SQLState.DATA_MULTIPLE_JBMS_FORCE_LOCK, args);
1732:                    }
1733:
1734:                    if (exLockStatus == StorageFile.EXCLUSIVE_FILE_LOCK_NOT_AVAILABLE) {
1735:
1736:                        throw StandardException.newException(
1737:                                SQLState.DATA_MULTIPLE_JBMS_ON_DB,
1738:                                databaseDirectory);
1739:                    }
1740:
1741:                }
1742:            } // end of privGetJBMSLockOnDB
1743:
1744:            private void releaseJBMSLockOnDB() {
1745:                if (isReadOnly())
1746:                    return;
1747:
1748:                synchronized (this ) {
1749:                    actionCode = RELEASE_LOCK_ON_DB_ACTION;
1750:                    try {
1751:                        AccessController.doPrivileged(this );
1752:                    } catch (PrivilegedActionException pae) {
1753:                        // do nothing - it may be read only medium, who knows what the
1754:                        // problem is
1755:                    } finally {
1756:                        fileLockOnDB = null;
1757:                    }
1758:                }
1759:            }
1760:
1761:            private void privReleaseJBMSLockOnDB() throws IOException {
1762:                if (fileLockOnDB != null)
1763:                    fileLockOnDB.close();
1764:
1765:                if (storageFactory != null) {
1766:                    StorageFile fileLock = storageFactory
1767:                            .newStorageFile(DB_LOCKFILE_NAME);
1768:
1769:                    fileLock.delete();
1770:                }
1771:
1772:                // release the lock that is acquired using tryLock() to prevent
1773:                // multiple JVMs from booting the same database on Unix environments.
1774:                if (exFileLock != null)
1775:                    exFileLock.releaseExclusiveFileLock();
1776:
1777:                return;
1778:            } // end of privReleaseJBMSLockOnDB
1779:
1780:            private void logMsg(String msg) {
1781:                if (istream == null) {
1782:                    istream = Monitor.getStream();
1783:                }
1784:
1785:                istream.println(msg);
1786:            }
1787:
1788:            public final boolean databaseEncrypted() {
1789:                return databaseEncrypted;
1790:            }
1791:
1792:            public void setDatabaseEncrypted() {
1793:                databaseEncrypted = true;
1794:            }
1795:
1796:            public int encrypt(byte[] cleartext, int offset, int length,
1797:                    byte[] ciphertext, int outputOffset, boolean newEngine)
1798:                    throws StandardException {
1799:                return rawStoreFactory.encrypt(cleartext, offset, length,
1800:                        ciphertext, outputOffset, newEngine);
1801:            }
1802:
1803:            public int decrypt(byte[] ciphertext, int offset, int length,
1804:                    byte[] cleartext, int outputOffset)
1805:                    throws StandardException {
1806:                return rawStoreFactory.decrypt(ciphertext, offset, length,
1807:                        cleartext, outputOffset);
1808:            }
1809:
1810:            public void encryptAllContainers(RawTransaction t)
1811:                    throws StandardException {
1812:                containerEncrypter = new EncryptData(this );
1813:                // encrypt all the containers in the database
1814:                containerEncrypter.encryptAllContainers(t);
1815:            }
1816:
1817:            /*
1818:             * Remove old versions of the containers after (re)encryption
1819:             * of the database.
1820:             * @param inRecovery  <code> true </code>, if cleanup is 
1821:             *                     happening during recovery.
1822:             */
1823:            public void removeOldVersionOfContainers(boolean inRecovery)
1824:                    throws StandardException {
1825:                // check if old containers are being removed during recovery
1826:                // because of a crash after successful completion of
1827:                // (re)encryption of the database, but before the
1828:                // (re)encryption cleanup was complete.
1829:                if (inRecovery) {
1830:                    containerEncrypter = new EncryptData(this );
1831:                }
1832:                containerEncrypter.removeOldVersionOfContainers(inRecovery);
1833:                containerEncrypter = null;
1834:            }
1835:
1836:            /**
1837:            	Returns the encryption block size used by the algorithm at time of
1838:            	creation of an encrypted database
1839:             */
1840:            public int getEncryptionBlockSize() {
1841:                return rawStoreFactory.getEncryptionBlockSize();
1842:            }
1843:
1844:            public String getVersionedName(String name, long generationId) {
1845:                return name.concat(".G".concat(Long.toString(generationId)));
1846:            }
1847:
1848:            /**
1849:             * Return an id which can be used to create a container.
1850:             * <p>
1851:             * Return an id number which is greater than any existing container
1852:             * in the current database.  Caller will use this to allocate future
1853:             * container numbers - most likely caching the value and then incrementing
1854:             * it as it is used.
1855:             * <p>
1856:             *
1857:             * @return An id which can be used to create a container.
1858:             *
1859:             * @exception  StandardException  Standard exception policy.
1860:             **/
1861:            public long getMaxContainerId() throws StandardException {
1862:                return (findMaxContainerId());
1863:            }
1864:
1865:            synchronized long getNextId() {
1866:                return nextContainerId++;
1867:            }
1868:
1869:            /** return a secure random number */
1870:            int random() {
1871:                return databaseEncrypted ? rawStoreFactory.random() : 0;
1872:            }
1873:
1874:            /**
1875:            	Add a file to the list of files to be removed post recovery.
1876:             */
1877:            void fileToRemove(StorageFile file, boolean remove) {
1878:                if (postRecoveryRemovedFiles == null)
1879:                    postRecoveryRemovedFiles = new Hashtable();
1880:                String path = null;
1881:                synchronized (this ) {
1882:                    actionCode = GET_PATH_ACTION;
1883:                    actionFile = file;
1884:                    try {
1885:                        path = (String) AccessController.doPrivileged(this );
1886:                    } catch (PrivilegedActionException pae) {
1887:                        // GET_PATH does not throw an exception
1888:                    } finally {
1889:                        actionFile = null;
1890:                    }
1891:                }
1892:                if (remove) // to be removed
1893:                    postRecoveryRemovedFiles.put(path, file);
1894:                else
1895:                    postRecoveryRemovedFiles.remove(path);
1896:
1897:            }
1898:
1899:            /**
1900:            	Called after recovery is performed.
1901:
1902:            	@exception StandardException Standard Cloudscape Error Policy
1903:             */
1904:            public void postRecovery() throws StandardException {
1905:
1906:                // hook up the cache cleaner daemon after recovery is finished
1907:                DaemonService daemon = rawStoreFactory.getDaemon();
1908:
1909:                if (daemon == null)
1910:                    return;
1911:
1912:                containerCache.useDaemonService(daemon);
1913:
1914:                pageCache.useDaemonService(daemon);
1915:                if (postRecoveryRemovedFiles != null) {
1916:                    synchronized (this ) {
1917:                        actionCode = POST_RECOVERY_REMOVE_ACTION;
1918:                        try {
1919:                            AccessController.doPrivileged(this );
1920:                        } catch (PrivilegedActionException pae) {
1921:                            // POST_RECOVERY_REMOVE does not throw an exception
1922:                        }
1923:                    }
1924:                    postRecoveryRemovedFiles = null;
1925:                }
1926:            }
1927:
1928:            public void freezePersistentStore() throws StandardException {
1929:                synchronized (freezeSemaphore) {
1930:                    if (isFrozen) {
1931:                        throw StandardException
1932:                                .newException(SQLState.RAWSTORE_NESTED_FREEZE);
1933:                    }
1934:
1935:                    // set this to true first to stop all writes from starting after
1936:                    // this.
1937:                    isFrozen = true;
1938:
1939:                    // wait for all in progress write to finish
1940:                    try {
1941:                        while (writersInProgress > 0) {
1942:                            try {
1943:                                freezeSemaphore.wait();
1944:                            } catch (InterruptedException ie) {
1945:                                // make sure we are not stuck in frozen state if we
1946:                                // caught an interrupt exception and the calling 
1947:                                // thread may not have a chance to call unfreeze
1948:                                isFrozen = false;
1949:                                freezeSemaphore.notifyAll();
1950:
1951:                                throw StandardException.interrupt(ie);
1952:                            }
1953:                        }
1954:                    } catch (RuntimeException rte) {
1955:                        // make sure we are not stuck in frozen state if we
1956:                        // caught a run time exception and the calling thread may not
1957:                        // have a chance to call unfreeze
1958:                        isFrozen = false;
1959:                        freezeSemaphore.notifyAll();
1960:                        throw rte; // rethrow run time exception
1961:                    }
1962:
1963:                    if (SanityManager.DEBUG)
1964:                        SanityManager.ASSERT(writersInProgress == 0
1965:                                && isFrozen == true,
1966:                                "data store is not properly frozen");
1967:                }
1968:            }
1969:
1970:            public void unfreezePersistentStore() {
1971:                synchronized (freezeSemaphore) {
1972:                    isFrozen = false;
1973:                    freezeSemaphore.notifyAll();
1974:                }
1975:            }
1976:
1977:            public void writeInProgress() throws StandardException {
1978:                synchronized (freezeSemaphore) {
1979:                    // do not start write, the persistent store is frozen
1980:                    while (isFrozen) {
1981:                        try {
1982:                            freezeSemaphore.wait();
1983:                        } catch (InterruptedException ie) {
1984:                            throw StandardException.interrupt(ie);
1985:                        }
1986:                    }
1987:
1988:                    // store is not frozen, proceed to write - do this last
1989:                    writersInProgress++;
1990:                }
1991:            }
1992:
1993:            public void writeFinished() {
1994:                synchronized (freezeSemaphore) {
1995:                    if (SanityManager.DEBUG)
1996:                        SanityManager.ASSERT(writersInProgress > 0,
1997:                                "no writers in progress");
1998:
1999:                    writersInProgress--;
2000:                    freezeSemaphore.notifyAll(); // wake up the freezer
2001:                }
2002:            }
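            /* Illustrative caller protocol (sketch, not part of the original file):
             * every writer brackets its write with writeInProgress()/writeFinished(),
             * while a freezer (for example an online backup) calls
             * freezePersistentStore() to block new writes and wait for in-flight ones,
             * then unfreezePersistentStore() when it is done.  The write itself is
             * elided here.
             */
            private void sketchGuardedWrite() throws StandardException {
                writeInProgress();   // blocks while the store is frozen
                try {
                    // ... perform the page or container write here ...
                } finally {
                    writeFinished(); // wakes up a waiting freezer
                }
            }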
2003:
2004:            /*
2005:             *  Find all the containers stored in the seg0 directory and
2006:             *  back up each container to the specified backup location.
2007:             */
2008:            public void backupDataFiles(Transaction rt, File backupDir)
2009:                    throws StandardException {
2010:
2011:                /*
2012:                 * The list of containers that need to be backed up is identified by
2013:                 * simply reading the list of files in seg0.
2014:                 * All containers that are created after that list is made, while the
2015:                 * backup is in progress, are recreated on restore using the
2016:                 * transaction log.
2017:                 */
2018:
2019:                String[] files = getContainerNames();
2020:
2021:                if (files != null) {
2022:                    // No user visible locks are acquired to back up the database. A stable
2023:                    // backup is made by latching the pages and using internal
2024:                    // synchronization mechanisms.
2025:                    LockingPolicy lockPolicy = rt.newLockingPolicy(
2026:                            LockingPolicy.MODE_NONE,
2027:                            TransactionController.ISOLATION_NOLOCK, false);
2028:                    long segmentId = 0;
2029:
2030:                    // loop through all the files in seg0 and backup all valid containers.
2031:                    for (int f = files.length - 1; f >= 0; f--) {
2032:                        long containerId;
2033:                        try {
2034:                            containerId = Long.parseLong(files[f].substring(1,
2035:                                    (files[f].length() - 4)), 16);
2036:                        } catch (Throwable t) {
2037:                            // ignore errors from parse, it just means that someone put
2038:                            // a file in seg0 that we didn't expect.  Continue with the
2039:                            // next one.
2040:                            continue;
2041:                        }
2042:
2043:                        ContainerKey identity = new ContainerKey(segmentId,
2044:                                containerId);
2045:
2046:                        /* Not necessary to get the container through the transaction.
2047:                         * Backup opens the container in read only mode; no need to
2048:                         * transition the transaction to the active state.
2049:                         *
2050:                         * Dropped container stubs also have to be backed up
2051:                         * for restore to work correctly. That is
2052:                         * why we are using an open call that lets us
2053:                         * open dropped containers.
2054:                         */
2055:
2056:                        ContainerHandle containerHdl = openDroppedContainer(
2057:                                (RawTransaction) rt, identity, lockPolicy,
2058:                                ContainerHandle.MODE_READONLY);
2059:                        /*
2060:                         * Note 1:
2061:                         * If a container creation is in progress, the open call will wait
2062:                         * until it is complete; it will never return a handle to a
2063:                         * container that is partially created. (See the cache manager
2064:                         * code for more details.)
2065:                         *
2066:                         * Note 2:
2067:                         * If a container creation failed in the middle, after the list
2068:                         * of names was read from seg0, the container will not exist in
2069:                         * the database any more, so there is nothing to back up.  An
2070:                         * attempt to open such a container will return null.
2071:                         *
2072:                         */
2073:
2074:                        if (containerHdl != null) {
2075:                            containerHdl.backupContainer(backupDir.getPath());
2076:                            containerHdl.close();
2077:                        }
2078:                    }
2079:                } else {
2080:                    if (SanityManager.DEBUG)
2081:                        SanityManager
2082:                                .THROWASSERT("backup process is unable to read container names in seg0");
2083:                }
2084:            }
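            /* Illustrative sketch (not part of the original file): mirrors how the
             * backup loop above recovers a container id from a seg0 file name such as
             * "c10.dat" (or a stub "d10.dat") - strip the one-character prefix and the
             * ".dat" suffix, then parse the remainder as hex.
             */
            private static long sketchContainerIdFromFileName(String fileName) {
                return Long.parseLong(
                        fileName.substring(1, fileName.length() - 4), 16);
            }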
2085:
2086:            /**
2087:             * Get all the names of the files in seg0.
2088:             * MT - This method needs to be synchronized to avoid conflicts
2089:             * with other privileged actions executed in this class.
2090:             * @return An array of all the file names in seg0.
2091:             **/
2092:            synchronized String[] getContainerNames() {
2093:                actionCode = GET_CONTAINER_NAMES_ACTION;
2094:                try {
2095:                    return (String[]) AccessController.doPrivileged(this );
2096:                } catch (PrivilegedActionException pae) {
2097:                    return null;
2098:                }
2099:            }
2100:
2101:            /**
2102:             * Removes the data directory (seg*) from the database home directory and
2103:             * restores it from the backup location.
2104:             * This function gets called only when any of the following attributes
2105:             * are specified on the connection URL:
2106:             * Attribute.CREATE_FROM (Create the database from backup if it does not exist)
2107:             * Attribute.RESTORE_FROM (Delete the whole database if it exists and
2108:             *     then restore it from backup)
2109:             * Attribute.ROLL_FORWARD_RECOVERY_FROM (Perform rollforward recovery;
2110:             * except for the log directory, everything else is replaced by the copy from
2111:             * backup. Log files in the backup are copied to the existing online log
2112:             * directory.)
2113:             *
2114:             * In all the cases, the data directory (seg*) is replaced by the data
2115:             * directory from backup when this function is called.
2116:             */
2117:            private void restoreDataDirectory(String backupPath)
2118:                    throws StandardException {
2119:                File bsegdir; //segment directory in the backup
2120:                File backupRoot = new java.io.File(backupPath); //root dir of backup db
2121:
2122:                /* To be safe we first check that the backup directory exists and has
2123:                 * at least one seg* directory before removing the current data directory.
2124:                 *
2125:                 * This will fail with a security exception unless the database engine 
2126:                 * and all its callers have permission to read the backup directory.
2127:                 */
2128:                String[] bfilelist = backupRoot.list();
2129:                if (bfilelist != null) {
2130:                    boolean segmentexist = false;
2131:                    for (int i = 0; i < bfilelist.length; i++) {
2132:                        //check if it is a  seg* directory
2133:                        if (bfilelist[i].startsWith("seg")) {
2134:                            bsegdir = new File(backupRoot, bfilelist[i]);
2135:                            if (bsegdir.exists() && bsegdir.isDirectory()) {
2136:                                segmentexist = true;
2137:                                break;
2138:                            }
2139:                        }
2140:                    }
2141:
2142:                    if (!segmentexist) {
2143:                        throw StandardException.newException(
2144:                                SQLState.DATA_DIRECTORY_NOT_FOUND_IN_BACKUP,
2145:                                backupRoot);
2146:                    }
2147:                } else {
2148:
2149:                    throw StandardException.newException(
2150:                            SQLState.DATA_DIRECTORY_NOT_FOUND_IN_BACKUP,
2151:                            backupRoot);
2152:                }
2153:
2154:                synchronized (this ) {
2155:                    actionCode = RESTORE_DATA_DIRECTORY_ACTION;
2156:                    this .backupPath = backupPath;
2157:                    this .backupRoot = backupRoot;
2158:                    this .bfilelist = bfilelist;
2159:                    try {
2160:                        AccessController.doPrivileged(this );
2161:                    } catch (PrivilegedActionException pae) {
2162:                        throw (StandardException) pae.getException();
2163:                    } finally {
2164:                        this .backupPath = null;
2165:                        this .backupRoot = null;
2166:                        this .bfilelist = null;
2167:                    }
2168:                }
2169:            }
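            /* Illustrative sketch (not part of the original file): restoreDataDirectory()
             * is reached when the boot-time connection URL carries one of the attributes
             * listed in the javadoc above, for example restoreFrom.  The database name
             * and backup path below are hypothetical.
             */
            private static java.sql.Connection sketchRestoreFromBackup()
                    throws java.sql.SQLException {
                // createFrom and rollForwardRecoveryFrom are used the same way
                return java.sql.DriverManager.getConnection(
                        "jdbc:derby:salesdb;restoreFrom=/backups/salesdb");
            }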
2170:
2171:            private void privRestoreDataDirectory() throws StandardException {
2172:                StorageFile csegdir; //segment directory in the current db home
2173:                StorageFile dataRoot = storageFactory.newStorageFile(null); //root dir of db
2174:
2175:                //Remove the seg* directories in the current database home directory
2176:                String[] cfilelist = dataRoot.list();
2177:                if (cfilelist != null) {
2178:                    for (int i = 0; i < cfilelist.length; i++) {
2179:                        //delete only the seg* directories in the database home
2180:                        if (cfilelist[i].startsWith("seg")) {
2181:                            csegdir = storageFactory
2182:                                    .newStorageFile(cfilelist[i]);
2183:                            if (!csegdir.deleteAll()) {
2184:                                throw StandardException
2185:                                        .newException(
2186:                                                SQLState.UNABLE_TO_REMOVE_DATA_DIRECTORY,
2187:                                                csegdir);
2188:                            }
2189:                        }
2190:                    }
2191:                }
2192:
2193:                //copy the seg* directories from backup to current database home
2194:                for (int i = 0; i < bfilelist.length; i++) {
2195:                    //copy only the seg* directories and copy them from backup
2196:                    if (bfilelist[i].startsWith("seg")) {
2197:                        csegdir = storageFactory.newStorageFile(bfilelist[i]);
2198:                        File bsegdir1 = new java.io.File(backupRoot,
2199:                                bfilelist[i]);
2200:                        if (!FileUtil.copyDirectory(writableStorageFactory,
2201:                                bsegdir1, csegdir)) {
2202:                            throw StandardException.newException(
2203:                                    SQLState.UNABLE_TO_COPY_DATA_DIRECTORY,
2204:                                    bsegdir1, csegdir);
2205:                        }
2206:                    } else if (databaseEncrypted
2207:                            && bfilelist[i]
2208:                                    .startsWith(Attribute.CRYPTO_EXTERNAL_KEY_VERIFY_FILE)) {
2209:                        // In the case of an encrypted database that uses an external
2210:                        // encryption key, there is an extra file, named by
2211:                        // Attribute.CRYPTO_EXTERNAL_KEY_VERIFY_FILE, that needs to be
2212:                        // copied over during createFrom/restore operations.
2213:
2214:                        //copy the file
2215:                        File fromFile = new File(backupRoot, bfilelist[i]);
2216:                        StorageFile toFile = storageFactory
2217:                                .newStorageFile(bfilelist[i]);
2218:
2219:                        if (!FileUtil.copyFile(writableStorageFactory,
2220:                                fromFile, toFile)) {
2221:                            throw StandardException.newException(
2222:                                    SQLState.UNABLE_TO_COPY_DATA_DIRECTORY,
2223:                                    bfilelist[i], toFile);
2224:                        }
2225:                    }
2226:                }
2227:
2228:            } // end of privRestoreDataDirectory
2229:
2230:            /**
2231:            	Is the store read-only.
2232:             */
2233:            public boolean isReadOnly() {
2234:                // return what the baseDataFileFactory thinks
2235:                return readOnly;
2236:            }
2237:
2238:            /**
2239:             * @return The StorageFactory used by this dataFactory
2240:             */
2241:            public StorageFactory getStorageFactory() {
2242:                return storageFactory;
2243:            }
2244:
2245:            // PrivilegedExceptionAction method
2246:            public final Object run() throws Exception {
2247:                switch (actionCode) {
2248:                case BOOT_ACTION:
2249:                    readOnly = storageFactory.isReadOnlyDatabase();
2250:                    supportsRandomAccess = storageFactory
2251:                            .supportsRandomAccess();
2252:                    return null;
2253:
2254:                case GET_TEMP_DIRECTORY_ACTION:
2255:                    return storageFactory.getTempDir();
2256:
2257:                case REMOVE_TEMP_DIRECTORY_ACTION:
2258:                    StorageFile tempDir = storageFactory.getTempDir();
2259:                    if (tempDir != null)
2260:                        tempDir.deleteAll();
2261:                    return null;
2262:
2263:                case GET_CONTAINER_PATH_ACTION:
2264:                case GET_ALTERNATE_CONTAINER_PATH_ACTION: {
2265:                    StringBuffer sb = new StringBuffer("seg");
2266:                    sb.append(containerId.getSegmentId());
2267:                    sb.append(storageFactory.getSeparator());
2268:                    if (actionCode == GET_CONTAINER_PATH_ACTION) {
2269:                        sb.append(stub ? 'd' : 'c');
2270:                        sb.append(Long
2271:                                .toHexString(containerId.getContainerId()));
2272:                        sb.append(".dat");
2273:                    } else {
2274:                        sb.append(stub ? 'D' : 'C');
2275:                        sb.append(Long
2276:                                .toHexString(containerId.getContainerId()));
2277:                        sb.append(".DAT");
2278:                    }
2279:                    return storageFactory.newStorageFile(sb.toString());
2280:                } // end of cases GET_CONTAINER_PATH_ACTION & GET_ALTERNATE_CONTAINER_PATH_ACTION
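                // A minimal sketch of the paths produced above, assuming
                // segment id 0, container id 722 (0x2d2) and '/' as the
                // separator character (all values illustrative):
                //
                //     GET_CONTAINER_PATH_ACTION            -> "seg0/c2d2.dat"
                //       (stub)                             -> "seg0/d2d2.dat"
                //     GET_ALTERNATE_CONTAINER_PATH_ACTION  -> "seg0/C2d2.DAT"
                //       (stub)                             -> "seg0/D2d2.DAT"
                //
                // Note that Long.toHexString keeps the hex digits in lower
                // case; only the leading letter and the extension differ
                // between the two actions.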
2281:
2282:                case REMOVE_STUBS_ACTION: {
2283:                    char separator = storageFactory.getSeparator();
2284:                    StorageFile root = storageFactory.newStorageFile(null);
2285:
2286:                    // get all the non-temporary data segments; their names start with "seg"
2287:                    String[] segs = root.list();
2288:                    for (int s = segs.length - 1; s >= 0; s--) {
2289:                        if (segs[s].startsWith("seg")) {
2290:                            StorageFile seg = storageFactory.newStorageFile(
2291:                                    root, segs[s]);
2292:
2293:                            if (seg.exists() && seg.isDirectory()) {
2294:                                String[] files = seg.list();
2295:                                for (int f = files.length - 1; f >= 0; f--) {
2296:                                    // stub
2297:                                    if (files[f].startsWith("D")
2298:                                            || files[f].startsWith("d")) {
2299:                                        StorageFile stub = storageFactory
2300:                                                .newStorageFile(root, segs[s]
2301:                                                        + separator + files[f]);
2302:
2303:                                        boolean delete_status = stub.delete();
2304:
2305:                                        if (SanityManager.DEBUG) {
2306:                                            // delete should always work; the stub's
2307:                                            // name came from a directory listing of
2308:                                            // the segment, so the file should exist.
2309:                                            if (!delete_status) {
2310:                                                SanityManager
2311:                                                        .THROWASSERT("delete of stub ("
2312:                                                                + stub
2313:                                                                + ") failed.");
2314:                                            }
2315:                                        }
2316:                                    }
2317:                                }
2318:                            }
2319:                        }
2320:                    }
2321:                    break;
2322:                } // end of case REMOVE_STUBS_ACTION
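                // A minimal sketch of what the stub-removal loop above
                // matches, assuming a segment directory "seg0" containing the
                // illustrative files "c2d2.dat" and "d2d2.dat":
                //
                //     "c2d2.dat"  -> kept    (regular container file)
                //     "d2d2.dat"  -> deleted (stub of a dropped container)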
2323:
2324:                case FIND_MAX_CONTAINER_ID_ACTION: {
2325:                    long maxnum = 1;
2326:                    StorageFile seg = storageFactory.newStorageFile("seg0");
2327:
2328:                    if (seg.exists() && seg.isDirectory()) {
2329:                        // create an array with names of all files in seg0
2330:                        String[] files = seg.list();
2331:
2332:                        // loop through the array looking for the maximum container id.
2333:                        for (int f = files.length - 1; f >= 0; f--) {
2334:                            try {
2335:                                long fileNumber = Long.parseLong(files[f]
2336:                                        .substring(1, (files[f].length() - 4)),
2337:                                        16);
2338:
2339:                                if (fileNumber > maxnum)
2340:                                    maxnum = fileNumber;
2341:                            } catch (Throwable t) {
2342:                                // ignore parse errors; it just means that someone
2343:                                // put a file in seg0 that we didn't expect.  Continue
2344:                                // with the next one.
2345:                            }
2346:                        }
2347:                    }
2348:                    return ReuseFactory.getLong(maxnum);
2349:                } // end of case FIND_MAX_CONTAINER_ID_ACTION
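                // A minimal sketch of the parse above, assuming an
                // illustrative file name "c2d2.dat" in seg0:
                //
                //     "c2d2.dat".substring(1, "c2d2.dat".length() - 4)  -> "2d2"
                //     Long.parseLong("2d2", 16)                         -> 722L
                //
                // Names that do not follow the container naming pattern fail
                // the parse and are skipped.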
2350:
2351:                case DELETE_IF_EXISTS_ACTION: {
2352:                    boolean ret = actionFile.exists() && actionFile.delete();
2353:                    actionFile = null;
2354:                    return ret ? this : null;
2355:                } // end of case DELETE_IF_EXISTS_ACTION
2356:
2357:                case GET_PATH_ACTION: {
2358:                    String path = actionFile.getPath();
2359:                    actionFile = null;
2360:                    return path;
2361:                } // end of case GET_PATH_ACTION
2362:
2363:                case POST_RECOVERY_REMOVE_ACTION: {
2364:                    for (Enumeration e = postRecoveryRemovedFiles.elements(); e
2365:                            .hasMoreElements();) {
2366:                        StorageFile f = (StorageFile) e.nextElement();
2367:                        if (f.exists()) {
2368:                            boolean delete_status = f.delete();
2369:
2370:                            if (SanityManager.DEBUG) {
2371:                                // delete should always work; the file's
2372:                                // existence was checked just above, so a
2373:                                // failed delete is unexpected.
2374:                                if (!delete_status) {
2375:                                    SanityManager
2376:                                            .THROWASSERT("delete of file ("
2377:                                                    + f + ") failed.");
2378:                                }
2379:                            }
2380:                        }
2381:                    }
2382:                    return null;
2383:                }
2384:
2385:                case GET_LOCK_ON_DB_ACTION:
2386:                    privGetJBMSLockOnDB();
2387:                    return null;
2388:
2389:                case RELEASE_LOCK_ON_DB_ACTION:
2390:                    privReleaseJBMSLockOnDB();
2391:                    return null;
2392:
2393:                case RESTORE_DATA_DIRECTORY_ACTION:
2394:                    privRestoreDataDirectory();
2395:                    return null;
2396:                case GET_CONTAINER_NAMES_ACTION: {
2397:                    StorageFile seg = storageFactory.newStorageFile("seg0");
2398:                    if (seg.exists() && seg.isDirectory()) {
2399:                        // return the names of all files in seg0
2400:                        return seg.list();
2401:                    }
2402:                    return null;
2403:                } // end of case GET_CONTAINER_NAMES_ACTION
2404:
2405:                }
2406:                return null;
2407:            } // end of run
2408:        }