/*
 * Copyright 2001 Sun Microsystems, Inc. All rights reserved.
 * PROPRIETARY/CONFIDENTIAL. Use of this product is subject to license terms.
 */

package com.sun.portal.search.db;

import com.sun.portal.search.util.*;
import com.sun.portal.search.rdm.*;
import com.sun.portal.search.soif.SOIF;

import com.sleepycat.db.*;
import com.sun.portal.search.soif.SOIFOutputStream;

import java.io.*;
import java.util.*;
import java.util.logging.Level;

/**
 * Shared info per db environment.
 */
class DbEnvInfo {
    int dbenv_opened;
    DbEnv dbenv;
    String dbhome;
    long last_log; // when this db environment was last log archived
}

/**
 * Partitioned database.
 */
public class PartitionedDb {

    public static final int MD5_BYTES = 16;
    public static final int MAXEXTEN = 64;
    public static final int DEFAULT_CACHESIZE = (20 * 1024 * 1024);

    public static final String ROOTDB = "root";
    public static final String EXTENT = "extent";
    public static final String INDEX = "index";
    public static final String TYPE = "type";
    public static final String UNIQ = "unique";
    public static final String DBASE = "database";
    public static final String RD_DB = "rd.db";

    // index types
    public static final int INTTYPE = 0x1;
    public static final int TIMETYPE = 0x2;
    public static final int STRTYPE = 0x4;

    // checkpoint and logclean
    public static final int DEFAULT_CK_KB = 1000;
    public static final int DEFAULT_CK_MIN = 15;
    public static final int DEFAULT_LOGCLEAN_MIN = 2;

    int flags;
    String dbname;
    int extents;
    String extent[];
    AutoCommitDb db[]; // partition array
    int ck_kb; // kbytes between checkpoints (-1 => no checkpoint)
    int ck_min; // minutes between checkpoints (-1 => no checkpoint)
    int logclean_min; // minutes between log clean checks (-1 => no log clean)
    DbEnv dbenv;

    // stores singleton init info per db home dir
    // per application (per JVM? per classloader?)
    static Map dbEnvInfoMap = new HashMap();
    DbEnvInfo dbi = null;

    /**
     * Constructs a new, uninitialised, unopened PartitionedDb object.
     */
    public PartitionedDb() {
        ck_kb = PartitionedDb.DEFAULT_CK_KB;
        ck_min = PartitionedDb.DEFAULT_CK_MIN;
        logclean_min = PartitionedDb.DEFAULT_LOGCLEAN_MIN;
    }

    public String getName() {
        return dbname;
    }

    /**
     * Fetch a record by key.
     * @param st
     * @param key
     * @param result
     * @param flags
     * @param t
     * @throws RDMException
     * @return 0 if present, or Db.DB_NOTFOUND or Db.DB_KEYEMPTY (for a missing recno key)
     */
    public int fetch(SToken st, Datum key, Datum result, int flags,
            RDMTransaction t) throws RDMException {
        return map(key).fetch(key, result, flags, t);
    }

    /**
     * Store
     * @param st
     * @param key
     * @param newdata
     * @param flags
     * @param t
     * @throws RDMException
     */
    public void store(SToken st, Datum key, Datum newdata, int flags,
            RDMTransaction t) throws RDMException {
        // NB: We would need to create a transaction here if we wanted
        // to do anything complex, like indexing, etc
        map(key).store(key, newdata, flags, t);
        housekeep();
    }

    /**
     * Delete - creates a transaction if none supplied.
     * @param st
     * @param key
     * @param flags
     * @param t
     * @throws RDMException
     */
    public void delete(SToken st, Datum key, int flags, RDMTransaction t)
            throws RDMException {
        map(key).delete(key, flags, t);
        housekeep();
    }
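
    /*
     * Illustrative usage sketch of the basic record operations (not part of the
     * runtime code path). The URL, transaction and SOIF object below are
     * hypothetical; keys are normally the MD5-based keys produced by
     * DbUtil.key_create(), and the db is assumed to have been opened already.
     *
     *   Datum key = DbUtil.key_create("http://host.example/doc.html");
     *   Datum data = new Datum(soif.toByteArray(SOIFDb.ENCODING));
     *   pdb.store(st, key, data, 0, txn);        // insert or overwrite
     *
     *   Datum result = new Datum();
     *   if (pdb.fetch(st, key, result, 0, txn) == 0) {
     *       SOIF fetched = new SOIF(result.get_data(), SOIFDb.ENCODING);
     *   }
     *   pdb.delete(st, key, 0, txn);             // remove the record again
     */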

    /**
     * Counts the records in the database.
     * @param st
     * @param t
     * @throws RDMException
     * @return the number of records in the database
     */
    public int count(SToken st, RDMTransaction t) throws RDMException {
        // txn not used - unsafe
        int count = 0;
        Datum key, data;
        DbCursor cursor = null;
        key = new Datum();
        data = new Datum();
        cursor = new DbCursor(this, null, 0);
        while (cursor.get(key, data, Db.DB_NEXT) == 0)
            count++;
        cursor.close();
        return count;
    }

    public void dump(SOIFOutputStream out) throws RDMException {
        Datum key, data;
        DbCursor cursor = null;
        key = new Datum();
        data = new Datum();
        cursor = new DbCursor(this, null, 0);
        while (cursor.get(key, data, Db.DB_NEXT) == 0) {
            try {
                SOIF s = new SOIF(data.get_data());
                out.write(s);
            } catch (Exception e) {
                // skip records that cannot be parsed or written as SOIF
            }
        }
        cursor.close();
    }

    AutoCommitDb map(Datum key) {
        int ix = DbUtil.hex2uint32(key.get_data()) % extents;
        return db[ix];
    }
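
    /*
     * Partition selection sketch (illustrative): map() reduces a 32-bit value
     * derived from the leading bytes of the (MD5-based) key modulo the number
     * of extents, so a given key always lands in the same partition for a given
     * partition count. The key below is hypothetical:
     *
     *   Datum key = DbUtil.key_create("http://host.example/doc.html");
     *   AutoCommitDb partition = pdb.map(key);   // same partition every time
     */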

    /**
     * Create a new db with the given name and partitions.
     * @param st Security token for access control
     * @param rootdir database home directory
     * @param dbname logical database name
     * @param parts database partitions
     * @throws RDMException
     */
    static public void create(SToken st, String rootdir, String dbname,
            String parts[]) throws RDMException {
        new PartitionedDb().create1(st, rootdir, dbname, parts);
    }
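
    /*
     * Creation sketch (hypothetical home directory and partition names):
     * create() registers the partition list in the root database and creates
     * one rd.db file per partition; the database is then used through open().
     *
     *   SToken st = ...;                               // supplied by the caller
     *   String home = "/var/opt/search/db";            // hypothetical db home
     *   String[] parts = { "default/p0/rd.db", "default/p1/rd.db" };
     *   PartitionedDb.create(st, home, "default", parts);
     *
     *   PartitionedDb pdb = new PartitionedDb();
     *   pdb.open(st, home, "default", RDMDb.WRCREAT, 0644);
     *   // ... store/fetch/delete ...
     *   pdb.close(st);
     */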

    /**
     * Create a new db with the given name and partitions.
     * @param st Security token for access control
     * @param rootdir database home directory
     * @param dbname logical database name
     * @param parts database partitions
     * @throws RDMException
     */
    void create1(SToken st, String rootdir, String dbname,
            String parts[]) throws RDMException {

        int i, j, rcode = 0;
        Datum key, data;
        SOIF soif = null;
        AutoCommitDb rootdb = null, partdb = null;
        boolean exit_called = false;

        try {

            init(rootdir, 0);

            // create rootdb if needed
            rootdb = new AutoCommitDb();
            rootdb.open(rootdir + "/" + ROOTDB + "/" + RD_DB,
                    Db.DB_CREATE, 0644, dbenv);

            for (;;) {

                RDMTransaction t = null;

                try {

                    t = new BDBTxn(null, 0);

                    // create rootdb extent entry if needed
                    key = DbUtil.key_create(PartitionedDb.ROOTDB);
                    data = new Datum();
                    rcode = rootdb.fetch(key, data, 0, t);
                    if (rcode == Db.DB_NOTFOUND) {
                        // Log - Creating root database
                        SearchLogger.getLogger().log(Level.INFO,
                                "PSSH_CSPSB0036");
                        soif = new SOIF(PartitionedDb.DBASE,
                                PartitionedDb.ROOTDB);
                        soif.insert(PartitionedDb.EXTENT, ROOTDB + "/"
                                + RD_DB);
                        data = new Datum(soif
                                .toByteArray(SOIFDb.ENCODING));
                        rootdb.store(key, data, 0, t);
                    }

                    // create db extents XXX this is not checking to see if already present
                    soif = new SOIF(PartitionedDb.DBASE, dbname);
                    for (i = 0; i < parts.length; i++) {
                        soif.insert(PartitionedDb.EXTENT, parts[i], i);
                        // Put each db in its own subdir
                        File pfile = new File(parts[i]);
                        if (!pfile.isAbsolute())
                            pfile = new File(rootdir + File.separator
                                    + parts[i]);
                        File pdir = pfile.getParentFile();
                        if (pdir != null)
                            pdir.mkdirs();
                        partdb = new AutoCommitDb();
                        partdb.open(pfile.getPath(), Db.DB_CREATE,
                                0644, dbenv);
                        partdb.close();
                        partdb = null;
                    }

                    RDMSecurityManager.getInstance()
                            .preDbSoifCreateCallback(soif);

                    data.set_data(soif.toByteArray(SOIFDb.ENCODING));
                    key = DbUtil.key_create(dbname);
                    rootdb.store(key, data, 0, t);

                    // done with db extents
                    t.commit(0);
                    t = null;

                } catch (RDMDeadlockException e) {
                    SearchLogger.getLogger().log(Level.FINE,
                            "PSSH_CSPSB0001");
                    continue;
                } finally {
                    if (t != null)
                        t.abort();
                }
                break;
            }

            rootdb.close();
            rootdb = null;

        } catch (Exception e) {
            SearchLogger.getLogger().log(Level.WARNING,
                    "PSSH_CSPSB0037", e);
            throw new RDMException(e.getMessage());
        } finally {
            if (rootdb != null)
                rootdb.close();
            if (partdb != null)
                partdb.close();
            exit();
        }

    }

    /**
     * Delete the named db.
     * @param st
     * @param rootdir
     * @param dbname
     * @throws RDMException
     */
    static public void drop(SToken st, String rootdir, String dbname)
            throws RDMException {
        new PartitionedDb().drop1(st, rootdir, dbname);
    }

    /**
     * Delete the named db.
     * @param st
     * @param rootdir
     * @param dbname
     * @throws RDMException
     */
    void drop1(SToken st, String rootdir, String dbname)
            throws RDMException {

        AutoCommitDb rootdb = null;
        RDMTransaction t = null;
        Datum key, data;
        SOIF soif = null, tsoif = null;

        String p, q, buf;
        int i, j, rcode;

        try {

            SearchLogger.getLogger().log(Level.FINEST,
                    "PSSH_CSPSB0038", new Object[] { rootdir, dbname });

            init(rootdir, 0);

            rootdb = new AutoCommitDb();
            rootdb.open(rootdir + "/" + ROOTDB + "/" + RD_DB,
                    Db.DB_CREATE, 0644, dbenv);

            for (;;) {

                try {

                    t = new BDBTxn(null, 0);

                    key = DbUtil.key_create(dbname);
                    data = new Datum();

                    if ((rcode = rootdb.fetch(key, data, 0, t)) != 0)
                        throw new RDMException(
                                "dbdelete: can't access database metadata: "
                                        + dbname);

                    soif = new SOIF(data.get_data(), SOIFDb.ENCODING);

                    // delete db extents
                    rootdb.delete(key, 0, t);
                    for (i = 0;; i++) {
                        if ((p = soif.getValue(PartitionedDb.EXTENT, i)) == null)
                            break;
                        // Log - Deleting database partition
                        SearchLogger.getLogger().log(Level.FINE,
                                "PSSH_CSPSB0039", p);
                        new java.io.File(p).delete();
                    }
                    t.commit(0);
                    t = null;

                } catch (RDMDeadlockException e) {
                    SearchLogger.getLogger().log(Level.FINE,
                            "PSSH_CSPSB0001");
                    continue;
                } finally {
                    if (t != null)
                        t.abort();
                }
                break;
            }

            rootdb.close();
            rootdb = null;

        } catch (Exception e) {
            if (!(e instanceof RDMException))
                e = new RDMException(e);
            SearchLogger.getLogger().log(Level.WARNING,
                    "PSSH_CSPSB0040", e);
            throw (RDMException) e;
        } finally {
            if (rootdb != null)
                rootdb.close();
            // exit() is called once, here, to balance the single init() above
            exit();
        }

    }

    /**
     * Truncates all partitions.
     * @param st
     * @param t
     * @throws RDMException
     * @return the number of records discarded
     */
    public int purge(SToken st, RDMTransaction t) throws RDMException {
        int records = 0;
        try {
            for (int i = 0; i < extents; ++i) {
                DbTxn tx = null;
                if (t != null)
                    tx = (DbTxn) t.getNativeTxn();
                records += db[i].db.truncate(tx, 0);
            }
        } catch (DbException dbe) {
            throw new RDMException(dbe);
        }
        return records;
    }

    /**
     * Optimize the database - currently a no-op.
     * @param st
     * @throws RDMException
     */
    public void optimize(SToken st) throws RDMException {
        return; // do nothing
    }

    /**
     * open -- Opens a database
     * - rootdir -- db home dir
     * - dbname -- name of database from root.db (e.g., default)
     * - rw -- RDMDb.WRITER or RDMDb.WRCREAT or RDMDb.READER
     * - mode -- Unix mode
     * @param st
     * @param rootdir
     * @param dbname
     * @param rw
     * @param mode
     * @throws RDMException
     */
    public void open(SToken st, String rootdir, String dbname, int rw,
            int mode) throws RDMException {
        int i, j, rcode = 0;
        boolean err = false;
        Datum key, data;

        SOIF soif = null;
        AutoCommitDb rootdb = null;
        String d, path, name;

        try {

            SearchLogger.getLogger().log(Level.FINEST,
                    "PSSH_CSPSB0024", new Object[] { rootdir, dbname });

            init(rootdir, 0);

            /**
             * BerkDB open flags:
             * RDMDb.WRCREAT -> Db.DB_CREATE
             * RDMDb.WRITER  -> 0
             * RDMDb.READER  -> Db.DB_RDONLY
             * *             -> Db.DB_THREAD
             * *             -> Db.DB_NOMMAP
             */
            int oflags = Db.DB_THREAD
                    | Db.DB_NOMMAP
                    | (((rw & RDMDb.WRCREAT) != 0) ? Db.DB_CREATE
                            : ((rw & RDMDb.WRITER) != 0) ? 0
                                    : ((rw & RDMDb.READER) != 0) ? Db.DB_RDONLY
                                            : Db.DB_RDONLY);

            rootdb = new AutoCommitDb();
            new File(rootdir + "/" + ROOTDB).mkdirs();

            rootdb.open(rootdir + "/" + ROOTDB + "/" + RD_DB, oflags,
                    mode, dbenv);

            key = DbUtil.key_create(dbname);
            data = new Datum();

            rcode = rootdb.fetch(key, data, 0, null);
            if (rcode == Db.DB_NOTFOUND && (rw & RDMDb.WRCREAT) != 0) {
                try {
                    // Log - Db not found, creating:
                    SearchLogger.getLogger().log(Level.FINE,
                            "PSSH_CSPSB0041", dbname);
                    String p;
                    String[] parts = null;
                    // probably a bit dubious to access config
                    // info here, but it sure cleans things up a lot
                    if ((p = SearchConfig
                            .getValue(SearchConfig.DBPARTS)) != null) {
                        parts = String2Array.string2Array(p, ',', true);
                    } else {
                        parts = new String[1];
                        parts[0] = dbname + "/" + RD_DB;
                        // Log - Using default partition configuration:
                        SearchLogger.getLogger().log(Level.FINE,
                                "PSSH_CSPSB0042", parts[0]);
                    }
                    create1(st, rootdir, dbname, parts);
                    rcode = rootdb.fetch(key, data, 0, null); // refetch the db meta data
                } catch (Exception e) {
                    SearchLogger.getLogger().log(Level.WARNING,
                            "PSSH_CSPSB0043", e);
                    throw new RDMException("Failed to create db: "
                            + dbname);
                }
            } else if (rcode != 0)
                throw new RDMException(new DbException(
                        "Failed to open db", rcode));

            soif = new SOIF(data.get_data(), SOIFDb.ENCODING);

            db = new AutoCommitDb[PartitionedDb.MAXEXTEN];
            extent = new String[PartitionedDb.MAXEXTEN];

            for (i = 0; i < PartitionedDb.MAXEXTEN; i++) {
                if ((d = soif.getValue(PartitionedDb.EXTENT, i)) == null)
                    break;
                String location = d;
                if (!new File(location).isAbsolute()) {

                    // For 301Csp1 resolve relative extents against serverroot
                    // XXX For Hana we might change this to dbhome dir
                    //location = new File(SearchConfig.getValue(SearchConfig.SERVER_ROOT) + File.separator + location).getAbsolutePath();

                    // XXX Handle legacy case: old dbs might have extents like db/name/rd.db but we have
                    // changed the db location from server-root to database-directory, which by default
                    // includes the /db suffix (to allow for relocation of the entire db)
                    // so we'll say that if rootdir ends with /db and the extent begins with db/
                    // then we'll strip off the leading db from the extent
                    if (rootdir.endsWith("db")
                            && location.startsWith("db/"))
                        location = location.substring(3);

                    location = new File(rootdir + File.separator
                            + location).getAbsolutePath();

                }
                db[i] = new AutoCommitDb();
                db[i].open(location, oflags, mode, dbenv);
                extent[i] = d;
                // Log - Database partition opened: {0}
                SearchLogger.getLogger().log(Level.FINEST,
                        "PSSH_CSPSB0044", location);
            }

            extents = i;

            // Log - Database opened: {0}, {1}
            SearchLogger.getLogger().log(Level.FINE, "PSSH_CSPSB0045",
                    new Object[] { rootdir, dbname });

        } catch (Exception e) {
            if (!(e instanceof RDMException))
                e = new RDMException(e.getMessage());
            throw (RDMException) e;
        } finally {
            if (rootdb != null)
                rootdb.close();
        }

    }
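
    /*
     * Read-only open sketch (hypothetical home directory): RDMDb.READER maps to
     * Db.DB_RDONLY, so no partitions are created and stores will fail.
     *
     *   PartitionedDb pdb = new PartitionedDb();
     *   pdb.open(st, "/var/opt/search/db", "default", RDMDb.READER, 0644);
     *   int n = pdb.count(st, null);
     *   pdb.close(st);
     */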

    /**
     * Open as a single partition.
     * @param rootdir db home directory; if null, the partition directory is used
     * @param partitionPath path to the partition file
     * @param rw
     * @param mode
     * @throws Exception
     */
    public void partopen(String rootdir, String partitionPath, int rw,
            int mode) throws Exception {
        int oflags = Db.DB_THREAD
                | Db.DB_NOMMAP
                | (((rw & RDMDb.WRCREAT) != 0) ? Db.DB_CREATE
                        : ((rw & RDMDb.WRITER) != 0) ? 0
                                : ((rw & RDMDb.READER) != 0) ? Db.DB_RDONLY
                                        : Db.DB_RDONLY);
        extents = 1;
        if (!new File(partitionPath).isAbsolute())
            partitionPath = new File(partitionPath).getAbsolutePath();
        if (rootdir == null)
            rootdir = partitionPath; // XXX will fail if part is in subdir
        init(rootdir, 0);
        db = new AutoCommitDb[PartitionedDb.MAXEXTEN];
        db[0] = new AutoCommitDb();
        db[0].open(partitionPath, oflags, mode, dbenv);
    }
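
    /*
     * Single-partition open sketch (hypothetical paths), e.g. for a maintenance
     * utility that operates on one rd.db file directly rather than on the
     * logical database registered in root.db:
     *
     *   PartitionedDb pdb = new PartitionedDb();
     *   pdb.partopen("/var/opt/search/db", "/var/opt/search/db/default/rd.db",
     *           RDMDb.READER, 0644);
     *   pdb.dump(soifOut);   // soifOut is a caller-supplied SOIFOutputStream
     */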

    /**
     * Closes the db.
     * @param st
     * @throws RDMException
     */
    public void close(SToken st) throws RDMException {
        try {
            for (int i = 0; i < extents; i++) {
                if (db[i] != null)
                    db[i].close();
            }
            housekeep();
            // Log - Database closed
            SearchLogger.getLogger().log(Level.FINE, "PSSH_CSPSB0046");
        } finally {
            exit();
        }
    }

    /**
     * Checks whether recovery is needed.
     * @param dbhome
     * @param fatal
     * @throws RDMException
     * @return 0 (recovery detection is not currently implemented)
     * XXX this is really an environment function
     */
    public int check_recover(String dbhome, int fatal)
            throws RDMException {
        // Not using daemons any more - need to do recovery detection here
        return 0; // XXX
    }

    /**
     * Recover the db - must be run standalone (i.e., no one else has the db open).
     * @param dbhome
     * @param fatal
     * @throws RDMException
     */
    public void recover(String dbhome, boolean fatal)
            throws RDMException {
        init(dbhome, (fatal ? Db.DB_RECOVER_FATAL : Db.DB_RECOVER)
                | Db.DB_PRIVATE);
        exit();
    }
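
    /*
     * Recovery sketch (hypothetical home directory): recovery opens the
     * environment with Db.DB_RECOVER (or Db.DB_RECOVER_FATAL) and must be run
     * while no other process has the database open.
     *
     *   PartitionedDb pdb = new PartitionedDb();
     *   pdb.recover("/var/opt/search/db", false);    // normal recovery
     *   // pdb.recover("/var/opt/search/db", true);  // catastrophic recovery
     */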

    private void verifyRuntimeEnv() throws RDMException {
        // current java data model : 32/64 bits
        String sunArchDataModel = System
                .getProperty("sun.arch.data.model");
        // search.conf configured runtime data model : 32/64 bits
        String searchArchPlateform = SearchConfig
                .getValue(SearchConfig.PLATFORM);
        if (searchArchPlateform != null
                && searchArchPlateform.trim().length() > 0) {
            // verify that the runtime matches the data model declared in search.conf
            if (!sunArchDataModel.equals(searchArchPlateform.trim())) {
                SearchLogger.getLogger().log(Level.SEVERE,
                        "PSSH_CSPSB0088",
                        new Object[] { searchArchPlateform.trim(),
                                sunArchDataModel });
                throw new RDMException("invalid java runtime "
                        + sunArchDataModel + "bit architecture");
            }
        } else {
            if (!sunArchDataModel.equals("32")) {
                SearchLogger.getLogger().log(Level.SEVERE,
                        "PSSH_CSPSB0088",
                        new Object[] { "32", sunArchDataModel });
                throw new RDMException("invalid java runtime "
                        + sunArchDataModel + "bit architecture");
            }
        }
        // verify that the BDB environment was created with a runtime matching the
        // current configuration; if the initial runtime environment differs, throw
        // an exception. There should be an automatic recovery mechanism here, but
        // that requires stopping the search server, and this mismatch can occur on
        // calls from the command-line tools.
        File dbEnvFile = new File(SearchConfig
                .getValue(SearchConfig.DBDIR)
                + File.separator + "DB.env");
        try {
            if (!dbEnvFile.exists()) {
                // first run: the file does not exist, so record the current runtime
                Properties p = new Properties();
                p.setProperty("os.arch", System.getProperty("os.arch"));
                p.setProperty("sun.arch.data.model", System
                        .getProperty("sun.arch.data.model"));
                p.store(new FileOutputStream(dbEnvFile),
                        "Initial Berkeley DB runtime environment");
            } else {
                Properties p = new Properties();
                p.load(new FileInputStream(dbEnvFile));
                if (!sunArchDataModel.equals(p
                        .getProperty("sun.arch.data.model"))
                        || !System.getProperty("os.arch").equals(
                                p.getProperty("os.arch"))) {
                    SearchLogger.getLogger().log(Level.SEVERE,
                            "PSSH_CSPSB0089",
                            new Object[] {
                                    p.getProperty("sun.arch.data.model"),
                                    p.getProperty("os.arch"),
                                    sunArchDataModel,
                                    System.getProperty("os.arch") });
                    throw new RDMException(
                            "database was initialized with different 32/64 bits environment");
                }
            }
        } catch (IOException ioe) {
            throw new RDMException(
                    "failed to access/update db environment file", ioe);
        }
    }

    synchronized void init(String dbhome, int extraFlags)
            throws RDMException {
        verifyRuntimeEnv();

        String abshome = null;
        try {
            abshome = new File(dbhome).getCanonicalPath();
        } catch (Exception e) {
            throw new RDMException("Can't access " + dbhome);
        }
        dbi = (DbEnvInfo) dbEnvInfoMap.get(abshome);
        if (dbi == null) {
            dbi = new DbEnvInfo();
            dbi.dbenv = null;
            dbi.dbenv_opened = 0;
            dbi.dbhome = abshome;
            dbEnvInfoMap.put(abshome, dbi);
        }

        // Log - init({0}, {1}) dbi.dbenv_opened
        SearchLogger.getLogger().log(Level.FINEST, "PSSH_CSPSB0047",
                new Object[] { dbhome, Integer.toString(extraFlags) });

        if (dbi.dbenv_opened > 0) {
            ++dbi.dbenv_opened;
            dbenv = dbi.dbenv;
            return;
        }

        // init signal handlers
        //XXX signal(SIGINT, catch);

        // recover db if necessary - must run before we open database
        check_recover(dbhome, 0);

        /**
         * Always use:
         * Db.DB_INIT_MPOOL - shared memory pools
         * Db.DB_CREATE - to create files
         * Db.DB_THREAD - for threaded data structs
         * Db.DB_USE_ENVIRON - for TMPDIR
         * Db.DB_NOMMAP - no mmap() -- creates too large processes
         */
        int flags = Db.DB_INIT_LOG | Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL
                | Db.DB_INIT_TXN | Db.DB_CREATE | Db.DB_THREAD
                | Db.DB_USE_ENVIRON | extraFlags;

        dbi.dbenv = dbenv = new DbEnv(0);

        if (SearchLogger.getLogger().isLoggable(Level.FINEST)) {
            // this is not strictly necessary in production since
            // exceptions describe most errors adequately
            try {
                dbenv.set_error_stream(new FileOutputStream(
                        SearchConfig.getValue(SearchConfig.SERVER_ROOT)
                                + "/logs/db.log", true));
                dbenv.set_verbose(Db.DB_VERB_CHKPOINT, true);
                dbenv.set_verbose(Db.DB_VERB_DEADLOCK, true);
                dbenv.set_verbose(Db.DB_VERB_RECOVERY, true);
                dbenv.set_verbose(Db.DB_VERB_WAITSFOR, true);
            } catch (Exception e) {
                SearchLogger.getLogger().log(Level.WARNING,
                        "PSSH_CSPSB0048", e);
            }
        }

        int cachesize = PartitionedDb.DEFAULT_CACHESIZE;
        String p = SearchConfig.getValue(SearchConfig.DB_CACHE_SIZE);
        if (p != null) {
            try {
                cachesize = Integer.parseInt(p) * 1024;
            } catch (Exception e) {
                // ignore a malformed value and keep the default cache size
            }
        }

        try {
            dbenv.set_cachesize(0, cachesize, 0);

            // run the deadlock detector on every lock conflict
            dbenv.set_lk_detect(Db.DB_LOCK_YOUNGEST);
            dbenv.set_flags(Db.DB_TXN_NOSYNC /*| Db.DB_NOMMAP*/, true);

            // Log - DbEnv.get_version_string() = {0} , dbenv.open({1}, {2})
            SearchLogger.getLogger().log(
                    Level.FINE,
                    "PSSH_CSPSB0049",
                    new Object[] { DbEnv.get_version_string(), dbhome,
                            Integer.toString(flags) });
            try {
                dbenv.open(dbhome, flags, 0644);
            } catch (java.io.FileNotFoundException e) {
                throw new RDMException(e.getMessage());
            } catch (DbException e) {
                // Log - dbenv.open() Failed
                SearchLogger.getLogger().log(Level.WARNING,
                        "PSSH_CSPSB0050", e);
                throw e;
            }
            BDBTxn.init(dbi.dbenv);
            dbi.dbenv_opened++;
        } catch (DbException dbe) {
            throw new RDMException(dbe);
        }
    }

    synchronized void exit() throws RDMException {
        if (dbi.dbenv_opened <= 0) {
            // Log - in db exit - bad dbenv count {0}
            SearchLogger.getLogger().log(Level.WARNING, "PSSH_CSPSB0087",
                    new Object[] { Integer.toString(dbi.dbenv_opened) });
            return;
        }
        if (--dbi.dbenv_opened == 0) {
            try {
                // Log - dbenv.close()
                SearchLogger.getLogger().log(Level.FINE,
                        "PSSH_CSPSB0052");
                dbi.dbenv.close(0);
                dbi.dbenv = null;
                dbEnvInfoMap.remove(dbi.dbhome);
            } catch (DbException e) {
                SearchLogger.getLogger().log(Level.WARNING,
                        "PSSH_CSPSB0053", e);
                throw new RDMException(e);
            }
        }
    }

    /**
     * repartition - changes the partition array and reorganizes data
     *
     * Can run standalone with t == null
     * @param st
     * @param rootdir db home directory
     * @param dbname database logical name
     * @param newParts new partition array
     * @return number of relocated keys
     * @throws RDMException
     */
    public int repartition(SToken st, String rootdir, String dbname,
            String[] newParts) throws RDMException {
        // Log - Repartitioning database
        SearchLogger.getLogger().log(Level.FINE, "PSSH_CSPSB0054");
        Arrays.sort(newParts);
        String[] oldParts = extent;
        Arrays.sort(oldParts, 0, extents - 1);
        int txnSize = 50; // Reorg this many RDs each txn

        // create a new db with new partition array
        PartitionedDb newDb = new PartitionedDb();
        newDb.create1(st, rootdir, dbname, newParts);
        newDb.open(st, rootdir, dbname, RDMDb.WRITER, 0644);

        // reorganize data
        int count = 0;
        Datum key, data;
        RDMTransaction t = null;
        Dbc cursor = null;
        key = new Datum();
        data = new Datum();
        for (int i = 0; i < extents; ++i) {
            AutoCommitDb adb = db[i];
            key.set_data("");
            data.set_data("");
            boolean done = false;
            while (!done) {
                // remember the start of this block for retrying
                Datum start_key = new Datum(key.get_data());
                Datum start_data = new Datum(data.get_data());
                for (;;) {
                    // retry loop
                    try {
                        t = new BDBTxn(null, 0);
                        cursor = adb.db.cursor(
                                (DbTxn) t.getNativeTxn(), 0);
                        // go to start of block
                        key.set_data(start_key.get_data());
                        data.set_data(start_data.get_data());
                        cursor.get(key, data, Db.DB_SET_RANGE);
                        int cnt = 0;
                        int rcode = 0;
                        do {
                            if (!adb.location
                                    .equals(newDb.map(key).location)) {
                                // the key is in the wrong partition -- move it!
                                SearchLogger.getLogger().log(
                                        Level.FINEST,
                                        "PSSH_CSPSB0055",
                                        new Object[] {
                                                key.get_data(),
                                                adb.location,
                                                newDb.map(key).location });
                                newDb.map(key).store(key, data, 0, t);
                                adb.delete(key, 0, t);
                                ++cnt;
                                ++count;
                            }
                        } while (cnt < txnSize
                                && (rcode = cursor.get(key, data,
                                        Db.DB_NEXT)) == 0);
                        if (rcode == Db.DB_NOTFOUND)
                            done = true; // reached end of current partition
                        else if (rcode != 0)
                            throw new RDMException(
                                    "internal error: cursor get next returned "
                                            + rcode);
                        cursor.close();
                        cursor = null;
                        t.commit(0);
                        t = null;
                    } catch (DbDeadlockException e) {
                        SearchLogger.getLogger().log(Level.FINE,
                                "PSSH_CSPSB0002");
                        continue;
                    } catch (DbException dbe) {
                        SearchLogger.getLogger().log(Level.FINE,
                                "PSSH_CSPSB0003");
                        throw new RDMException(dbe);
                    } finally {
                        if (cursor != null)
                            try {
                                cursor.close();
                            } catch (DbException dbe) {
                                throw new RDMException(dbe);
                            }
                        if (t != null)
                            t.abort();
                    }
                    break;
                }
            }
        }
        newDb.close(st);
        return count;
    }
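
    /*
     * Repartitioning sketch (hypothetical home directory and partition names):
     * the database must already be open; records whose keys now hash to a
     * different extent are moved in batches of txnSize per transaction.
     *
     *   PartitionedDb pdb = new PartitionedDb();
     *   pdb.open(st, home, "default", RDMDb.WRITER, 0644);
     *   int moved = pdb.repartition(st, home, "default", new String[] {
     *           "default/p0/rd.db", "default/p1/rd.db", "default/p2/rd.db" });
     *   pdb.close(st);
     */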

    /**
     * housekeep - checkpoint db and clean logs, should be called
     * periodically when db modifications are made
     *
     * ck_kb : checkpoint if ck_kb kb have been written to the log since last ckp
     * ck_min : checkpoint if ck_min minutes have elapsed since last ckp
     * logclean_min : clean logs if logclean_min minutes have elapsed since last done
     * (-1 to disable)
     */
    void housekeep() throws RDMException {

        // XXX make this a thread???
        // Log - housekeeping
        SearchLogger.getLogger().log(Level.FINEST, "PSSH_CSPSB0056");

        if (ck_kb != -1 || ck_min != -1) {
            int this_ck_kb = ck_kb;
            int this_ck_min = ck_min;
            for (;;) {
                try {
                    dbenv.txn_checkpoint(this_ck_kb, this_ck_min, 0);
                } catch (DbException e) {
                    if (e.get_errno() == Db.DB_INCOMPLETE) {
                        // Log - Transaction checkpoint incomplete
                        SearchLogger.getLogger().log(Level.FINEST,
                                "PSSH_CSPSB0057", e);
                        try {
                            Thread.sleep(50);
                        } catch (Exception ignored) {
                        }
                        this_ck_kb = 0;
                        this_ck_min = 0;
                        continue; // retry if incomplete
                    } else {
                        // Log - txn_checkpoint() failed
                        SearchLogger.getLogger().log(Level.WARNING,
                                "PSSH_CSPSB0058", e);
                        throw new RDMException(e);
                    }
                }
                break;
            }
        }

        String[] log_list = null;
        long now;
        if (logclean_min != -1
                && ((now = new java.util.Date().getTime())
                        - dbi.last_log >= logclean_min * 60 * 1000)) {
            // Log - doing log clean check
            SearchLogger.getLogger()
                    .log(Level.FINEST, "PSSH_CSPSB0059");
            dbi.last_log = now;
            try {
                log_list = dbenv.log_archive(Db.DB_ARCH_ABS);
            } catch (DbException e) {
                // Log - log_archive failed
                SearchLogger.getLogger().log(Level.WARNING,
                        "PSSH_CSPSB0060");
                throw new RDMException(e);
            }
            if (log_list != null) {
                int i;
                for (i = 0; i < log_list.length; ++i) {
                    // Log - Cleaning database log: {0}
                    SearchLogger.getLogger().log(Level.FINEST,
                            "PSSH_CSPSB0061", log_list[i]);
                    new java.io.File(log_list[i]).delete();
                }
            }
        }
    }
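
    /*
     * Tuning sketch (illustrative only): checkpointing and log cleaning are
     * driven by the package-level ck_kb, ck_min and logclean_min fields, so
     * code in this package could, for example, disable log cleaning entirely:
     *
     *   PartitionedDb pdb = new PartitionedDb();
     *   pdb.ck_kb = 500;          // checkpoint after ~500 kb of new log data
     *   pdb.ck_min = 5;           // or after 5 minutes, whichever comes first
     *   pdb.logclean_min = -1;    // never remove archived log files
     */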

}