/**
 * Sequoia: Database clustering technology.
 * Copyright (C) 2002-2004 French National Institute For Research In Computer
 * Science And Control (INRIA).
 * Contact: sequoia@continuent.org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Initial developer(s): Emmanuel Cecchet.
 * Contributor(s): ______________________________________.
 */

package org.continuent.sequoia.controller.cache.metadata;

import java.util.Hashtable;

import org.continuent.sequoia.common.i18n.Translate;
import org.continuent.sequoia.common.log.Trace;
import org.continuent.sequoia.common.protocol.Field;
import org.continuent.sequoia.common.xml.DatabasesXmlTags;
import org.continuent.sequoia.controller.requests.AbstractRequest;

/**
 * This class implements a ResultSet metadata cache.
 * <p>
 * ResultSet Fields are kept here to prevent recomputing them and allocating
 * them each time a query is executed.
 *
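 * <p>
 * A minimal usage sketch (the <code>AbstractRequest</code> and the computation
 * of its <code>Field[]</code> metadata are assumed to be provided by the
 * caller, e.g. the controller request handling code):
 *
 * <pre>
 * MetadataCache cache = new MetadataCache(0, 0); // 0 means unlimited entries
 * Field[] metadata = cache.getMetadata(request);
 * if (metadata == null) {
 *   // Not cached yet: compute and cache it (computeMetadata is a hypothetical helper)
 *   metadata = computeMetadata(request);
 *   cache.addMetadata(request, metadata);
 * }
 * </pre>
 *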
 * @author <a href="mailto:Emmanuel.Cecchet@inria.fr">Emmanuel Cecchet </a>
 * @version 1.0
 */
public class MetadataCache {
  private static Trace logger = Trace.getLogger(MetadataCache.class.getName());

  // SQL -> Field[]
  private Hashtable metadataCache;

  // Schema.Table.Column name -> Field
  private Hashtable fieldCache;
  // Maximum number of entries in each cache (Integer.MAX_VALUE means unlimited)
  private int maxNbOfMetadata;
  private int maxNbOfField;

  /**
   * Constructor for MetadataCache.
   *
   * @param maxNbOfMetadata maximum number of entries in the metadata cache (0
   *          means unlimited)
   * @param maxNbOfField maximum number of entries in the field cache (0 means
   *          unlimited)
   */
  public MetadataCache(int maxNbOfMetadata, int maxNbOfField) {
    // Validate the sizes before creating the Hashtables, otherwise a negative
    // value would make the Hashtable constructor fail before the translated
    // error message below can be thrown.
    if (maxNbOfMetadata < 0)
      throw new RuntimeException(Translate.get("cache.metadata.invalid.size",
          maxNbOfMetadata));
    if (maxNbOfField < 0)
      throw new RuntimeException(Translate.get("cache.metadata.invalid.size",
          maxNbOfField));

    metadataCache = new Hashtable(maxNbOfMetadata == 0 ? 10000 : maxNbOfMetadata);
    fieldCache = new Hashtable(maxNbOfField == 0 ? 100 : maxNbOfField);

    // A size of 0 means unlimited
    this.maxNbOfMetadata = maxNbOfMetadata == 0 ? Integer.MAX_VALUE
        : maxNbOfMetadata;
    this.maxNbOfField = maxNbOfField == 0 ? Integer.MAX_VALUE : maxNbOfField;
  }

  /**
   * Flush the cache
   */
  public void flushCache() {
    synchronized (metadataCache) {
      metadataCache.clear();
    }
    synchronized (fieldCache) {
      fieldCache.clear();
    }
  }

  /**
   * Get metadata associated to a request.
   * <p>
   * Returns null if the cache contains no metadata for the given request.
   *
   * @param request the request we look for
   * @return the metadata or null if not in cache
   */
  public Field[] getMetadata(AbstractRequest request) {
    return (Field[]) metadataCache.get(request.getUniqueKey());
  }

  /**
   * Add a metadata entry to the cache and associate it to the given request.
   *
   * @param request request to which the metadata belong
   * @param metadata metadata to cache
   */
  public void addMetadata(AbstractRequest request, Field[] metadata) {
    // Note that the underlying cache Hashtable is synchronized and we usually
    // do not need to synchronize on it.
    // Since we are about to add a cache entry, first check that the cache size
    // is within its limit, else remove an entry from the hashtable.
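    // Note that Hashtable enumeration order is unspecified, so the entry that
    // gets evicted is effectively arbitrary rather than the oldest one.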
    while (metadataCache.size() > maxNbOfMetadata) {
      // Remove first entry from Hashtable. We need to synchronize here to be
      // sure that we are not trying to concurrently remove the first cache
      // entry.
      synchronized (metadataCache) {
        try {
          metadataCache.remove(metadataCache.keys().nextElement());
        } catch (Exception ignore) {
          break;
        }
      }
    }

    // Add to cache
    try {
      metadataCache.put(request.getUniqueKey(), metadata);
    } catch (OutOfMemoryError oome) {
      flushCache();
      System.gc();
      logger.warn(Translate.get("cache.memory.error.cache.flushed", this
          .getClass()));
    }
  }

  /**
   * Get the field corresponding to a column name.
   * <p>
   * Returns null if the cache contains no field for the given name.
   *
   * @param fullyQualifiedFieldName the field name (table.column.label) to look
   *          for
   * @return the corresponding Field or null if not in cache
   */
  public Field getField(String fullyQualifiedFieldName) {
    return (Field) fieldCache.get(fullyQualifiedFieldName);
  }

  /**
   * Add a Field entry to the cache and associate it to the given name.
   *
   * @param fullyQualifiedFieldName table.column.label name that uniquely
   *          identifies the field
   * @param field field to cache
   */
  public void addField(String fullyQualifiedFieldName, Field field) {
    // Note that the underlying cache Hashtable is synchronized and we usually
    // do not need to synchronize on it.
    // Since we are about to add a cache entry, first check that the cache size
    // is within its limit, else remove an entry from the hashtable.
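    // Same bounded-size strategy as addMetadata(): when the cache is full, an
    // (effectively arbitrary) entry is evicted before the new one is inserted.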
    while (fieldCache.size() > maxNbOfField) {
      // Remove first entry from Hashtable. We need to synchronize here to be
      // sure that we are not trying to concurrently remove the first cache
      // entry.
      synchronized (fieldCache) {
        try {
          fieldCache.remove(fieldCache.keys().nextElement());
        } catch (Exception ignore) {
          break;
        }
      }
    }
    // Add to cache
    try {
      fieldCache.put(fullyQualifiedFieldName, field);
    } catch (OutOfMemoryError oome) {
      flushCache();
      System.gc();
      logger.warn(Translate.get("cache.memory.error.cache.flushed", this
          .getClass()));
    }
  }

  /**
   * Get xml information about this MetadataCache.
   *
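   * Example output, assuming the DatabasesXmlTags constants resolve to the
   * element and attribute names shown (purely illustrative):
   *
   * <pre>
   * &lt;MetadataCache maxNbOfMetadata="10000" maxNbOfField="0"/&gt;
   * </pre>
   *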
   * @return <code>String</code> in xml formatted text
   */
  public String getXml() {
    // A limit of 0 means unlimited internally (Integer.MAX_VALUE), so convert
    // back to 0 when exporting the configuration
    return "<" + DatabasesXmlTags.ELT_MetadataCache + " "
        + DatabasesXmlTags.ATT_maxNbOfMetadata + "=\""
        + (maxNbOfMetadata == Integer.MAX_VALUE ? 0 : maxNbOfMetadata) + "\" "
        + DatabasesXmlTags.ATT_maxNbOfField + "=\""
        + (maxNbOfField == Integer.MAX_VALUE ? 0 : maxNbOfField) + "\"/>";
  }

}