/**
 * Sequoia: Database clustering technology.
 * Copyright (C) 2002-2004 French National Institute For Research In Computer
 * Science And Control (INRIA).
 * Contact: sequoia@continuent.org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Initial developer(s): Emmanuel Cecchet.
 * Contributor(s): ______________________________________.
 */

package org.continuent.sequoia.controller.cache.parsing;

import java.sql.SQLException;
import java.util.Hashtable;

import org.continuent.sequoia.common.i18n.Translate;
import org.continuent.sequoia.common.log.Trace;
import org.continuent.sequoia.common.xml.DatabasesXmlTags;
import org.continuent.sequoia.controller.requestmanager.ParserThread;
import org.continuent.sequoia.controller.requestmanager.RequestManager;
import org.continuent.sequoia.controller.requests.AbstractRequest;
import org.continuent.sequoia.controller.requests.ParsingGranularities;
import org.continuent.sequoia.controller.requests.RequestType;

/**
 * This class implements a request parsing cache: the parsing of a request is
 * cached so that requests sharing the same SQL statement (or SQL skeleton) do
 * not need to be parsed again.
 *
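 * <p>
 * A minimal usage sketch (the request manager and request objects used below
 * are assumed to be configured elsewhere; this is illustrative, not the only
 * way to drive the cache):
 *
 * <pre>
 * ParsingCache cache = new ParsingCache(1000, false);
 * cache.setRequestManager(requestManager);
 * cache.setGranularity(ParsingGranularities.COLUMN_UNIQUE);
 * cache.getParsingFromCacheAndParseIfMissing(request);
 * </pre>
 *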
 * @author <a href="mailto:Emmanuel.Cecchet@inria.fr">Emmanuel Cecchet </a>
 * @version 1.0
 */
public class ParsingCache {
  private static Trace logger = Trace.getLogger(ParsingCache.class.getName());
  // SQL -> parsed request
  private Hashtable cache;
  // SQL -> CurrentlyParsingEntry
  private Hashtable currentlyParsing;
  private RequestManager requestManager;
  private int granularity;
  private int maxNbOfEntries;
  // Default is parse when needed
  private boolean backgroundParsing;
  // Default is case insensitive
  private boolean caseSensitiveParsing;

  /**
   * CurrentlyParsingEntry contains a (Request, ParserThread) pair which is an
   * element of the currentlyParsing Hashtable.
   *
   * @author <a href="mailto:Emmanuel.Cecchet@inria.fr">Emmanuel Cecchet </a>
   * @version 1.0
   */
  private class CurrentlyParsingEntry {
    private ParserThread parserThread;
    private AbstractRequest request;

    /**
     * Constructor for CurrentlyParsingEntry.
     *
     * @param parserThread creating parser thread
     * @param request request to parse
     */
    public CurrentlyParsingEntry(ParserThread parserThread,
        AbstractRequest request) {
      this.parserThread = parserThread;
      this.request = request;
    }

    /**
     * Returns the parserThread.
     *
     * @return ParserThread
     */
    public ParserThread getParserThread() {
      return parserThread;
    }

    /**
     * Returns the request.
     *
     * @return AbstractRequest
     */
    public AbstractRequest getRequest() {
      return request;
    }
  }

  /**
   * Constructor for ParsingCache.
   *
   * @param size maximum cache size in number of entries
   * @param backgroundParsing true if the parsing should be done in the
   *          background by a ParserThread
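   *          (for example, {@code new ParsingCache(0, true)} creates a cache
   *          with no limit on the number of entries that parses requests in
   *          the background)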
   */
  public ParsingCache(int size, boolean backgroundParsing) {
    // Validate the size before using it to create the underlying Hashtable
    if (size < 0)
      throw new RuntimeException(Translate.get("cache.parsing.invalid.size",
          size));
    cache = new Hashtable(size == 0 ? 10000 : size);
    currentlyParsing = new Hashtable();
    if (size == 0)
      this.maxNbOfEntries = Integer.MAX_VALUE;
    else
      this.maxNbOfEntries = size;
    this.backgroundParsing = backgroundParsing;
    caseSensitiveParsing = false;
  }

  /**
   * Returns the granularity value.
   *
   * @return Returns the granularity.
   */
  public int getGranularity() {
    return granularity;
  }

  /**
   * Sets the granularity value.
   *
   * @param granularity The granularity to set.
   */
  public void setGranularity(int granularity) {
    this.granularity = granularity;
  }

  /**
   * Returns the requestManager value.
   *
   * @return Returns the requestManager.
   */
  public RequestManager getRequestManager() {
    return requestManager;
  }

  /**
   * Sets the requestManager value.
   *
   * @param requestManager The requestManager to set.
   */
  public void setRequestManager(RequestManager requestManager) {
    this.requestManager = requestManager;
  }

  /**
   * If the same SQL query is found in the cache, its parsing is cloned into
   * the given request. On a cache miss, if backgroundParsing is set to true, a
   * ParserThread starts parsing the request in the background; otherwise
   * nothing is done.
   *
   * @param request the request to look for
   */
  public void getParsingFromCache(AbstractRequest request) {
    if (request.isParsed())
      return;

    String sql = request.getUniqueKey();
    AbstractRequest parsedRequest = (AbstractRequest) cache.get(sql);

    if (parsedRequest != null) { // Cache hit, clone the parsing
      request.cloneParsing(parsedRequest);
      return;
    } else if (backgroundParsing) { // Cache miss, start parsing the request in background
      synchronized (currentlyParsing) {
        if (!currentlyParsing.containsKey(sql)) { // Nobody else is parsing the same SQL query
          ParserThread pt = new ParserThread(request,
              requestManager.getDatabaseSchema(), granularity,
              caseSensitiveParsing);
          currentlyParsing.put(sql, new CurrentlyParsingEntry(pt, request));
        }
      }
    }
  }

  /**
   * Gets the parsing from the cache and parses the request if no parsing is
   * found, updating the cache with the result.
   *
   * @param request the request we look for
   * @exception SQLException if an error occurs
   */
  public void getParsingFromCacheAndParseIfMissing(AbstractRequest request)
      throws SQLException {
    if (request.isParsed())
      return;

    // Check cache
    String instanciatedSQL = request.getUniqueKey();
    AbstractRequest parsedRequest = (AbstractRequest) cache
        .get(instanciatedSQL);

    try {
      if (parsedRequest == null) { // Cache miss
        String sqlSkeleton = request.getSqlOrTemplate();
        String sql;
        if (sqlSkeleton != null) { // Missed with the instantiated query, try with the skeleton
          sql = sqlSkeleton;
          parsedRequest = (AbstractRequest) cache.get(sql);
          if (parsedRequest != null) { // Cache hit with the skeleton
            request.cloneParsing(parsedRequest);
            return;
          }
        } else
          sql = instanciatedSQL;

        // Full cache miss. Note that the underlying cache Hashtable is
        // synchronized and we usually do not need to synchronize on it.
        // As we will have to add a cache entry, check that the cache size is
        // ok, else remove the first entry of the hashtable.
        while (cache.size() > maxNbOfEntries) {
          // Remove the first entry from the Hashtable. We need to synchronize
          // here to be sure that we are not trying to concurrently remove the
          // first cache entry.
          synchronized (cache) {
            try {
              cache.remove(cache.keys().nextElement());
            } catch (Exception ignore) {
              break;
            }
          }
        }

        // Both the skeleton and the instantiated query missed
        if (backgroundParsing) {
          // Find the parsing thread and request (note that the Hashtable is
          // synchronized)
          CurrentlyParsingEntry cpe = (CurrentlyParsingEntry) currentlyParsing
              .get(sql);
          if (cpe != null) {
            ParserThread pt = cpe.getParserThread();
            try {
              if (pt != null) {
                // Wait for completion
                pt.join();
                synchronized (currentlyParsing) {
                  currentlyParsing.remove(sql);
                }

                // Update cache
                if ((granularity != ParsingGranularities.COLUMN_UNIQUE)
                    || (sqlSkeleton == null))
                  // No skeleton or no uniqueness criteria, add the query
                  cache.put(instanciatedSQL, cpe.getRequest());
                else { // We have a skeleton and COLUMN_UNIQUE parsing
                  if (request.getCacheAbility() != RequestType.UNIQUE_CACHEABLE)
                    // It is NOT UNIQUE, add the skeleton
                    cache.put(sqlSkeleton, cpe.getRequest());
                  else
                    // It is UNIQUE, add the instantiated query
                    cache.put(instanciatedSQL, cpe.getRequest());
                }
              }
            } catch (InterruptedException failed) {
              throw new SQLException(Translate.get(
                  "cache.parsing.failed.join.parser.thread", new String[] {
                      "" + request.getId(), failed.getMessage() }));
            }
          }
        }
        // Parse it now because we did not parse in the background or because
        // the background parsing failed for some reason.
        request.parse(requestManager.getDatabaseSchema(), granularity,
            caseSensitiveParsing);

        // Update cache
        if ((sqlSkeleton != null)
            && (granularity == ParsingGranularities.COLUMN_UNIQUE)
            && (request.getCacheAbility() == RequestType.UNIQUE_CACHEABLE))
          // If this is a unique request, we must put the instantiated query in
          // the cache to retrieve the exact pk value.
          cache.put(instanciatedSQL, request);
        else
          cache.put(sql, request);
      } else
        // Cache hit
        request.cloneParsing(parsedRequest);
    } catch (OutOfMemoryError oome) {
      synchronized (cache) {
        cache.clear();
      }
      System.gc();
      logger.warn(Translate.get("cache.memory.error.cache.flushed",
          this.getClass()));
    }
  }

  /**
   * Returns the backgroundParsing.
   *
   * @return boolean
   */
  public boolean isBackgroundParsing() {
    return backgroundParsing;
  }

  /**
   * Sets the background parsing. If true, requests are parsed in the
   * background by a separate thread that is created for this purpose.
   *
   * @param backgroundParsing The backgroundParsing to set
   */
  public void setBackgroundParsing(boolean backgroundParsing) {
    this.backgroundParsing = backgroundParsing;
  }

  /**
   * Sets the parsing case sensitivity.
   *
   * @param isCaseSensitiveParsing true if parsing is case sensitive
   */
  public void setCaseSensitiveParsing(boolean isCaseSensitiveParsing) {
    this.caseSensitiveParsing = isCaseSensitiveParsing;
  }

  /**
   * Returns the caseSensitiveParsing.
   *
   * @return boolean
   */
  public boolean isCaseSensitiveParsing() {
    return caseSensitiveParsing;
  }

  /**
   * Get xml information about this ParsingCache.
   *
   * @return <code>String</code> in xml formatted text
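   *         (for example, assuming the usual DatabasesXmlTags element and
   *         attribute names, something like
   *         {@code <ParsingCache backgroundParsing="false" maxNbOfEntries="10000"/>})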
   */
  public String getXml() {
    return "<" + DatabasesXmlTags.ELT_ParsingCache + " "
        + DatabasesXmlTags.ATT_backgroundParsing + "=\"" + backgroundParsing
        + "\" " + DatabasesXmlTags.ATT_maxNbOfEntries + "=\"" + maxNbOfEntries
        + "\"/>";
  }

  /**
   * Retrieves the current request parsing cache configuration. The returned
   * string contains:
   * <ul>
   * <li>granularity</li>
   * <li>maximum number of entries</li>
   * <li>background parsing flag</li>
   * <li>case sensitivity</li>
   * </ul>
   *
   * @return a String containing the configuration of the cache
   */
  public String dumpCacheConfig() {
    StringBuffer sb = new StringBuffer();
    sb.append(Translate.get("cache.dump")); //$NON-NLS-1$
    sb.append(Translate.get("cache.granularity", //$NON-NLS-1$
        ParsingGranularities.getInformation(granularity)));
    sb.append(Translate.get("cache.max.entries", maxNbOfEntries)); //$NON-NLS-1$
    sb.append(Translate.get("cache.background.parsing", backgroundParsing)); //$NON-NLS-1$
    sb.append(Translate.get("cache.case.sensitive", caseSensitiveParsing)); //$NON-NLS-1$
    return sb.toString();
  }

  /**
   * Retrieves the number of entries currently contained in the cache.
   *
   * @return number of cache entries
   */
  public int getNumberOfCacheEntries() {
    return cache.size();
  }

  /**
   * Prints entries of the cache, from beginIndex to (beginIndex + max) or to
   * the last entry if beginIndex + max is out of bounds.
   *
   * @param beginIndex entry from which to start the dump
   * @param max maximum number of entries to dump
   * @return a string containing the cache entries (request unique key + short
   *         description string) from beginIndex to the last entry or to
   *         beginIndex + max.
   */
  public String dumpCacheEntries(int beginIndex, int max) {
    StringBuffer sb = new StringBuffer();
    int i = beginIndex;
    Object[] keys = cache.keySet().toArray();
    while (i < keys.length && i < (beginIndex + max)) {
      sb.append(Translate.get("cache.entry", new String[]{ //$NON-NLS-1$
          keys[i].toString(),
          ((AbstractRequest) cache.get(keys[i])).toShortDebugString()}));
      i++;
    }
    return sb.toString();
  }

  /**
   * Dumps requests that are currently being parsed.
   *
   * @return a String containing the entries (request unique key + short
   *         description string) currently being parsed
   */
  public String dumpCurrentlyParsedEntries() {
    StringBuffer sb = new StringBuffer();
    if (currentlyParsing != null && currentlyParsing.size() > 0) {
      sb.append(Translate.get("cache.currently.parsing.entries")); //$NON-NLS-1$
      Object[] keys = currentlyParsing.keySet().toArray();
      for (int i = 0; i < keys.length; i++) {
        sb.append(Translate.get("cache.currently.parsing.entry", //$NON-NLS-1$
            new String[]{
                keys[i].toString(),
                ((CurrentlyParsingEntry) currentlyParsing.get(keys[i]))
                    .getRequest().toShortDebugString()}));
      }
    }
    return sb.toString();
  }
}