001: /**
002: * Licensed to the Apache Software Foundation (ASF) under one or more
003: * contributor license agreements. See the NOTICE file distributed with
004: * this work for additional information regarding copyright ownership.
005: * The ASF licenses this file to You under the Apache License, Version 2.0
006: * (the "License"); you may not use this file except in compliance with
007: * the License. You may obtain a copy of the License at
008: *
009: * http://www.apache.org/licenses/LICENSE-2.0
010: *
011: * Unless required by applicable law or agreed to in writing, software
012: * distributed under the License is distributed on an "AS IS" BASIS,
013: * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
014: * See the License for the specific language governing permissions and
015: * limitations under the License.
 */
package org.apache.solr.search;
017:
018: import org.apache.solr.core.SolrCore;
019: import org.apache.solr.core.SolrException;
020: import org.apache.solr.util.NamedList;
021: import org.apache.solr.util.SimpleOrderedMap;
022:
023: import java.util.*;
024: import java.util.concurrent.atomic.AtomicLong;
025: import java.io.IOException;
026: import java.net.URL;
027:
028: /**
029: * @author yonik
030: * @version $Id: LRUCache.java 501512 2007-01-30 18:36:32Z yonik $
031: */
032: public class LRUCache implements SolrCache {
033:
034: /* An instance of this class will be shared across multiple instances
035: * of an LRUCache at the same time. Make sure everything is thread safe.
036: */
037: private static class CumulativeStats {
038: AtomicLong lookups = new AtomicLong();
039: AtomicLong hits = new AtomicLong();
040: AtomicLong inserts = new AtomicLong();
041: AtomicLong evictions = new AtomicLong();
042: }
043:
044: private CumulativeStats stats;
045:
046: // per instance stats. The synchronization used for the map will also be
047: // used for updating these statistics (and hence they are not AtomicLongs
048: private long lookups;
049: private long hits;
050: private long inserts;
051: private long evictions;
052:
053: private Map map;
054: private String name;
055: private int autowarmCount;
056: private State state;
057: private CacheRegenerator regenerator;
058: private String description = "LRU Cache";
059:
060: public Object init(Map args, Object persistence,
061: CacheRegenerator regenerator) {
062: state = State.CREATED;
063: this .regenerator = regenerator;
064: name = (String) args.get("name");
065: String str = (String) args.get("size");
066: final int limit = str == null ? 1024 : Integer.parseInt(str);
067: str = (String) args.get("initialSize");
068: final int initialSize = Math.min(str == null ? 1024 : Integer
069: .parseInt(str), limit);
070: str = (String) args.get("autowarmCount");
071: autowarmCount = str == null ? 0 : Integer.parseInt(str);
072:
073: description = "LRU Cache(maxSize=" + limit + ", initialSize="
074: + initialSize;
075: if (autowarmCount > 0) {
076: description += ", autowarmCount=" + autowarmCount
077: + ", regenerator=" + regenerator;
078: }
079: description += ')';
080:
081: map = new LinkedHashMap(initialSize, 0.75f, true) {
082: protected boolean removeEldestEntry(Map.Entry eldest) {
083: if (size() > limit) {
084: // increment evictions regardless of state.
085: // this doesn't need to be synchronized because it will
086: // only be called in the context of a higher level synchronized block.
087: evictions++;
088: stats.evictions.incrementAndGet();
089: return true;
090: }
091: return false;
092: }
093: };
094:
095: if (persistence == null) {
096: // must be the first time a cache of this type is being created
097: persistence = new CumulativeStats();
098: }
099:
100: stats = (CumulativeStats) persistence;
101:
102: return persistence;
103: }
104:
105: public String name() {
106: return name;
107: }
108:
109: public int size() {
110: synchronized (map) {
111: return map.size();
112: }
113: }
114:
115: public synchronized Object put(Object key, Object value) {
116: if (state == State.LIVE) {
117: stats.inserts.incrementAndGet();
118: }
119:
120: synchronized (map) {
121: // increment local inserts regardless of state???
122: // it does make it more consistent with the current size...
123: inserts++;
124: return map.put(key, value);
125: }
126: }
127:
128: public Object get(Object key) {
129: synchronized (map) {
130: Object val = map.get(key);
131: if (state == State.LIVE) {
132: // only increment lookups and hits if we are live.
133: lookups++;
134: stats.lookups.incrementAndGet();
135: if (val != null) {
136: hits++;
137: stats.hits.incrementAndGet();
138: }
139: }
140: return val;
141: }
142: }
143:
144: public void clear() {
145: synchronized (map) {
146: map.clear();
147: }
148: }
149:
150: public void setState(State state) {
151: this .state = state;
152: }
153:
154: public State getState() {
155: return state;
156: }
157:
158: public void warm(SolrIndexSearcher searcher, SolrCache old)
159: throws IOException {
160: if (regenerator == null)
161: return;
162:
163: LRUCache other = (LRUCache) old;
164:
165: // warm entries
166: if (autowarmCount != 0) {
167: Object[] keys, vals = null;
168:
169: // Don't do the autowarming in the synchronized block, just pull out the keys and values.
170: synchronized (other.map) {
171: int sz = other.map.size();
172: if (autowarmCount != -1)
173: sz = Math.min(sz, autowarmCount);
174: keys = new Object[sz];
175: vals = new Object[sz];
176:
177: Iterator iter = other.map.entrySet().iterator();
178:
179: // iteration goes from oldest (least recently used) to most recently used,
180: // so we need to skip over the oldest entries.
181: int skip = other.map.size() - sz;
182: for (int i = 0; i < skip; i++)
183: iter.next();
184:
185: for (int i = 0; i < sz; i++) {
186: Map.Entry entry = (Map.Entry) iter.next();
187: keys[i] = entry.getKey();
188: vals[i] = entry.getValue();
189: }
190: }
191:
192: // autowarm from the oldest to the newest entries so that the ordering will be
193: // correct in the new cache.
194: for (int i = 0; i < keys.length; i++) {
195: try {
196: boolean continueRegen = regenerator.regenerateItem(
197: searcher, this , old, keys[i], vals[i]);
198: if (!continueRegen)
199: break;
200: } catch (Throwable e) {
201: SolrException.log(log,
202: "Error during auto-warming of key:"
203: + keys[i], e);
204: }
205: }
206: }
207: }
208:
209: public void close() {
210: }
211:
212: //////////////////////// SolrInfoMBeans methods //////////////////////
213:
214: public String getName() {
215: return LRUCache.class.getName();
216: }
217:
218: public String getVersion() {
219: return SolrCore.version;
220: }
221:
222: public String getDescription() {
223: return description;
224: }
225:
226: public Category getCategory() {
227: return Category.CACHE;
228: }
229:
230: public String getSourceId() {
231: return "$Id: LRUCache.java 501512 2007-01-30 18:36:32Z yonik $";
232: }
233:
234: public String getSource() {
235: return "$URL: https://svn.apache.org/repos/asf/lucene/solr/branches/branch-1.2/src/java/org/apache/solr/search/LRUCache.java $";
236: }
237:
238: public URL[] getDocs() {
239: return null;
240: }
241:
242: // returns a ratio, not a percent.
243: private static String calcHitRatio(long lookups, long hits) {
244: if (lookups == 0)
245: return "0.00";
246: if (lookups == hits)
247: return "1.00";
248: int hundredths = (int) (hits * 100 / lookups); // rounded down
249: if (hundredths < 10)
250: return "0.0" + hundredths;
251: return "0." + hundredths;
252:
253: /*** code to produce a percent, if we want it...
254: int ones = (int)(hits*100 / lookups);
255: int tenths = (int)(hits*1000 / lookups) - ones*10;
256: return Integer.toString(ones) + '.' + tenths;
257: ***/
258: }
259:
260: public NamedList getStatistics() {
261: NamedList lst = new SimpleOrderedMap();
262: synchronized (map) {
263: lst.add("lookups", lookups);
264: lst.add("hits", hits);
265: lst.add("hitratio", calcHitRatio(lookups, hits));
266: lst.add("inserts", inserts);
267: lst.add("evictions", evictions);
268: lst.add("size", map.size());
269: }
270:
271: long clookups = stats.lookups.get();
272: long chits = stats.hits.get();
273: lst.add("cumulative_lookups", clookups);
274: lst.add("cumulative_hits", chits);
275: lst.add("cumulative_hitratio", calcHitRatio(clookups, chits));
276: lst.add("cumulative_inserts", stats.inserts.get());
277: lst.add("cumulative_evictions", stats.evictions.get());
278:
279: return lst;
280: }
281:
282: public String toString() {
283: return name + getStatistics().toString();
284: }
285: }
|