/*******************************************************************************
* Copyright (c) 2003, 2007 s IT Solutions AT Spardat GmbH .
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* s IT Solutions AT Spardat GmbH - initial API and implementation
*******************************************************************************/
package at.spardat.enterprise.cache;
import java.text.SimpleDateFormat;
import java.util.*;
/**
* A cache implementation where the entries are kept in a HashMap. In addition, each
* entry is inserted into two doubly linked lists: one maintains the insertion order,
* the other the access order.
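* <p>
* A minimal usage sketch; MyDescriptor stands for a hypothetical ICacheDescriptor
* implementation that supplies the size/age limits and, for transparent caches,
* the load logic:
* <pre>
*   ICache cache = new DefaultCache (new MyDescriptor());
*   Object value = cache.lookup ("someKey"); // loaded on first access if transparent
*   cache.remove ("someKey");                // drops the entry again
* </pre>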
*/
public class DefaultCache implements ICache {
/**
* Constructs a Cache and takes the caching behaviour from the provided descriptor.
*/
public DefaultCache (ICacheDescriptor descriptor) {
descriptor_ = descriptor;
maxSize_ = descriptor.getMaxSize();
if (maxSize_ > 0) {
lruHead_ = new Entry();
lruHead_.lruNext_ = lruHead_;
lruHead_.lruPrev_ = lruHead_;
}
maxAge_ = descriptor.getMaxAgeMillis();
maxAgeSpreadPct_ = descriptor.getMaxAgeSpreadPct();
if (maxAge_ > 0) {
expHead_ = new Entry();
expHead_.expNext_ = expHead_;
expHead_.expPrev_ = expHead_;
}
}
/**
* @see at.spardat.enterprise.cache.ICache#lookup(Object)
*/
public Object lookup (Object key) {
Object result = null;
Entry e = null;
// the first synchronized block contains the code we consider to be executed
// most frequently: a successful cache lookup.
synchronized (this) {
cleanupByTime(); // remove timed out entries
e = (Entry) entriesHashed_.get(key);
cntAccess_++; // statistics
if (e != null) {
// we've found the object; since this object has been accessed now,
// we put it in front of the LRU list
if (lruHead_ != null) e.lruMoveToFront();
result = e.value_;
cntHit_++;
}
}
if (e == null) {
// the object is definitely not in the cache.
// if the cache descriptor can load it, do so.
if (descriptor_.isTransparent()) {
result = null;
Entry insertedEntry = null;
while (insertedEntry == null) {
insertedEntry = loadAndInsert (key);
if (insertedEntry == null) {
synchronized (this) { cntLoadHazard_++; }
} else {
result = insertedEntry.value_;
}
}
// clean up by size
if (lruHead_ != null) {
synchronized (this) {
cleanupBySize();
}
}
}
}
if (ASSERT) checkInvariant(); // use the synchronized one
return result;
}
/**
* This method is called if the object for a particular key has not been found
* on a first lookup and the descriptor is responsible for loading objects.
* On invocation of this method, the calling thread must not hold the monitor on this.
*
* @return an Entry object which is non-null with very high probability. Null may
*         be returned if this thread waited on the load operation of another thread
*         and that load operation failed. In this case, the caller should
*         call this method again.
* @throws RuntimeException on load errors. These exceptions are forwarded from
*         the load method of the descriptor.
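* <p>Callers therefore retry until a non-null entry is returned, as lookup does:
* <pre>
*   Entry inserted = null;
*   while (inserted == null) inserted = loadAndInsert (key);
* </pre>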
*/
private Entry loadAndInsert (Object key) {
Entry result = null;
Lock l = null;
boolean lockIsMine = false;
synchronized (this) {
Entry e = (Entry) entriesHashed_.get(key);
if (e != null) return e;
l = (Lock) loadLocks_.get(key);
if (l == null) {
lockIsMine = true;
loadLocks_.put(key, l = new Lock());
} else {
cntLoadWait_++; // wait for another thread to load
}
}
// here, the following conditions hold:
// 1) the requested object is not in the cache
// 2) there is a corresponding lock object (l) in loadLocks_
// 3) if lockIsMine, the Lock has been created by this thread and it is
// the task of this thread to load the object and release the lock
// 4) if !lockIsMine, another thread is currently loading the object
// and we have to wait
if (lockIsMine) {
Object toLoad = null;
boolean loadSucceeded = false;
// load the object, insert it and release the lock
try {
cntLoad_++; // load operation
toLoad = descriptor_.load(key);
loadSucceeded = true;
if (DEBUG) out ("loaded " + key);
} finally {
synchronized (this) {
if (loadSucceeded) {
// insert loaded object
result = insertImpl (key, toLoad);
if (DEBUG) out ("inserted " + key);
} else {
cntLoadFailed_++;
}
// release lock
loadLocks_.remove(key);
// wake up waiting threads
synchronized (l) {
l.state_ = Lock.INSERTED;
if (loadSucceeded) l.loadedEntry_ = result;
l.notifyAll();
}
}
}
} else {
// wait until the lock gets released
synchronized (l) {
if (DEBUG) out ("waiting on load completion for " + key);
if (l.state_ == Lock.LOADING) {
try {
l.wait();
} catch (InterruptedException ex) {}
}
}
synchronized (this) {
result = l.loadedEntry_; // maybe null
if (DEBUG) {
if (result == null) out ("unsuccessful wait for " + key);
else out ("successful wait for " + key);
}
}
}
return result;
}
/**
* @see at.spardat.enterprise.cache.ICache#remove(Object)
*/
public synchronized void remove (Object key) {
cleanupByTime();
Entry e = (Entry) entriesHashed_.get(key);
if (e != null) removeImpl (e);
if (ASSERT) checkInvariantImpl();
}
/**
* @see at.spardat.enterprise.cache.ICache#removeAll()
*/
public synchronized void removeAll () {
if (lruHead_ != null) {
lruHead_.lruPrev_ = lruHead_;
lruHead_.lruNext_ = lruHead_;
}
if (expHead_ != null) {
expHead_.expPrev_ = expHead_;
expHead_.expNext_ = expHead_;
}
entriesHashed_.clear();
if (ASSERT) checkInvariantImpl();
}
/**
* @see at.spardat.enterprise.cache.ICache#removeAllHaving(at.spardat.enterprise.cache.ICache.IKeyFilter)
*/
public synchronized void removeAllHaving (IKeyFilter f) {
Iterator iter = entriesHashed_.keySet().iterator();
ArrayList keysToDelete = new ArrayList ();
while (iter.hasNext()) {
Object key = iter.next();
if (f.accept(key)) keysToDelete.add(key);
}
for (int i = 0; i < keysToDelete.size(); i++) {
Entry e = (Entry) entriesHashed_.get(keysToDelete.get(i));
if (e != null) removeImpl (e);
}
if (ASSERT) checkInvariantImpl();
}
// inserts a key/value pair into this cache without doing cleanups and synchronization;
// the caller must synchronize
private Entry insertImpl (Object key, Object value) {
Entry e = new Entry();
e.key_ = key;
e.value_ = value;
entriesHashed_.put (key, e);
if (lruHead_ != null) e.lruInsertFront();
if (maxAge_ > 0) {
long age = maxAge_;
if (maxAgeSpreadPct_ > 0 && maxAgeSpreadPct_ < 100) {
long spread = maxAge_ * maxAgeSpreadPct_ / 100;
age = maxAge_ - spread + (long)(spread*2*Math.random());
}
e.expirationTime_ = System.currentTimeMillis() + age;
e.expInsertByTime();
}
return e;
}
// removes an entry from this cache without doing cleanups and synchronization
private void removeImpl (Entry e) {
entriesHashed_.remove(e.key_);
if (lruHead_ != null) e.lruRemove();
if (expHead_ != null) e.expRemove();
}
/**
* Removes timed-out objects and LRU victims if the cache grows too large. This
* method is not synchronized.
*/
private void cleanup () {
cleanupByTime();
cleanupBySize();
}
/**
* If this cache is size constrained and there are more entries than allowed, victims
* are selected from the LRU-end of the LRU-list and removed from the cache.
* The caller must synchronize.
*/
private void cleanupBySize () {
// check if the maximum number of entries has been exceeded. If so, victims are
// selected from the LRU list.
if (lruHead_ != null) {
while (entriesHashed_.size() > maxSize_) {
if (DEBUG) out ("removing LRU victim " + lruHead_.lruPrev_.key_);
removeImpl (lruHead_.lruPrev_);
}
}
if (ASSERT) checkInvariantImpl();
}
/**
* If the cache is age constrained and there are outdated objects, these are removed
* from the cache. The caller has to synchronize.
*/
private void cleanupByTime () {
// look for entries which are timed out already; they are at the back of the list.
if (expHead_ != null) {
long currentTime = System.currentTimeMillis();
Entry e = expHead_.expPrev_;
while (e != expHead_ && e.expirationTime_ <= currentTime) {
if (DEBUG) out ("removing expiration victim " + expHead_.expPrev_.key_);
removeImpl (e);
e = expHead_.expPrev_;
}
}
if (ASSERT) checkInvariantImpl();
}
private static final boolean ASSERT = false;
private static final boolean DEBUG = false;
private void out (String message) {
synchronized (System.out) {
System.out.print(Thread.currentThread()); System.out.print (": ");
System.out.println(message);
}
}
/**
* checks the invariant; unsynchronized
* @throws RuntimeException on failed invariant
*/
private void checkInvariantImpl () {
// first, check if all entries on the lru list are also in the HashMap
int numObjects = 0;
if (lruHead_ != null) {
Entry e = lruHead_.lruNext_;
while (e != lruHead_) {
numObjects++;
// check if e is in the HashMap
Object value = entriesHashed_.get(e.key_);
if (value != e) throw new RuntimeException ("cannot find LRU list entry in hashMap");
e = e.lruNext_;
}
if (numObjects != entriesHashed_.size()) throw new RuntimeException ("damaged LRU list");
}
// secondly, check if all entries on the expiration list are also in the HashMap and are
// ordered decreasingly on expiration time
numObjects = 0;
long lastTime = Long.MAX_VALUE;
if (expHead_ != null) {
Entry e = expHead_.expNext_;
while (e != expHead_) {
numObjects++;
// check if e is in the HashMap
Object value = entriesHashed_.get(e.key_);
if (value != e) throw new RuntimeException ("cannot find expiration list entry in hashMap");
if (e.expirationTime_ > lastTime) throw new RuntimeException ("exp list not sorted decreasingly on expirationTime");
lastTime = e.expirationTime_;
e = e.expNext_;
}
if (numObjects != entriesHashed_.size()) throw new RuntimeException ("damaged expiration list");
}
// not more than maxSize_ objects? This check has been relaxed, since there may be states of the
// cache with more objects for a short period of time, e.g., an insertion followed by a cleanup,
// where the combined operation is not implemented atomically in order to avoid synchronization.
if (maxSize_ > 0 && entriesHashed_.size() > 2 * maxSize_) throw new RuntimeException ("many more objects cached than allowed by maxSize_");
}
/**
* The public synchronized version of consistency checking
*/
public synchronized void checkInvariant () {
checkInvariantImpl();
}
/**
* outputs the cache content and access statistics
*/
public void print () {
printImpl (true);
}
/**
* Outputs statistics of cache usage to System.out. Just for development phase.
* Do not call this method in production environments.
*/
public void printStatistics () {
printImpl (false);
}
/**
* outputs cache statistics and optional content
*
* @param withContent if true, keys are shown in LRU and time-order
*/
private synchronized void printImpl (boolean withContent) {
SimpleDateFormat df = new SimpleDateFormat ("dd.MM.yyyy HH:mm:ss.SSS");
synchronized (System.out) {
System.out.println();
System.out.println("--------------------------------------------------------------------------------------------------");
System.out.println("------ cache statistics for '" + descriptor_.getName() + "'");
System.out.println("--------------------------------------------------------------------------------------------------");
System.out.println("maxSize: " + maxSize_ + ", maxAge: " + maxAge_ + ", ageSpread: " + maxAgeSpreadPct_ + " %");
System.out.println("size: " + entriesHashed_.size());
double hitRatio = Double.NaN;
if (cntAccess_ != 0) hitRatio = cntHit_/(double)cntAccess_;
System.out.println("accesses: " + cntAccess_ + ", hits: " + cntHit_ + ", hitRatio: " + hitRatio);
System.out.println("loads: " + cntLoad_ + ", loadsFailed: " + cntLoadFailed_ + ", loadWaits: " + cntLoadWait_ + ", loadHazards: " + cntLoadHazard_);
if (withContent) {
if (lruHead_ != null) {
System.out.println ("--- entries in order MRU --> LRU (last are victims):");
Entry e = lruHead_.lruNext_;
while (e != lruHead_) {
System.out.println (e.key_);
e = e.lruNext_;
}
}
if (expHead_ != null) {
System.out.println ("--- entries in order youngest --> oldest (last are victims):");
Entry e = expHead_.expNext_;
while (e != expHead_) {
System.out.println (e.key_ + ", expires: " + df.format(new java.util.Date(e.expirationTime_)));
e = e.expNext_;
}
}
}
}
}
/**
* Returns the number of currently cached objects.
*/
public int getCurrentSize() {
return entriesHashed_.size();
}
/**
* Returns the number of accesses to this cache over its lifetime.
*/
public long getCntAccess() {
return cntAccess_;
}
/**
* Returns the number of cache hits over the lifetime of this cache.
*/
public long getCntHit() {
return cntHit_;
}
// provides the meta information about caching strategies.
private ICacheDescriptor descriptor_;
// max size of the cache
private int maxSize_;
// max lifetime of an object in the cache in milliseconds
private long maxAge_;
// the actual lifetime is uniformly distributed over
// [maxAge_-(maxAge_*maxAgeSpreadPct_/100) ... maxAge_+(maxAge_*maxAgeSpreadPct_/100)]
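// Worked example: maxAge_ = 60000 and maxAgeSpreadPct_ = 20 give a spread of
// 12000 ms, so lifetimes are uniformly distributed over [48000 ms .. 72000 ms];
// this prevents many entries loaded together from all expiring at the same instant.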
private int maxAgeSpreadPct_;
private class Entry {
public Object key_;
public Object value_;
// the time this object will expire
public long expirationTime_;
// the LRULinkedList
public Entry lruPrev_;
public Entry lruNext_;
// removes this entry from the lruList
public void lruRemove () {
lruPrev_.lruNext_ = lruNext_;
lruNext_.lruPrev_ = lruPrev_;
lruPrev_ = null;
lruNext_ = null;
}
// makes this unlinked Entry the first in the lru list
public void lruInsertFront () {
lruPrev_ = lruHead_;
lruNext_ = lruHead_.lruNext_;
lruPrev_.lruNext_ = this;
lruNext_.lruPrev_ = this;
}
// makes this linked Entry the first in the lru list
public void lruMoveToFront () {
lruRemove();
lruInsertFront();
}
// the expiration linked list
public Entry expPrev_;
public Entry expNext_;
// removes this entry from the expiration list
public void expRemove () {
expPrev_.expNext_ = expNext_;
expNext_.expPrev_ = expPrev_;
expPrev_ = null;
expNext_ = null;
}
// makes this unlinked Entry the first in the expiration list
// public void expInsertFront () {
// expPrev_ = expHead_;
// expNext_ = expHead_.expNext_;
// expPrev_.expNext_ = this;
// expNext_.expPrev_ = this;
// }
// inserts this on the exp-list so that all entries remain sorted
// descending on expirationTime
public void expInsertByTime () {
Entry e = expHead_.expNext_;
// find an Entry whose expirationTime is less than or equal
// to that of the new entry. Then we can insert the new entry in
// front of the found one.
while (e != expHead_) {
if (e.expirationTime_ <= expirationTime_) break;
e = e.expNext_;
}
// insert in front of e
expPrev_ = e.expPrev_;
expNext_ = e;
e.expPrev_.expNext_ = this;
e.expPrev_ = this;
}
}
// the values are Entry objects. If a value is the Entry x,
// then its key in this HashMap is x.key_.
private HashMap entriesHashed_ = new HashMap();
// the head of a circular doubly linked list whose entries
// are linked via lruPrev_ and lruNext_
private Entry lruHead_;
// the head of a circular doubly linked list whose entries
// are linked via expPrev_ and expNext_.
// the entries on this list are sorted descending on
// expirationTime
private Entry expHead_;
// this map contains entries for objects which are currently being
// loaded; keys are the same as in entriesHashed_, values
// are Lock objects which are used to synchronize on loading.
private HashMap loadLocks_ = new HashMap();
/**
* To synchronize threads waiting on load completion for a particular
* object.
*/
private static class Lock {
public static final int LOADING = 1;
public static final int INSERTED = 2;
// state_ may be LOADING or INSERTED
public int state_ = LOADING;
// non null in state INSERTED if and only if the load operation succeeded
public Entry loadedEntry_;
}
// how often has the cache been accessed so far via lookup
private long cntAccess_;
// how many of these access-operations succeeded, i.e., the object has
// been found in the cache
private long cntHit_;
// how many load operations have been performed
private long cntLoad_;
// how many load operations terminated with an exception
private long cntLoadFailed_;
// how often a cache miss was followed by a load operation that blocked
// because another thread was already loading the same object at the same time,
// so that this thread had to wait.
private long cntLoadWait_;
// how often a load had to be retried because another thread's load operation failed.
private long cntLoadHazard_;
// test method
public static void main (String [] args) throws Exception {
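// A minimal smoke-test sketch, not part of the original API. Assumption: ICacheDescriptor
// declares exactly the methods this class calls (getName, getMaxSize, getMaxAgeMillis,
// getMaxAgeSpreadPct, isTransparent, load); if the interface declares more, this
// anonymous implementation has to be extended accordingly.
ICacheDescriptor descriptor = new ICacheDescriptor () {
public String getName () { return "demo"; }
public int getMaxSize () { return 3; } // keep at most three entries
public long getMaxAgeMillis () { return 1000; } // entries expire after about one second
public int getMaxAgeSpreadPct () { return 0; } // no spread of expiration times
public boolean isTransparent () { return true; } // the cache loads values itself
public Object load (Object key) { return "value-" + key; }
};
DefaultCache cache = new DefaultCache (descriptor);
// the first lookup triggers a load, the second one is served from the cache
System.out.println (cache.lookup ("a"));
System.out.println (cache.lookup ("a"));
// touching more keys than maxSize allows evicts the least recently used entry
cache.lookup ("b"); cache.lookup ("c"); cache.lookup ("d");
cache.printStatistics ();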
}
}