org.glassfish.jersey.internal.guava.LocalCache
A bundle project producing JAX-RS RI bundles. The primary artifact is an "all-in-one" OSGi-fied JAX-RS RI bundle
(jaxrs-ri.jar).
Attached to it are two compressed JAX-RS RI archives. The first archive (jaxrs-ri.zip) contains the binary RI bits:
the API jar (under the "api" directory), the RI libraries (under the "lib" directory), and all external
RI dependencies (under the "ext" directory). The second archive (jaxrs-ri-src.zip) contains a buildable JAX-RS RI
source bundle: the API jar (under the "api" directory), the RI sources (under the "src" directory), and all external
RI dependencies (under the "ext" directory). The second archive also contains a "build.xml" Ant script that builds the
RI sources. To build the JAX-RS RI, simply unzip the archive, cd into the created jaxrs-ri directory, and invoke "ant"
from the command line.
/*
* Copyright (C) 2009 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.glassfish.jersey.internal.guava;
import java.io.Serializable;
import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;
import java.util.AbstractCollection;
import java.util.AbstractMap;
import java.util.AbstractQueue;
import java.util.AbstractSet;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Function;
import java.util.logging.Level;
import java.util.logging.Logger;
import static org.glassfish.jersey.internal.guava.MoreExecutors.directExecutor;
import static org.glassfish.jersey.internal.guava.Preconditions.checkNotNull;
import static org.glassfish.jersey.internal.guava.Preconditions.checkState;
import static org.glassfish.jersey.internal.guava.Uninterruptibles.getUninterruptibly;
import static java.util.concurrent.TimeUnit.NANOSECONDS;
/**
* The concurrent hash map implementation built by {@link CacheBuilder}.
*
* <p>This implementation is heavily derived from revision 1.96 of ConcurrentHashMap.java.
*
* @author Charles Fry
* @author Bob Lee ({@code org.glassfish.jersey.internal.guava.MapMaker})
* @author Doug Lea ({@code ConcurrentHashMap})
*/
class LocalCache<K, V> extends AbstractMap<K, V> implements ConcurrentMap<K, V> {
/*
* The basic strategy is to subdivide the table among Segments, each of which itself is a
* concurrently readable hash table. The map supports non-blocking reads and concurrent writes
* across different segments.
*
* If a maximum size is specified, a best-effort bounding is performed per segment, using a
* page-replacement algorithm to determine which entries to evict when the capacity has been
* exceeded.
*
* The page replacement algorithm's data structures are kept casually consistent with the map. The
* ordering of writes to a segment is sequentially consistent. An update to the map and recording
* of reads may not be immediately reflected on the algorithm's data structures. These structures
* are guarded by a lock and operations are applied in batches to avoid lock contention. The
* penalty of applying the batches is spread across threads so that the amortized cost is slightly
* higher than performing just the operation without enforcing the capacity constraint.
*
* This implementation uses a per-segment queue to record a memento of the additions, removals,
* and accesses that were performed on the map. The queue is drained on writes and when it exceeds
* its capacity threshold.
*
* The Least Recently Used page replacement algorithm was chosen due to its simplicity, high hit
* rate, and ability to be implemented with O(1) time complexity. The initial LRU implementation
* operates per-segment rather than globally for increased implementation simplicity. We expect
* the cache hit rate to be similar to that of a global LRU algorithm.
*/
// Constants
/**
* The maximum capacity, used if a higher value is implicitly specified by either of the
* constructors with arguments. MUST be a power of two <= 1<<30 to ensure that entries are
* indexable using ints.
*/
private static final int MAXIMUM_CAPACITY = 1 << 30;
/**
* The maximum number of segments to allow; used to bound constructor arguments.
*/
private static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
/**
* Number of (unsynchronized) retries in the containsValue method.
*/
private static final int CONTAINS_VALUE_RETRIES = 3;
/**
* Number of cache access operations that can be buffered per segment before the cache's recency
* ordering information is updated. This is used to avoid lock contention by recording a memento
* of reads and delaying a lock acquisition until the threshold is crossed or a mutation occurs.
*
* <p>This must be a (2^n)-1 as it is used as a mask.
*/
private static final int DRAIN_THRESHOLD = 0x3F;
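// With 0x3F (63), a segment would drain its recency buffer roughly once every 64 reads,
// e.g. by checking (readCount & DRAIN_THRESHOLD) == 0; the Segment code that applies this
// mask is not part of this listing, so the exact check is presumed rather than shown.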
/**
* Maximum number of entries to be drained in a single cleanup run. This applies independently to
* the cleanup queue and both reference queues.
*/
// TODO(fry): empirically optimize this
private static final int DRAIN_MAX = 16;
// Fields
private static final Logger logger = Logger.getLogger(LocalCache.class.getName());
/**
* Placeholder. Indicates that the value hasn't been set yet.
*/
private static final ValueReference<Object, Object> UNSET = new ValueReference<Object, Object>() {
@Override
public Object get() {
return null;
}
@Override
public int getWeight() {
return 0;
}
@Override
public ReferenceEntry<Object, Object> getEntry() {
return null;
}
@Override
public ValueReference<Object, Object> copyFor(ReferenceQueue<Object> queue,
Object value, ReferenceEntry<Object, Object> entry) {
return this;
}
@Override
public boolean isLoading() {
return false;
}
@Override
public boolean isActive() {
return false;
}
@Override
public Object waitForValue() {
return null;
}
@Override
public void notifyNewValue(Object newValue) {
}
};
private static final Queue<? extends Object> DISCARDING_QUEUE = new AbstractQueue<Object>() {
@Override
public boolean offer(Object o) {
return true;
}
@Override
public Object peek() {
return null;
}
@Override
public Object poll() {
return null;
}
@Override
public int size() {
return 0;
}
@Override
public Iterator<Object> iterator() {
return Iterators.emptyIterator();
}
};
/**
* Mask value for indexing into segments. The upper bits of a key's hash code are used to choose
* the segment.
*/
private final int segmentMask;
/**
* Shift value for indexing within segments. Helps prevent entries that end up in the same segment
* from also ending up in the same bucket.
*/
private final int segmentShift;
/**
* The segments, each of which is a specialized hash table.
*/
private final Segment<K, V>[] segments;
/**
* The concurrency level.
*/
private final int concurrencyLevel;
/**
* Strategy for comparing keys.
*/
private final Equivalence<Object> keyEquivalence;
/**
* Strategy for comparing values.
*/
private final Equivalence<Object> valueEquivalence;
/**
* Strategy for referencing keys.
*/
private final Strength keyStrength;
/**
* Strategy for referencing values.
*/
private final Strength valueStrength;
/**
* The maximum weight of this map. UNSET_INT if there is no maximum.
*/
private final long maxWeight;
/**
* How long after the last access to an entry the map will retain that entry.
*/
private final long expireAfterAccessNanos;
/**
* How long after the last write to an entry the map will retain that entry.
*/
private final long expireAfterWriteNanos;
/**
* How long after the last write an entry becomes a candidate for refresh.
*/
private final long refreshNanos;
/**
* Entries waiting to be consumed by the removal listener.
*/
// TODO(fry): define a new type which creates event objects and automates the clear logic
private final Queue<RemovalNotification<K, V>> removalNotificationQueue;
/**
* Measures time in a testable way.
*/
private final Ticker ticker;
/**
* Factory used to create new entries.
*/
private final EntryFactory entryFactory;
/**
* The default cache loader to use on loading operations.
*/
private final CacheLoader<? super K, V> defaultLoader;
private Set<K> keySet;
private Collection<V> values;
private Set<Entry<K, V>> entrySet;
/**
* Creates a new, empty map with the specified strategy, initial capacity and concurrency level.
*/
private LocalCache(
CacheBuilder<? super K, ? super V> builder, CacheLoader<? super K, V> loader) {
concurrencyLevel = Math.min(builder.getConcurrencyLevel(), MAX_SEGMENTS);
keyStrength = Strength.STRONG;
valueStrength = Strength.STRONG;
keyEquivalence = keyStrength.defaultEquivalence();
valueEquivalence = valueStrength.defaultEquivalence();
maxWeight = CacheBuilder.UNSET_INT;
expireAfterAccessNanos = builder.getExpireAfterAccessNanos();
expireAfterWriteNanos = CacheBuilder.DEFAULT_EXPIRATION_NANOS;
refreshNanos = CacheBuilder.DEFAULT_REFRESH_NANOS;
removalNotificationQueue = LocalCache.discardingQueue();
ticker = recordsTime() ? Ticker.systemTicker() : CacheBuilder.NULL_TICKER;
entryFactory = EntryFactory.getFactory(keyStrength, usesAccessEntries(), usesWriteEntries());
defaultLoader = loader;
int initialCapacity = Math.min(CacheBuilder.DEFAULT_INITIAL_CAPACITY, MAXIMUM_CAPACITY);
if (evictsBySize()) {
initialCapacity = Math.min(initialCapacity, (int) maxWeight);
}
// Find the lowest power-of-two segmentCount that exceeds concurrencyLevel, unless
// maximumSize/Weight is specified in which case ensure that each segment gets at least 10
// entries. The special casing for size-based eviction is only necessary because that eviction
// happens per segment instead of globally, so too many segments compared to the maximum size
// will result in random eviction behavior.
int segmentShift = 0;
int segmentCount = 1;
while (segmentCount < concurrencyLevel
&& (!evictsBySize() || segmentCount * 20 <= maxWeight)) {
++segmentShift;
segmentCount <<= 1;
}
this.segmentShift = 32 - segmentShift;
segmentMask = segmentCount - 1;
this.segments = newSegmentArray(segmentCount);
int segmentCapacity = initialCapacity / segmentCount;
if (segmentCapacity * segmentCount < initialCapacity) {
++segmentCapacity;
}
int segmentSize = 1;
while (segmentSize < segmentCapacity) {
segmentSize <<= 1;
}
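// Worked example: assuming CacheBuilder's defaults of concurrencyLevel 4 and an initial
// capacity of 16, segmentCount is 4, segmentCapacity is 16 / 4 = 4, and segmentSize rounds
// up to the nearest power of two, also 4, so each segment starts with a four-bucket table.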
if (evictsBySize()) {
// Ensure sum of segment max weights = overall max weights
long maxSegmentWeight = maxWeight / segmentCount + 1;
long remainder = maxWeight % segmentCount;
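// Illustrative arithmetic: with maxWeight = 10 and four segments, maxSegmentWeight starts
// at 10 / 4 + 1 = 3 and remainder is 2, so segments 0 and 1 get a budget of 3 while
// segments 2 and 3 (from the index where i == remainder) get 2, summing back to 10.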
for (int i = 0; i < this.segments.length; ++i) {
if (i == remainder) {
maxSegmentWeight--;
}
this.segments[i] =
createSegment(segmentSize, maxSegmentWeight);
}
} else {
for (int i = 0; i < this.segments.length; ++i) {
this.segments[i] =
createSegment(segmentSize, CacheBuilder.UNSET_INT);
}
}
}
/**
* Singleton placeholder that indicates a value is being loaded.
*/
@SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value
private static <K, V> ValueReference<K, V> unset() {
return (ValueReference<K, V>) UNSET;
}
@SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value
private static <K, V> ReferenceEntry<K, V> nullEntry() {
return (ReferenceEntry<K, V>) NullEntry.INSTANCE;
}
/**
* Queue that discards all elements.
*/
@SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value
private static <E> Queue<E> discardingQueue() {
return (Queue) DISCARDING_QUEUE;
}
/**
* Applies a supplemental hash function to a given hash code, which defends against poor quality
* hash functions. This is critical when the concurrent hash map uses power-of-two length hash
* tables, that otherwise encounter collisions for hash codes that do not differ in lower or
* upper bits.
*
* @param h hash code
*/
private static int rehash(int h) {
// Spread bits to regularize both segment and index locations,
// using variant of single-word Wang/Jenkins hash.
// TODO(kevinb): use Hashing/move this to Hashing?
h += (h << 15) ^ 0xffffcd7d;
h ^= (h >>> 10);
h += (h << 3);
h ^= (h >>> 6);
h += (h << 2) + (h << 14);
return h ^ (h >>> 16);
}
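// The rehashed value is consumed at both ends of the word: segmentFor() below selects the
// segment from the upper bits (hash >>> segmentShift), while the lower bits presumably index
// the bucket inside the segment's table (the Segment class is not part of this listing).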
// Guarded By Segment.this
private static <K, V> void connectAccessOrder(ReferenceEntry<K, V> previous, ReferenceEntry<K, V> next) {
previous.setNextInAccessQueue(next);
next.setPreviousInAccessQueue(previous);
}
// Guarded By Segment.this
private static <K, V> void nullifyAccessOrder(ReferenceEntry<K, V> nulled) {
ReferenceEntry<K, V> nullEntry = nullEntry();
nulled.setNextInAccessQueue(nullEntry);
nulled.setPreviousInAccessQueue(nullEntry);
}
// Guarded By Segment.this
private static <K, V> void connectWriteOrder(ReferenceEntry<K, V> previous, ReferenceEntry<K, V> next) {
previous.setNextInWriteQueue(next);
next.setPreviousInWriteQueue(previous);
}
// Guarded By Segment.this
private static <K, V> void nullifyWriteOrder(ReferenceEntry<K, V> nulled) {
ReferenceEntry<K, V> nullEntry = nullEntry();
nulled.setNextInWriteQueue(nullEntry);
nulled.setPreviousInWriteQueue(nullEntry);
}
boolean evictsBySize() {
return maxWeight >= 0;
}
private boolean expiresAfterWrite() {
return expireAfterWriteNanos > 0;
}
private boolean expiresAfterAccess() {
return expireAfterAccessNanos > 0;
}
boolean refreshes() {
return refreshNanos > 0;
}
boolean usesAccessQueue() {
return expiresAfterAccess() || evictsBySize();
}
boolean usesWriteQueue() {
return expiresAfterWrite();
}
boolean recordsWrite() {
return expiresAfterWrite() || refreshes();
}
boolean recordsAccess() {
return expiresAfterAccess();
}
private boolean recordsTime() {
return recordsWrite() || recordsAccess();
}
private boolean usesWriteEntries() {
return usesWriteQueue() || recordsWrite();
}
private boolean usesAccessEntries() {
return usesAccessQueue() || recordsAccess();
}
/*
* Note: All of this duplicate code sucks, but it saves a lot of memory. If only Java had mixins!
* To maintain this code, make a change for the strong reference type. Then, cut and paste, and
* replace "Strong" with "Soft" or "Weak" within the pasted text. The primary difference is that
* strong entries store the key reference directly while soft and weak entries delegate to their
* respective superclasses.
*/
boolean usesKeyReferences() {
return keyStrength != Strength.STRONG;
}
boolean usesValueReferences() {
return valueStrength != Strength.STRONG;
}
private int hash(Object key) {
int h = keyEquivalence.hash(key);
return rehash(h);
}
void reclaimValue(ValueReference<K, V> valueReference) {
ReferenceEntry<K, V> entry = valueReference.getEntry();
int hash = entry.getHash();
segmentFor(hash).reclaimValue(entry.getKey(), hash, valueReference);
}
void reclaimKey(ReferenceEntry<K, V> entry) {
int hash = entry.getHash();
segmentFor(hash).reclaimKey(entry, hash);
}
/**
* Returns the segment that should be used for a key with the given hash.
*
* @param hash the hash code for the key
* @return the segment
*/
private Segment<K, V> segmentFor(int hash) {
// TODO(fry): Lazily create segments?
return segments[(hash >>> segmentShift) & segmentMask];
}
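// Example: with four segments, segmentShift is 32 - 2 = 30 and segmentMask is 3, so the
// top two bits of the rehashed hash select one of the four segments.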
private Segment<K, V> createSegment(
int initialCapacity, long maxSegmentWeight) {
return new Segment<K, V>(this, initialCapacity, maxSegmentWeight);
}
/**
* Gets the value from an entry. Returns null if the entry is invalid, partially-collected,
* loading, or expired. Unlike {@link Segment#getLiveValue} this method does not attempt to
* clean up stale entries. As such it should only be called outside a segment context, such as
* during iteration.
*/
private V getLiveValue(ReferenceEntry<K, V> entry, long now) {
if (entry.getKey() == null) {
return null;
}
V value = entry.getValueReference().get();
if (value == null) {
return null;
}
if (isExpired(entry, now)) {
return null;
}
return value;
}
/**
* Returns true if the entry has expired.
*/
boolean isExpired(ReferenceEntry<K, V> entry, long now) {
checkNotNull(entry);
if (expiresAfterAccess()
&& (now - entry.getAccessTime() >= expireAfterAccessNanos)) {
return true;
}
if (expiresAfterWrite()
&& (now - entry.getWriteTime() >= expireAfterWriteNanos)) {
return true;
}
return false;
}
@SuppressWarnings("unchecked")
private Segment<K, V>[] newSegmentArray(int ssize) {
return new Segment[ssize];
}
@Override
public boolean isEmpty() {
/*
* Sum per-segment modCounts to avoid mis-reporting when elements are concurrently added and
* removed in one segment while checking another, in which case the table was never actually
* empty at any point. (The sum ensures accuracy up through at least 1<<31 per-segment
* modifications before recheck.) Method containsValue() uses similar constructions for
* stability checks.
*/
long sum = 0L;
Segment<K, V>[] segments = this.segments;
for (int i = 0; i < segments.length; ++i) {
if (segments[i].count != 0) {
return false;
}
sum += segments[i].modCount;
}
if (sum != 0L) { // recheck unless no modifications
for (int i = 0; i < segments.length; ++i) {
if (segments[i].count != 0) {
return false;
}
sum -= segments[i].modCount;
}
if (sum != 0L) {
return false;
}
}
return true;
}
private long longSize() {
Segment<K, V>[] segments = this.segments;
long sum = 0;
for (int i = 0; i < segments.length; ++i) {
sum += segments[i].count;
}
return sum;
}
@Override
public int size() {
return Ints.saturatedCast(longSize());
}
@Override
public V get(Object key) {
if (key == null) {
return null;
}
int hash = hash(key);
return segmentFor(hash).get(key, hash);
}
V getIfPresent(Object key) {
int hash = hash(checkNotNull(key));
return segmentFor(hash).get(key, hash);
}
private V get(K key, CacheLoader<? super K, V> loader) throws ExecutionException {
int hash = hash(checkNotNull(key));
return segmentFor(hash).get(key, hash, loader);
}
V getOrLoad(K key) throws ExecutionException {
return get(key, defaultLoader);
}
@Override
public boolean containsKey(Object key) {
// does not impact recency ordering
if (key == null) {
return false;
}
int hash = hash(key);
return segmentFor(hash).containsKey(key, hash);
}
@Override
public boolean containsValue(Object value) {
// does not impact recency ordering
if (value == null) {
return false;
}
// This implementation is patterned after ConcurrentHashMap, but without the locking. The only
// way for it to return a false negative would be for the target value to jump around in the map
// such that none of the subsequent iterations observed it, despite the fact that at every point
// in time it was present somewhere in the map. This becomes increasingly unlikely as
// CONTAINS_VALUE_RETRIES increases, though without locking it is theoretically possible.
long now = ticker.read();
final Segment<K, V>[] segments = this.segments;
long last = -1L;
for (int i = 0; i < CONTAINS_VALUE_RETRIES; i++) {
long sum = 0L;
for (Segment<K, V> segment : segments) {
// ensure visibility of most recent completed write
@SuppressWarnings({"UnusedDeclaration", "unused"})
int c = segment.count; // read-volatile
AtomicReferenceArray<ReferenceEntry<K, V>> table = segment.table;
for (int j = 0; j < table.length(); j++) {
for (ReferenceEntry<K, V> e = table.get(j); e != null; e = e.getNext()) {
V v = segment.getLiveValue(e, now);
if (v != null && valueEquivalence.equivalent(value, v)) {
return true;
}
}
}
sum += segment.modCount;
}
if (sum == last) {
break;
}
last = sum;
}
return false;
}
@Override
public V put(K key, V value) {
checkNotNull(key);
checkNotNull(value);
int hash = hash(key);
return segmentFor(hash).put(key, hash, value, false);
}
// expiration
@Override
public V putIfAbsent(K key, V value) {
checkNotNull(key);
checkNotNull(value);
int hash = hash(key);
return segmentFor(hash).put(key, hash, value, true);
}
// queues
@Override
public void putAll(Map<? extends K, ? extends V> m) {
for (Entry<? extends K, ? extends V> e : m.entrySet()) {
put(e.getKey(), e.getValue());
}
}
@Override
public V remove(Object key) {
if (key == null) {
return null;
}
int hash = hash(key);
return segmentFor(hash).remove(key, hash);
}
@Override
public boolean remove(Object key, Object value) {
if (key == null || value == null) {
return false;
}
int hash = hash(key);
return segmentFor(hash).remove(key, hash, value);
}
@Override
public boolean replace(K key, V oldValue, V newValue) {
checkNotNull(key);
checkNotNull(newValue);
if (oldValue == null) {
return false;
}
int hash = hash(key);
return segmentFor(hash).replace(key, hash, oldValue, newValue);
}
@Override
public V replace(K key, V value) {
checkNotNull(key);
checkNotNull(value);
int hash = hash(key);
return segmentFor(hash).replace(key, hash, value);
}
@Override
public void clear() {
for (Segment<K, V> segment : segments) {
segment.clear();
}
}
// Inner Classes
@Override
public Set<K> keySet() {
// does not impact recency ordering
Set<K> ks = keySet;
return (ks != null) ? ks : (keySet = new KeySet(this));
}
@Override
public Collection<V> values() {
// does not impact recency ordering
Collection<V> vs = values;
return (vs != null) ? vs : (values = new Values(this));
}
// Queues
@Override
public Set<Entry<K, V>> entrySet() {
// does not impact recency ordering
Set<Entry<K, V>> es = entrySet;
return (es != null) ? es : (entrySet = new EntrySet(this));
}
enum Strength {
/*
* TODO(kevinb): If we strongly reference the value and aren't loading, we needn't wrap the
* value. This could save ~8 bytes per entry.
*/
STRONG {
@Override
<K, V> ValueReference<K, V> referenceValue(
Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight) {
return (weight == 1)
? new StrongValueReference<K, V>(value)
: new WeightedStrongValueReference<K, V>(value, weight);
}
@Override
Equivalence<Object> defaultEquivalence() {
return Equivalence.equals();
}
},
WEAK {
@Override
<K, V> ValueReference<K, V> referenceValue(
Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight) {
return (weight == 1)
? new WeakValueReference<K, V>(segment.valueReferenceQueue, value, entry)
: new WeightedWeakValueReference<K, V>(
segment.valueReferenceQueue, value, entry, weight);
}
@Override
Equivalence<Object> defaultEquivalence() {
return Equivalence.identity();
}
};
/**
* Creates a reference for the given value according to this value strength.
*/
abstract <K, V> ValueReference<K, V> referenceValue(
Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight);
/**
* Returns the default equivalence strategy used to compare and hash keys or values referenced
* at this strength. This strategy will be used unless the user explicitly specifies an
* alternate strategy.
*/
abstract Equivalence<Object> defaultEquivalence();
}
// Cache support
// ConcurrentMap methods
/**
* Creates new entries.
*/
enum EntryFactory {
STRONG {
@Override
<K, V> ReferenceEntry<K, V> newEntry(
Segment<K, V> segment, K key, int hash, ReferenceEntry<K, V> next) {
return new StrongEntry<K, V>(key, hash, next);
}
},
STRONG_ACCESS {
@Override
<K, V> ReferenceEntry<K, V> newEntry(
Segment<K, V> segment, K key, int hash, ReferenceEntry<K, V> next) {
return new StrongAccessEntry<K, V>(key, hash, next);
}
@Override
<K, V> ReferenceEntry<K, V> copyEntry(
Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
copyAccessEntry(original, newEntry);
return newEntry;
}
},
STRONG_WRITE {
@Override
<K, V> ReferenceEntry<K, V> newEntry(
Segment<K, V> segment, K key, int hash, ReferenceEntry<K, V> next) {
return new StrongWriteEntry<K, V>(key, hash, next);
}
@Override
<K, V> ReferenceEntry<K, V> copyEntry(
Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
copyWriteEntry(original, newEntry);
return newEntry;
}
},
STRONG_ACCESS_WRITE {
@Override
<K, V> ReferenceEntry<K, V> newEntry(
Segment<K, V> segment, K key, int hash, ReferenceEntry<K, V> next) {
return new StrongAccessWriteEntry<K, V>(key, hash, next);
}
@Override
<K, V> ReferenceEntry<K, V> copyEntry(
Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
copyAccessEntry(original, newEntry);
copyWriteEntry(original, newEntry);
return newEntry;
}
},
WEAK {
@Override
<K, V> ReferenceEntry<K, V> newEntry(
Segment<K, V> segment, K key, int hash, ReferenceEntry<K, V> next) {
return new WeakEntry<K, V>(segment.keyReferenceQueue, key, hash, next);
}
},
WEAK_ACCESS {
@Override
<K, V> ReferenceEntry<K, V> newEntry(
Segment<K, V> segment, K key, int hash, ReferenceEntry<K, V> next) {
return new WeakAccessEntry<K, V>(segment.keyReferenceQueue, key, hash, next);
}
@Override
<K, V> ReferenceEntry<K, V> copyEntry(
Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
copyAccessEntry(original, newEntry);
return newEntry;
}
},
WEAK_WRITE {
@Override
<K, V> ReferenceEntry<K, V> newEntry(
Segment<K, V> segment, K key, int hash, ReferenceEntry<K, V> next) {
return new WeakWriteEntry<K, V>(segment.keyReferenceQueue, key, hash, next);
}
@Override
<K, V> ReferenceEntry<K, V> copyEntry(
Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
copyWriteEntry(original, newEntry);
return newEntry;
}
},
WEAK_ACCESS_WRITE {
@Override
<K, V> ReferenceEntry<K, V> newEntry(
Segment<K, V> segment, K key, int hash, ReferenceEntry<K, V> next) {
return new WeakAccessWriteEntry<K, V>(segment.keyReferenceQueue, key, hash, next);
}
@Override
<K, V> ReferenceEntry<K, V> copyEntry(
Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
copyAccessEntry(original, newEntry);
copyWriteEntry(original, newEntry);
return newEntry;
}
};
/**
* Masks used to compute indices in the following table.
*/
static final int ACCESS_MASK = 1;
static final int WRITE_MASK = 2;
static final int WEAK_MASK = 4;
/**
* Look-up table for factories.
*/
static final EntryFactory[] factories = {
STRONG, STRONG_ACCESS, STRONG_WRITE, STRONG_ACCESS_WRITE,
WEAK, WEAK_ACCESS, WEAK_WRITE, WEAK_ACCESS_WRITE,
};
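// The array is indexed by the bit flags computed in getFactory() below; for example, a
// weak-keyed cache that tracks both access and write order maps to
// WEAK_MASK | ACCESS_MASK | WRITE_MASK = 4 | 1 | 2 = 7, i.e. WEAK_ACCESS_WRITE.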
static EntryFactory getFactory(Strength keyStrength, boolean usesAccessQueue,
boolean usesWriteQueue) {
int flags = ((keyStrength == Strength.WEAK) ? WEAK_MASK : 0)
| (usesAccessQueue ? ACCESS_MASK : 0)
| (usesWriteQueue ? WRITE_MASK : 0);
return factories[flags];
}
/**
* Creates a new entry.
*
* @param segment to create the entry for
* @param key of the entry
* @param hash of the key
* @param next entry in the same bucket
*/
abstract <K, V> ReferenceEntry<K, V> newEntry(
Segment<K, V> segment, K key, int hash, ReferenceEntry<K, V> next);
/**
* Copies an entry, assigning it a new {@code next} entry.
*
* @param original the entry to copy
* @param newNext entry in the same bucket
*/
// Guarded By Segment.this
<K, V> ReferenceEntry<K, V> copyEntry(
Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
return newEntry(segment, original.getKey(), original.getHash(), newNext);
}
// Guarded By Segment.this
<K, V> void copyAccessEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newEntry) {
// TODO(fry): when we link values instead of entries this method can go
// away, as can connectAccessOrder, nullifyAccessOrder.
newEntry.setAccessTime(original.getAccessTime());
connectAccessOrder(original.getPreviousInAccessQueue(), newEntry);
connectAccessOrder(newEntry, original.getNextInAccessQueue());
nullifyAccessOrder(original);
}
// Guarded By Segment.this
<K, V> void copyWriteEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newEntry) {
// TODO(fry): when we link values instead of entries this method can go
// away, as can connectWriteOrder, nullifyWriteOrder.
newEntry.setWriteTime(original.getWriteTime());
connectWriteOrder(original.getPreviousInWriteQueue(), newEntry);
connectWriteOrder(newEntry, original.getNextInWriteQueue());
nullifyWriteOrder(original);
}
}
private enum NullEntry implements ReferenceEntry<Object, Object> {
INSTANCE;
@Override
public ValueReference<Object, Object> getValueReference() {
return null;
}
@Override
public void setValueReference(ValueReference<Object, Object> valueReference) {
}
@Override
public ReferenceEntry<Object, Object> getNext() {
return null;
}
@Override
public int getHash() {
return 0;
}
@Override
public Object getKey() {
return null;
}
@Override
public long getAccessTime() {
return 0;
}
@Override
public void setAccessTime(long time) {
}
@Override
public ReferenceEntry<Object, Object> getNextInAccessQueue() {
return this;
}
@Override
public void setNextInAccessQueue(ReferenceEntry<Object, Object> next) {
}
@Override
public ReferenceEntry<Object, Object> getPreviousInAccessQueue() {
return this;
}
@Override
public void setPreviousInAccessQueue(ReferenceEntry<Object, Object> previous) {
}
@Override
public long getWriteTime() {
return 0;
}
@Override
public void setWriteTime(long time) {
}
@Override
public ReferenceEntry<Object, Object> getNextInWriteQueue() {
return this;
}
@Override
public void setNextInWriteQueue(ReferenceEntry<Object, Object> next) {
}
@Override
public ReferenceEntry<Object, Object> getPreviousInWriteQueue() {
return this;
}
@Override
public void setPreviousInWriteQueue(ReferenceEntry<Object, Object> previous) {
}
}
/**
* A reference to a value.
*/
interface ValueReference<K, V> {
/**
* Returns the value. Does not block or throw exceptions.
*/
V get();
/**
* Waits for a value that may still be loading. Unlike get(), this method can block (in the
* case of FutureValueReference).
*
* @throws ExecutionException if the loading thread throws an exception
* @throws ExecutionError if the loading thread throws an error
*/
V waitForValue() throws ExecutionException;
/**
* Returns the weight of this entry. This is assumed to be static between calls to setValue.
*/
int getWeight();
/**
* Returns the entry associated with this value reference, or {@code null} if this value
* reference is independent of any entry.
*/
ReferenceEntry<K, V> getEntry();
/**
* Creates a copy of this reference for the given entry.
*
* <p>{@code value} may be null only for a loading reference.
*/
ValueReference<K, V> copyFor(
ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry);
/**
* Notify pending loads that a new value was set. This is only relevant to loading
* value references.
*/
void notifyNewValue(V newValue);
/**
* Returns true if a new value is currently loading, regardless of whether or not there is an
* existing value. It is assumed that the return value of this method is constant for any given
* ValueReference instance.
*/
boolean isLoading();
/**
* Returns true if this reference contains an active value, meaning one that is still considered
* present in the cache. Active values consist of live values, which are returned by cache
* lookups, and dead values, which have been evicted but are awaiting removal. Non-active values
* consist strictly of loading values, though during refresh a value may be both active and
* loading.
*/
boolean isActive();
}
/**
* An entry in a reference map.
*
* Entries in the map can be in the following states:
*
* Valid:
* - Live: valid key/value are set
* - Loading: loading is pending
*
* Invalid:
* - Expired: time expired (key/value may still be set)
* - Collected: key/value was partially collected, but not yet cleaned up
* - Unset: marked as unset, awaiting cleanup or reuse
*/
interface ReferenceEntry<K, V> {
/**
* Returns the value reference from this entry.
*/
ValueReference<K, V> getValueReference();
/**
* Sets the value reference for this entry.
*/
void setValueReference(ValueReference<K, V> valueReference);
/**
* Returns the next entry in the chain.
*/
ReferenceEntry<K, V> getNext();
/**
* Returns the entry's hash.
*/
int getHash();
/**
* Returns the key for this entry.
*/
K getKey();
/*
* Used by entries that use access order. Access entries are maintained in a doubly-linked list.
* New entries are added at the tail of the list at write time; stale entries are expired from
* the head of the list.
*/
/**
* Returns the time that this entry was last accessed, in ns.
*/
long getAccessTime();
/**
* Sets the entry access time in ns.
*/
void setAccessTime(long time);
/**
* Returns the next entry in the access queue.
*/
ReferenceEntry<K, V> getNextInAccessQueue();
/**
* Sets the next entry in the access queue.
*/
void setNextInAccessQueue(ReferenceEntry<K, V> next);
/**
* Returns the previous entry in the access queue.
*/
ReferenceEntry<K, V> getPreviousInAccessQueue();
/**
* Sets the previous entry in the access queue.
*/
void setPreviousInAccessQueue(ReferenceEntry<K, V> previous);
/*
* Implemented by entries that use write order. Write entries are maintained in a
* doubly-linked list. New entries are added at the tail of the list at write time and stale
* entries are expired from the head of the list.
*/
/**
* Returns the time that this entry was last written, in ns.
*/
long getWriteTime();
/**
* Sets the entry write time in ns.
*/
void setWriteTime(long time);
/**
* Returns the next entry in the write queue.
*/
ReferenceEntry<K, V> getNextInWriteQueue();
/**
* Sets the next entry in the write queue.
*/
void setNextInWriteQueue(ReferenceEntry<K, V> next);
/**
* Returns the previous entry in the write queue.
*/
ReferenceEntry<K, V> getPreviousInWriteQueue();
/**
* Sets the previous entry in the write queue.
*/
void setPreviousInWriteQueue(ReferenceEntry<K, V> previous);
}
abstract static class AbstractReferenceEntry<K, V> implements ReferenceEntry<K, V> {
@Override
public ValueReference<K, V> getValueReference() {
throw new UnsupportedOperationException();
}
@Override
public void setValueReference(ValueReference<K, V> valueReference) {
throw new UnsupportedOperationException();
}
@Override
public ReferenceEntry<K, V> getNext() {
throw new UnsupportedOperationException();
}
@Override
public int getHash() {
throw new UnsupportedOperationException();
}
@Override
public K getKey() {
throw new UnsupportedOperationException();
}
@Override
public long getAccessTime() {
throw new UnsupportedOperationException();
}
@Override
public void setAccessTime(long time) {
throw new UnsupportedOperationException();
}
@Override
public ReferenceEntry<K, V> getNextInAccessQueue() {
throw new UnsupportedOperationException();
}
@Override
public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
throw new UnsupportedOperationException();
}
@Override
public ReferenceEntry<K, V> getPreviousInAccessQueue() {
throw new UnsupportedOperationException();
}
@Override
public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
throw new UnsupportedOperationException();
}
@Override
public long getWriteTime() {
throw new UnsupportedOperationException();
}
@Override
public void setWriteTime(long time) {
throw new UnsupportedOperationException();
}
@Override
public ReferenceEntry<K, V> getNextInWriteQueue() {
throw new UnsupportedOperationException();
}
@Override
public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
throw new UnsupportedOperationException();
}
@Override
public ReferenceEntry<K, V> getPreviousInWriteQueue() {
throw new UnsupportedOperationException();
}
@Override
public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
throw new UnsupportedOperationException();
}
}
/**
* Used for strongly-referenced keys.
*/
static class StrongEntry<K, V> extends AbstractReferenceEntry<K, V> {
final K key;
final int hash;
final ReferenceEntry<K, V> next;
// The code below is exactly the same for each entry type.
volatile ValueReference<K, V> valueReference = unset();
StrongEntry(K key, int hash, ReferenceEntry<K, V> next) {
this.key = key;
this.hash = hash;
this.next = next;
}
@Override
public K getKey() {
return this.key;
}
@Override
public ValueReference<K, V> getValueReference() {
return valueReference;
}
@Override
public void setValueReference(ValueReference<K, V> valueReference) {
this.valueReference = valueReference;
}
@Override
public int getHash() {
return hash;
}
@Override
public ReferenceEntry<K, V> getNext() {
return next;
}
}
static final class StrongAccessEntry<K, V> extends StrongEntry<K, V> {
volatile long accessTime = Long.MAX_VALUE;
// The code below is exactly the same for each access entry type.
// Guarded By Segment.this
ReferenceEntry<K, V> nextAccess = nullEntry();
// Guarded By Segment.this
ReferenceEntry<K, V> previousAccess = nullEntry();
StrongAccessEntry(K key, int hash, ReferenceEntry<K, V> next) {
super(key, hash, next);
}
@Override
public long getAccessTime() {
return accessTime;
}
@Override
public void setAccessTime(long time) {
this.accessTime = time;
}
@Override
public ReferenceEntry<K, V> getNextInAccessQueue() {
return nextAccess;
}
@Override
public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
this.nextAccess = next;
}
@Override
public ReferenceEntry<K, V> getPreviousInAccessQueue() {
return previousAccess;
}
@Override
public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
this.previousAccess = previous;
}
}
static final class StrongWriteEntry<K, V> extends StrongEntry<K, V> {
volatile long writeTime = Long.MAX_VALUE;
// The code below is exactly the same for each write entry type.
// Guarded By Segment.this
ReferenceEntry<K, V> nextWrite = nullEntry();
// Guarded By Segment.this
ReferenceEntry<K, V> previousWrite = nullEntry();
StrongWriteEntry(K key, int hash, ReferenceEntry<K, V> next) {
super(key, hash, next);
}
@Override
public long getWriteTime() {
return writeTime;
}
@Override
public void setWriteTime(long time) {
this.writeTime = time;
}
@Override
public ReferenceEntry<K, V> getNextInWriteQueue() {
return nextWrite;
}
@Override
public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
this.nextWrite = next;
}
@Override
public ReferenceEntry<K, V> getPreviousInWriteQueue() {
return previousWrite;
}
@Override
public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
this.previousWrite = previous;
}
}
static final class StrongAccessWriteEntry<K, V> extends StrongEntry<K, V> {
volatile long accessTime = Long.MAX_VALUE;
// The code below is exactly the same for each access entry type.
// Guarded By Segment.this
ReferenceEntry<K, V> nextAccess = nullEntry();
// Guarded By Segment.this
ReferenceEntry<K, V> previousAccess = nullEntry();
volatile long writeTime = Long.MAX_VALUE;
// Guarded By Segment.this
ReferenceEntry<K, V> nextWrite = nullEntry();
// Guarded By Segment.this
ReferenceEntry<K, V> previousWrite = nullEntry();
StrongAccessWriteEntry(K key, int hash, ReferenceEntry<K, V> next) {
super(key, hash, next);
}
@Override
public long getAccessTime() {
return accessTime;
}
@Override
public void setAccessTime(long time) {
this.accessTime = time;
}
@Override
public ReferenceEntry<K, V> getNextInAccessQueue() {
return nextAccess;
}
// The code below is exactly the same for each write entry type.
@Override
public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
this.nextAccess = next;
}
@Override
public ReferenceEntry