
package commons.box.app;
import java.io.InvalidObjectException;
import java.io.ObjectInputStream;
import java.io.Serializable;
import java.lang.ref.WeakReference;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicIntegerArray;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
/*****************************************************************
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
****************************************************************/
/*
* Copyright 2010 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* SafeLinkedMap implementation.
*
* Reference implementation:
*
* https://github.com/apache/cayenne/tree/master/cayenne-server/src/main/java/org/apache/cayenne/util/concurrentlinkedhashmap
*
* // based on http://concurrentlinkedhashmap.googlecode.com/svn/trunk r754
*
* A hash table supporting full concurrency of retrievals, adjustable expected concurrency
* for updates, and a maximum capacity to bound the map by. This implementation differs
* from {@link ConcurrentHashMap} in that it maintains a page replacement algorithm that
* is used to evict an entry when the map has exceeded its capacity. Unlike the
* Java Collections Framework, this map does not have a publicly visible
* constructor and instances are created through a {@link Builder}.
*
* An entry is evicted from the map when the weighted capacity exceeds its
* maximum weighted capacity threshold. A {@link Weigher} instance determines how
* many units of capacity a value consumes. The default weigher assigns each value a
* weight of 1 to bound the map by the total number of key-value pairs. A map
* that holds collections may choose to weigh values by the number of elements in the
* collection and bound the map by the total number of elements that it contains. A change
* to a value that modifies its weight requires that an update operation is performed on
* the map.
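*
* <p>For instance, a map holding collections could weigh each value by its
* element count. A minimal sketch (the {@code weightOf} method is the one this
* class itself invokes; {@link BoundedWeigher} requires the result to fall in
* [1, MAXIMUM_WEIGHT]):
* <pre>{@code
* Weigher<Collection<String>> byElements = new Weigher<Collection<String>>() {
*     public int weightOf(Collection<String> value) {
*         return Math.max(1, value.size()); // a weight must be at least 1
*     }
* };
* }</pre>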
*
* An {@link EvictionListener} may be supplied for notification when an entry is evicted
* from the map. This listener is invoked on a caller's thread and will not block other
* threads from operating on the map. An implementation should be aware that the caller's
* thread will not expect long execution times or failures as a side effect of the
* listener being notified. Execution safety and a fast turn around time can be achieved
* by performing the operation asynchronously, such as by submitting a task to an
* {@link java.util.concurrent.ExecutorService}.
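*
* <p>A sketch of such a non-blocking listener (the {@code onEviction(key, value)}
* callback is the shape this class invokes; {@code cleanup} is a hypothetical
* application method):
* <pre>{@code
* final ExecutorService pool = Executors.newSingleThreadExecutor();
* EvictionListener<String, byte[]> listener = new EvictionListener<String, byte[]>() {
*     public void onEviction(final String key, final byte[] value) {
*         pool.execute(new Runnable() {
*             public void run() {
*                 cleanup(key, value); // hypothetical resource release
*             }
*         });
*     }
* };
* }</pre>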
*
* The concurrency level determines the number of threads that can concurrently
* modify the table. Using a significantly higher or lower value than needed can waste
* space or lead to thread contention, but an estimate within an order of magnitude of the
* ideal value does not usually have a noticeable impact. Because placement in hash tables
* is essentially random, the actual concurrency will vary.
*
* This class and its views and iterators implement all of the optional methods
* of the {@link Map} and {@link Iterator} interfaces.
*
* Like {@link java.util.Hashtable} but unlike {@link HashMap}, this class does
* not allow null to be used as a key or value. Unlike
* {@link java.util.LinkedHashMap}, this class does not provide predictable
* iteration order. A snapshot of the keys and entries may be obtained in ascending and
* descending order of retention.
*
* @param <K> the type of keys maintained by this map
* @param <V> the type of mapped values
*
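* <p>A construction sketch (hedged: the {@code Builder} is not part of this
* listing, so the method names below follow the upstream
* ConcurrentLinkedHashMap builder and are assumptions):
* <pre>{@code
* SafeLinkedMap<String, String> cache = new SafeLinkedMap.Builder<String, String>()
*         .maximumWeightedCapacity(1024) // assumed builder method
*         .build();                      // assumed builder method
* }</pre>
*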
* Author: xingxiuyi
* Copyright: xingxiuyi
*/
public class SafeLinkedMap<K, V> extends AbstractMap<K, V> {
/*
* This class performs a best-effort bounding of a ConcurrentHashMap using a
* page-replacement algorithm to determine which entries to evict when the capacity is
* exceeded. The page replacement algorithm's data structures are kept eventually
* consistent with the map. An update to the map and recording of reads may not be
* immediately reflected on the algorithm's data structures. These structures are
* guarded by a lock and operations are applied in batches to avoid lock contention.
* The penalty of applying the batches is spread across threads so that the amortized
* cost is slightly higher than performing just the ConcurrentHashMap operation. A
* memento of the reads and writes that were performed on the map are recorded in a
* buffer. These buffers are drained at the first opportunity after a write or when a
* buffer exceeds a threshold size. A mostly strict ordering is achieved by observing
* that each buffer is in a weakly sorted order relative to the last drain. This
* allows the buffers to be merged in O(n) time so that the operations are run in the
* expected order. Due to a lack of a strict ordering guarantee, a task can be
* executed out-of-order, such as a removal followed by its addition. The state of the
* entry is encoded within the value's weight. Alive: The entry is in both the
* hash-table and the page replacement policy. This is represented by a positive
* weight. Retired: The entry is not in the hash-table and is pending removal from the
* page replacement policy. This is represented by a negative weight. Dead: The entry
* is not in the hash-table and is not in the page replacement policy. This is
* represented by a weight of zero. The Least Recently Used page replacement algorithm
* was chosen due to its simplicity, high hit rate, and ability to be implemented with
* O(1) time complexity.
*/
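/*
* Entry life-cycle, as encoded in the sign of the weight (see WeightedValue
* below): alive (weight > 0) -> retired (weight < 0) -> dead (weight == 0).
* tryToRetire and makeRetired negate the weight, and makeDead zeroes it while
* subtracting |weight| from weightedSize.
*/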
/**
* The maximum weighted capacity of the map.
*/
static final int MAXIMUM_CAPACITY = 1 << 30;
/**
* The maximum weight of a value.
*/
static final int MAXIMUM_WEIGHT = 1 << 29;
/**
* The maximum number of pending operations per buffer.
*/
static final int MAXIMUM_BUFFER_SIZE = 1 << 20;
/**
* The number of pending operations per buffer before attempting to drain.
*/
static final int BUFFER_THRESHOLD = 16;
/**
* The number of buffers to use.
*/
static final int NUMBER_OF_BUFFERS;
/**
* Mask value for indexing into the buffers.
*/
static final int BUFFER_MASK;
/**
* The maximum number of operations to perform per amortized drain.
*/
static final int AMORTIZED_DRAIN_THRESHOLD;
/**
* A queue that discards all entries.
*/
static final Queue<?> DISCARDING_QUEUE = new DiscardingQueue();
static {
int buffers = ceilingNextPowerOfTwo(Runtime.getRuntime().availableProcessors());
AMORTIZED_DRAIN_THRESHOLD = (1 + buffers) * BUFFER_THRESHOLD;
NUMBER_OF_BUFFERS = buffers;
BUFFER_MASK = buffers - 1;
}
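// For example, with 8 available processors: buffers = 8 (already a power of
// two), BUFFER_MASK = 0b0111, and AMORTIZED_DRAIN_THRESHOLD = (1 + 8) * 16 = 144.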
static int ceilingNextPowerOfTwo(int x) {
// From Hacker's Delight, Chapter 3, Harry S. Warren Jr.
return 1 << (Integer.SIZE - Integer.numberOfLeadingZeros(x - 1));
}
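// e.g. ceilingNextPowerOfTwo(5) == 8 and ceilingNextPowerOfTwo(8) == 8;
// Integer.numberOfLeadingZeros(x - 1) locates the highest set bit of x - 1,
// so the shift rounds x up to the nearest power of two.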
/**
* The draining status of the buffers.
*/
enum DrainStatus {
/**
* A drain is not taking place.
*/
IDLE,
/**
* A drain is required due to a pending write modification.
*/
REQUIRED,
/**
* A drain is in progress.
*/
PROCESSING
}
// The backing data store holding the key-value associations
final ConcurrentMap<K, Node> data;
final int concurrencyLevel;
// These fields provide support to bound the map by a maximum capacity
final SafeLinkedDeque<Node> evictionDeque;
// must write under lock
volatile int weightedSize;
// must write under lock
volatile int capacity;
volatile int nextOrder;
int drainedOrder;
final Lock evictionLock;
final Queue<Task>[] buffers;
final ExecutorService executor;
final Weigher<? super V> weigher;
final AtomicIntegerArray bufferLengths;
final AtomicReference<DrainStatus> drainStatus;
// These fields provide support for notifying a listener.
final Queue<Node> pendingNotifications;
final EvictionListener<K, V> listener;
transient Set<K> keySet;
transient Collection<V> values;
transient Set<Entry<K, V>> entrySet;
/**
* Creates an instance based on the builder's configuration.
*/
@SuppressWarnings({
"unchecked", "cast"
})
private SafeLinkedMap(Builder<K, V> builder) {
// The data store and its maximum capacity
concurrencyLevel = builder.concurrencyLevel;
capacity = Math.min(builder.capacity, MAXIMUM_CAPACITY);
data = new ConcurrentHashMap<>(
builder.initialCapacity,
0.75f,
concurrencyLevel);
// The eviction support
weigher = builder.weigher;
executor = builder.executor;
nextOrder = Integer.MIN_VALUE;
drainedOrder = Integer.MIN_VALUE;
evictionLock = new ReentrantLock();
evictionDeque = new SafeLinkedDeque<>();
drainStatus = new AtomicReference<>(DrainStatus.IDLE);
buffers = (Queue<Task>[]) new Queue[NUMBER_OF_BUFFERS];
bufferLengths = new AtomicIntegerArray(NUMBER_OF_BUFFERS);
for (int i = 0; i < NUMBER_OF_BUFFERS; i++) {
buffers[i] = new ConcurrentLinkedQueue<>();
}
// The notification queue and listener
listener = builder.listener;
pendingNotifications = (listener == DiscardingListener.INSTANCE)
? (Queue<Node>) DISCARDING_QUEUE
: new ConcurrentLinkedQueue<Node>();
}
/**
* Asserts that the object is not null.
*/
static void checkNotNull(Object o) {
if (o == null) {
throw new NullPointerException();
}
}
/* ---------------- Eviction Support -------------- */
/**
* Retrieves the maximum weighted capacity of the map.
*
* @return the maximum weighted capacity
*/
public int capacity() {
return capacity;
}
/**
* Sets the maximum weighted capacity of the map and eagerly evicts entries until it
* shrinks to the appropriate size.
*
* @param capacity the maximum weighted capacity of the map
* @throws IllegalArgumentException if the capacity is negative
*/
public void setCapacity(int capacity) {
if (capacity < 0) {
throw new IllegalArgumentException();
}
evictionLock.lock();
try {
this.capacity = Math.min(capacity, MAXIMUM_CAPACITY);
drainBuffers(AMORTIZED_DRAIN_THRESHOLD);
evict();
} finally {
evictionLock.unlock();
}
notifyListener();
}
/**
* Determines whether the map has exceeded its capacity.
*/
boolean hasOverflowed() {
return weightedSize > capacity;
}
/**
* Evicts entries from the map while it exceeds the capacity and appends evicted
* entries to the notification queue for processing.
*/
void evict() {
// Attempts to evict entries from the map if it exceeds the maximum
// capacity. If the eviction fails due to a concurrent removal of the
// victim, that removal may cancel out the addition that triggered this
// eviction. The victim is eagerly unlinked before the removal task so
// that if an eviction is still required then a new victim will be chosen
// for removal.
while (hasOverflowed()) {
Node node = evictionDeque.poll();
// If weighted values are used, then the pending operations will adjust
// the size to reflect the correct weight
if (node == null) {
return;
}
// Notify the listener only if the entry was evicted
if (data.remove(node.key, node)) {
pendingNotifications.add(node);
}
node.makeDead();
}
}
/**
* Performs the post-processing work required after the map operation.
*
* @param task the pending operation to be applied
*/
void afterCompletion(Task task) {
boolean delayable = schedule(task);
if (shouldDrainBuffers(delayable)) {
tryToDrainBuffers(AMORTIZED_DRAIN_THRESHOLD);
}
notifyListener();
}
/**
* Schedules the task to be applied to the page replacement policy.
*
* @param task the pending operation
* @return if the draining of the buffers can be delayed
*/
private boolean schedule(Task task) {
int index = bufferIndex();
int buffered = bufferLengths.incrementAndGet(index);
if (task.isWrite()) {
buffers[index].add(task);
drainStatus.set(DrainStatus.REQUIRED);
return false;
}
// A buffer may discard a read task if its length exceeds a tolerance level
if (buffered <= MAXIMUM_BUFFER_SIZE) {
buffers[index].add(task);
return (buffered <= BUFFER_THRESHOLD);
} else { // not optimized for fail-safe scenario
bufferLengths.decrementAndGet(index);
return false;
}
}
/**
* Returns the index to the buffer that the task should be scheduled on.
*/
static int bufferIndex() {
// A buffer is chosen by the thread's id so that tasks are distributed in a
// pseudo evenly manner. This helps avoid hot entries causing contention due
// to other threads trying to append to the same buffer.
return (int) Thread.currentThread().getId() & BUFFER_MASK;
}
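// e.g. with BUFFER_MASK = 7, a thread whose id is 11 (0b1011) always
// schedules onto buffers[11 & 7] = buffers[3].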
/**
* Returns the ordering value to assign to a task.
*/
int nextOrdering() {
// The next ordering is acquired in a racy fashion as the increment is not
// atomic with the insertion into a buffer. This means that concurrent tasks
// can have the same ordering and the buffers are in a weakly sorted order.
return nextOrder++;
}
/**
* Determines whether the buffers should be drained.
*
* @param delayable if a drain should be delayed until required
* @return if a drain should be attempted
*/
boolean shouldDrainBuffers(boolean delayable) {
if (executor.isShutdown()) {
DrainStatus status = drainStatus.get();
return (status != DrainStatus.PROCESSING)
& (!delayable | (status == DrainStatus.REQUIRED));
}
return false;
}
/**
* Attempts to acquire the eviction lock and apply the pending operations to the page
* replacement policy.
*
* @param maxToDrain the maximum number of operations to drain
*/
void tryToDrainBuffers(int maxToDrain) {
if (evictionLock.tryLock()) {
try {
drainStatus.set(DrainStatus.PROCESSING);
drainBuffers(maxToDrain);
} finally {
drainStatus.compareAndSet(DrainStatus.PROCESSING, DrainStatus.IDLE);
evictionLock.unlock();
}
}
}
/**
* Drains the buffers and applies the pending operations.
*
* @param maxToDrain the maximum number of operations to drain
*/
void drainBuffers(int maxToDrain) {
// A mostly strict ordering is achieved by observing that each buffer
// contains tasks in a weakly sorted order starting from the last drain.
// The buffers can be merged into a sorted list in O(n) time by using
// counting sort and chaining on a collision.
// The output is capped to the expected number of tasks plus additional
// slack to optimistically handle the concurrent additions to the buffers.
Task[] tasks = new Task[maxToDrain];
// Moves the tasks into the output array, applies them, and updates the
// marker for the starting order of the next drain.
int maxTaskIndex = moveTasksFromBuffers(tasks);
runTasks(tasks, maxTaskIndex);
updateDrainedOrder(tasks, maxTaskIndex);
}
/**
* Moves the tasks from the buffers into the output array.
*
* @param tasks the ordered array of the pending operations
* @return the highest index location of a task that was added to the array
*/
int moveTasksFromBuffers(Task[] tasks) {
int maxTaskIndex = -1;
for (int i = 0; i < buffers.length; i++) {
int maxIndex = moveTasksFromBuffer(tasks, i);
maxTaskIndex = Math.max(maxIndex, maxTaskIndex);
}
return maxTaskIndex;
}
/**
* Moves the tasks from the specified buffer into the output array.
*
* @param tasks the ordered array of the pending operations
* @param bufferIndex the buffer to drain into the tasks array
* @return the highest index location of a task that was added to the array
*/
int moveTasksFromBuffer(Task[] tasks, int bufferIndex) {
// While a buffer is being drained it may be concurrently appended to. The
// number of tasks removed are tracked so that the length can be decremented
// by the delta rather than set to zero.
Queue<Task> buffer = buffers[bufferIndex];
int removedFromBuffer = 0;
Task task;
int maxIndex = -1;
while ((task = buffer.poll()) != null) {
removedFromBuffer++;
// The index into the output array is determined by calculating the offset
// since the last drain
int index = task.getOrder() - drainedOrder;
if (index < 0) {
// The task was missed by the last drain and can be run immediately
task.run();
} else if (index >= tasks.length) {
// Due to concurrent additions, the order exceeds the capacity of the
// output array. It is added to the end as overflow and the remaining
// tasks in the buffer will be handled by the next drain.
maxIndex = tasks.length - 1;
addTaskToChain(tasks, task, maxIndex);
break;
} else {
maxIndex = Math.max(index, maxIndex);
addTaskToChain(tasks, task, index);
}
}
bufferLengths.addAndGet(bufferIndex, -removedFromBuffer);
return maxIndex;
}
/**
* Adds the task as the head of the chain at the index location.
*
* @param tasks the ordered array of the pending operations
* @param task the pending operation to add
* @param index the array location
*/
void addTaskToChain(Task[] tasks, Task task, int index) {
task.setNext(tasks[index]);
tasks[index] = task;
}
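// e.g. two tasks whose orders map to the same offset i collide at tasks[i]:
// the later-drained task becomes the new head and links to the earlier one,
// and runTasksInChain later replays the chain from the head.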
/**
* Runs the pending page replacement policy operations.
*
* @param tasks the ordered array of the pending operations
* @param maxTaskIndex the maximum index of the array
*/
void runTasks(Task[] tasks, int maxTaskIndex) {
for (int i = 0; i <= maxTaskIndex; i++) {
runTasksInChain(tasks[i]);
}
}
/**
* Runs the pending operations on the linked chain.
*
* @param task the first task in the chain of operations
*/
void runTasksInChain(Task task) {
while (task != null) {
Task current = task;
task = task.getNext();
current.setNext(null);
current.run();
}
}
/**
* Updates the order to start the next drain from.
*
* @param tasks the ordered array of operations
* @param maxTaskIndex the maximum index of the array
*/
void updateDrainedOrder(Task[] tasks, int maxTaskIndex) {
if (maxTaskIndex >= 0) {
Task task = tasks[maxTaskIndex];
drainedOrder = task.getOrder() + 1;
}
}
/**
* Notifies the listener of entries that were evicted.
*/
void notifyListener() {
Node node;
while ((node = pendingNotifications.poll()) != null) {
listener.onEviction(node.key, node.getValue());
}
}
/**
* Updates the node's location in the page replacement policy.
*/
class ReadTask extends AbstractTask {
final Node node;
ReadTask(Node node) {
this.node = node;
}
public void run() {
// An entry may be scheduled for reordering despite having been previously
// removed. This can occur when the entry was concurrently read while a
// writer was removing it. If the entry is no longer linked then it does
// not need to be processed.
if (evictionDeque.contains(node)) {
evictionDeque.moveToBack(node);
}
}
public boolean isWrite() {
return false;
}
}
/**
* Adds the node to the page replacement policy.
*/
final class AddTask extends AbstractTask {
final Node node;
final int weight;
AddTask(Node node, int weight) {
this.weight = weight;
this.node = node;
}
public void run() {
weightedSize += weight;
// ignore out-of-order write operations
if (node.get().isAlive()) {
evictionDeque.add(node);
evict();
}
}
public boolean isWrite() {
return true;
}
}
/**
* Removes a node from the page replacement policy.
*/
final class RemovalTask extends AbstractTask {
final Node node;
RemovalTask(Node node) {
this.node = node;
}
public void run() {
// add may not have been processed yet
evictionDeque.remove(node);
node.makeDead();
}
public boolean isWrite() {
return true;
}
}
/**
* Updates the weighted size and evicts an entry on overflow.
*/
final class UpdateTask extends ReadTask {
final int weightDifference;
public UpdateTask(Node node, int weightDifference) {
super(node);
this.weightDifference = weightDifference;
}
@Override
public void run() {
super.run();
weightedSize += weightDifference;
evict();
}
@Override
public boolean isWrite() {
return true;
}
}
/* ---------------- Concurrent Map Support -------------- */
@Override
public boolean isEmpty() {
return data.isEmpty();
}
@Override
public int size() {
return data.size();
}
/**
* Returns the weighted size of this map.
*
* @return the combined weight of the values in this map
*/
public int weightedSize() {
return Math.max(0, weightedSize);
}
@Override
public void clear() {
// The alternative is to iterate through the keys and call #remove(), which
// adds unnecessary contention on the eviction lock and buffers.
evictionLock.lock();
try {
Node node;
while ((node = evictionDeque.poll()) != null) {
data.remove(node.key, node);
node.makeDead();
}
// Drain the buffers and run only the write tasks
for (int i = 0; i < buffers.length; i++) {
Queue<Task> buffer = buffers[i];
int removed = 0;
Task task;
while ((task = buffer.poll()) != null) {
if (task.isWrite()) {
task.run();
}
removed++;
}
bufferLengths.addAndGet(i, -removed);
}
} finally {
evictionLock.unlock();
}
}
@Override
public boolean containsKey(Object key) {
return data.containsKey(key);
}
@Override
public boolean containsValue(Object value) {
checkNotNull(value);
for (Node node : data.values()) {
if (node.getValue().equals(value)) {
return true;
}
}
return false;
}
@Override
public V get(Object key) {
final Node node = data.get(key);
if (node == null) {
return null;
}
afterCompletion(new ReadTask(node));
return node.getValue();
}
@Override
public V put(K key, V value) {
return put(key, value, false);
}
public V putIfAbsent(K key, V value) {
return put(key, value, true);
}
/**
* Adds a node to the list and the data store. If an existing node is found, then its
* value is updated if allowed.
*
* @param key key with which the specified value is to be associated
* @param value value to be associated with the specified key
* @param onlyIfAbsent a write is performed only if the key is not already associated
* with a value
* @return the prior value in the data store or null if no mapping was found
*/
@SuppressWarnings("Duplicates")
V put(K key, V value, boolean onlyIfAbsent) {
checkNotNull(value);
final int weight = weigher.weightOf(value);
final WeightedValue<V> weightedValue = new WeightedValue<>(value, weight);
final Node node = new Node(key, weightedValue);
for (; ; ) {
final Node prior = data.putIfAbsent(node.key, node);
if (prior == null) {
afterCompletion(new AddTask(node, weight));
return null;
} else if (onlyIfAbsent) {
afterCompletion(new ReadTask(prior));
return prior.getValue();
}
for (; ; ) {
final WeightedValue<V> oldWeightedValue = prior.get();
if (!oldWeightedValue.isAlive()) {
break;
}
if (prior.compareAndSet(oldWeightedValue, weightedValue)) {
final int weightedDifference = weight - oldWeightedValue.weight;
final Task task = (weightedDifference == 0)
? new ReadTask(prior)
: new UpdateTask(prior, weightedDifference);
afterCompletion(task);
return oldWeightedValue.value;
}
}
}
}
@Override
public V remove(Object key) {
final Node node = data.remove(key);
if (node == null) {
return null;
}
node.makeRetired();
afterCompletion(new RemovalTask(node));
return node.getValue();
}
public boolean remove(Object key, Object value) {
Node node = data.get(key);
if ((node == null) || (value == null)) {
return false;
}
WeightedValue<V> weightedValue = node.get();
for (; ; ) {
if (weightedValue.hasValue(value)) {
if (node.tryToRetire(weightedValue)) {
if (data.remove(key, node)) {
afterCompletion(new RemovalTask(node));
return true;
}
} else {
weightedValue = node.get();
if (weightedValue.isAlive()) {
// retry as an intermediate update may have replaced the value with
// an equal instance that has a different reference identity
continue;
}
}
}
return false;
}
}
@SuppressWarnings("Duplicates")
public V replace(K key, V value) {
checkNotNull(value);
final int weight = weigher.weightOf(value);
final WeightedValue<V> weightedValue = new WeightedValue<>(value, weight);
final Node node = data.get(key);
if (node == null) {
return null;
}
for (; ; ) {
WeightedValue<V> oldWeightedValue = node.get();
if (!oldWeightedValue.isAlive()) {
return null;
}
if (node.compareAndSet(oldWeightedValue, weightedValue)) {
int weightedDifference = weight - oldWeightedValue.weight;
final Task task = (weightedDifference == 0)
? new ReadTask(node)
: new UpdateTask(node, weightedDifference);
afterCompletion(task);
return oldWeightedValue.value;
}
}
}
public boolean replace(K key, V oldValue, V newValue) {
checkNotNull(oldValue);
checkNotNull(newValue);
final int weight = weigher.weightOf(newValue);
final WeightedValue<V> newWeightedValue = new WeightedValue<>(newValue, weight);
final Node node = data.get(key);
if (node == null) {
return false;
}
for (; ; ) {
final WeightedValue<V> weightedValue = node.get();
if (!weightedValue.isAlive() || !weightedValue.hasValue(oldValue)) {
return false;
}
if (node.compareAndSet(weightedValue, newWeightedValue)) {
int weightedDifference = weight - weightedValue.weight;
final Task task = (weightedDifference == 0)
? new ReadTask(node)
: new UpdateTask(node, weightedDifference);
afterCompletion(task);
return true;
}
}
}
@Override
public Set<K> keySet() {
Set<K> ks = keySet;
return (ks == null) ? (keySet = new KeySet()) : ks;
}
/**
* Returns an unmodifiable snapshot {@link Set} view of the keys contained in this map.
* The set's iterator returns the keys whose order of iteration is the ascending order
* in which its entries are considered eligible for retention, from the least-likely
* to be retained to the most-likely.
*
* Beware that, unlike in {@link #keySet()}, obtaining the set is NOT a
* constant-time operation. Because of the asynchronous nature of the page replacement
* policy, determining the retention ordering requires a traversal of the keys.
*
* @return an ascending snapshot view of the keys in this map
*/
public Set<K> ascendingKeySet() {
return orderedKeySet(true, Integer.MAX_VALUE);
}
/**
* Returns an unmodifiable snapshot {@link Set} view of the keys contained in this
* map. The set's iterator returns the keys whose order of iteration is the ascending
* order in which its entries are considered eligible for retention, from the
* least-likely to be retained to the most-likely.
*
* Beware that, unlike in {@link #keySet()}, obtaining the set is NOT a
* constant-time operation. Because of the asynchronous nature of the page replacement
* policy, determining the retention ordering requires a traversal of the keys.
*
* @param limit the maximum size of the returned set
* @return an ascending snapshot view of the keys in this map
* @throws IllegalArgumentException if the limit is negative
*/
public Set<K> ascendingKeySetWithLimit(int limit) {
return orderedKeySet(true, limit);
}
/**
* Returns an unmodifiable snapshot {@link Set} view of the keys contained in this
* map. The set's iterator returns the keys whose order of iteration is the descending
* order in which its entries are considered eligible for retention, from the
* most-likely to be retained to the least-likely.
*
* Beware that, unlike in {@link #keySet()}, obtaining the set is NOT a
* constant-time operation. Because of the asynchronous nature of the page replacement
* policy, determining the retention ordering requires a traversal of the keys.
*
* @return a descending snapshot view of the keys in this map
*/
public Set<K> descendingKeySet() {
return orderedKeySet(false, Integer.MAX_VALUE);
}
/**
* Returns an unmodifiable snapshot {@link Set} view of the keys contained in this
* map. The set's iterator returns the keys whose order of iteration is the descending
* order in which its entries are considered eligible for retention, from the
* most-likely to be retained to the least-likely.
*
* Beware that, unlike in {@link #keySet()}, obtaining the set is NOT a
* constant-time operation. Because of the asynchronous nature of the page replacement
* policy, determining the retention ordering requires a traversal of the keys.
*
* @param limit the maximum size of the returned set
* @return a descending snapshot view of the keys in this map
* @throws IllegalArgumentException if the limit is negative
*/
public Set<K> descendingKeySetWithLimit(int limit) {
return orderedKeySet(false, limit);
}
Set<K> orderedKeySet(boolean ascending, int limit) {
if (limit < 0) {
throw new IllegalArgumentException();
}
evictionLock.lock();
try {
drainBuffers(AMORTIZED_DRAIN_THRESHOLD);
int initialCapacity = (weigher == Weighers.singleton()) ? Math.min(
limit,
weightedSize()) : 16;
Set<K> keys = new LinkedHashSet<>(initialCapacity);
Iterator<Node> iterator = ascending
? evictionDeque.iterator()
: evictionDeque.descendingIterator();
while (iterator.hasNext() && (limit > keys.size())) {
keys.add(iterator.next().key);
}
return Collections.unmodifiableSet(keys);
} finally {
evictionLock.unlock();
}
}
@Override
public Collection<V> values() {
Collection<V> vs = values;
return (vs == null) ? (values = new Values()) : vs;
}
@Override
public Set<Entry<K, V>> entrySet() {
Set<Entry<K, V>> es = entrySet;
return (es == null) ? (entrySet = new EntrySet()) : es;
}
/**
* Returns an unmodifiable snapshot {@link Map} view of the mappings contained in this
* map. The map's collections return the mappings whose order of iteration is the
* ascending order in which its entries are considered eligible for retention, from
* the least-likely to be retained to the most-likely.
*
* Beware that obtaining the mappings is NOT a constant-time operation.
* Because of the asynchronous nature of the page replacement policy, determining the
* retention ordering requires a traversal of the entries.
*
* @return an ascending snapshot view of this map
*/
public Map<K, V> ascendingMap() {
return orderedMap(true, Integer.MAX_VALUE);
}
/**
* Returns an unmodifiable snapshot {@link Map} view of the mappings contained in this
* map. The map's collections return the mappings whose order of iteration is the
* ascending order in which its entries are considered eligible for retention, from
* the least-likely to be retained to the most-likely.
*
* Beware that obtaining the mappings is NOT a constant-time operation.
* Because of the asynchronous nature of the page replacement policy, determining the
* retention ordering requires a traversal of the entries.
*
* @param limit the maximum size of the returned map
* @return an ascending snapshot view of this map
* @throws IllegalArgumentException if the limit is negative
*/
public Map<K, V> ascendingMapWithLimit(int limit) {
return orderedMap(true, limit);
}
/**
* Returns an unmodifiable snapshot {@link Map} view of the mappings contained in this
* map. The map's collections return the mappings whose order of iteration is the
* descending order in which its entries are considered eligible for retention, from
* the most-likely to be retained to the least-likely.
*
* Beware that obtaining the mappings is NOT a constant-time operation.
* Because of the asynchronous nature of the page replacement policy, determining the
* retention ordering requires a traversal of the entries.
*
* @return a descending snapshot view of this map
*/
public Map<K, V> descendingMap() {
return orderedMap(false, Integer.MAX_VALUE);
}
/**
* Returns an unmodifiable snapshot {@link Map} view of the mappings contained in this
* map. The map's collections return the mappings whose order of iteration is the
* descending order in which its entries are considered eligible for retention, from
* the most-likely to be retained to the least-likely.
*
* Beware that obtaining the mappings is NOT a constant-time operation.
* Because of the asynchronous nature of the page replacement policy, determining the
* retention ordering requires a traversal of the entries.
*
* @param limit the maximum size of the returned map
* @return a descending snapshot view of this map
* @throws IllegalArgumentException if the limit is negative
*/
public Map<K, V> descendingMapWithLimit(int limit) {
return orderedMap(false, limit);
}
Map<K, V> orderedMap(boolean ascending, int limit) {
if (limit < 0) {
throw new IllegalArgumentException();
}
evictionLock.lock();
try {
drainBuffers(AMORTIZED_DRAIN_THRESHOLD);
int initialCapacity = (weigher == Weighers.singleton()) ? Math.min(
limit,
weightedSize()) : 16;
Map<K, V> map = new LinkedHashMap<>(initialCapacity);
Iterator<Node> iterator = ascending
? evictionDeque.iterator()
: evictionDeque.descendingIterator();
while (iterator.hasNext() && (limit > map.size())) {
Node node = iterator.next();
map.put(node.key, node.getValue());
}
return Collections.unmodifiableMap(map);
} finally {
evictionLock.unlock();
}
}
/**
* A value, its weight, and the entry's status.
*/
static final class WeightedValue<V> {
final int weight;
final V value;
WeightedValue(V value, int weight) {
this.weight = weight;
this.value = value;
}
boolean hasValue(Object o) {
return (o == value) || value.equals(o);
}
/**
* If the entry is available in the hash-table and page replacement policy.
*/
boolean isAlive() {
return weight > 0;
}
/**
* If the entry was removed from the hash-table and is awaiting removal from the
* page replacement policy.
*/
boolean isRetired() {
return weight < 0;
}
/**
* If the entry was removed from the hash-table and the page replacement policy.
*/
boolean isDead() {
return weight == 0;
}
}
/**
* A node contains the key, the weighted value, and the linkage pointers on the
* page-replacement algorithm's data structures.
*/
@SuppressWarnings("serial")
final class Node extends AtomicReference<WeightedValue<V>> implements Linked<Node> {
final K key;
Node prev;
Node next;
/**
* Creates a new, unlinked node.
*/
Node(K key, WeightedValue weightedValue) {
super(weightedValue);
this.key = key;
}
public Node getPrevious() {
return prev;
}
public void setPrevious(Node prev) {
this.prev = prev;
}
public Node getNext() {
return next;
}
public void setNext(Node next) {
this.next = next;
}
/**
* Retrieves the value held by the current WeightedValue.
*/
V getValue() {
return get().value;
}
/**
* Attempts to transition the node from the alive state to the
* retired state.
*
* @param expect the expected weighted value
* @return if successful
*/
boolean tryToRetire(WeightedValue<V> expect) {
if (expect.isAlive()) {
WeightedValue<V> retired = new WeightedValue<>(
expect.value,
-expect.weight);
return compareAndSet(expect, retired);
}
return false;
}
/**
* Atomically transitions the node from the alive state to the
* retired state, if a valid transition.
*/
void makeRetired() {
for (; ; ) {
WeightedValue<V> current = get();
if (!current.isAlive()) {
return;
}
WeightedValue<V> retired = new WeightedValue<>(
current.value,
-current.weight);
if (compareAndSet(current, retired)) {
return;
}
}
}
/**
* Atomically transitions the node to the dead state and decrements the
* weightedSize.
*/
void makeDead() {
for (; ; ) {
WeightedValue<V> current = get();
WeightedValue<V> dead = new WeightedValue<>(current.value, 0);
if (compareAndSet(current, dead)) {
weightedSize -= Math.abs(current.weight);
return;
}
}
}
}
/**
* An adapter to safely externalize the keys.
*/
final class KeySet extends AbstractSet<K> {
final SafeLinkedMap<K, V> map = SafeLinkedMap.this;
@Override
public int size() {
return map.size();
}
@Override
public void clear() {
map.clear();
}
@Override
public Iterator<K> iterator() {
return new KeyIterator();
}
@Override
public boolean contains(Object obj) {
return containsKey(obj);
}
@Override
public boolean remove(Object obj) {
return (map.remove(obj) != null);
}
@Override
public Object[] toArray() {
return map.data.keySet().toArray();
}
@Override
public <T> T[] toArray(T[] array) {
return map.data.keySet().toArray(array);
}
}
/**
* An adapter to safely externalize the key iterator.
*/
final class KeyIterator implements Iterator<K> {
final Iterator<K> iterator = data.keySet().iterator();
K current;
public boolean hasNext() {
return iterator.hasNext();
}
public K next() {
current = iterator.next();
return current;
}
public void remove() {
if (current == null) {
throw new IllegalStateException();
}
SafeLinkedMap.this.remove(current);
current = null;
}
}
/**
* An adapter to safely externalize the values.
*/
final class Values extends AbstractCollection<V> {
@Override
public int size() {
return SafeLinkedMap.this.size();
}
@Override
public void clear() {
SafeLinkedMap.this.clear();
}
@Override
public Iterator<V> iterator() {
return new ValueIterator();
}
@Override
public boolean contains(Object o) {
return containsValue(o);
}
}
/**
* An adapter to safely externalize the value iterator.
*/
final class ValueIterator implements Iterator<V> {
final Iterator<Node> iterator = data.values().iterator();
Node current;
public boolean hasNext() {
return iterator.hasNext();
}
public V next() {
current = iterator.next();
return current.getValue();
}
public void remove() {
if (current == null) {
throw new IllegalStateException();
}
SafeLinkedMap.this.remove(current.key);
current = null;
}
}
/**
* An adapter to safely externalize the entries.
*/
final class EntrySet extends AbstractSet<Entry<K, V>> {
final SafeLinkedMap<K, V> map = SafeLinkedMap.this;
@Override
public int size() {
return map.size();
}
@Override
public void clear() {
map.clear();
}
@Override
public Iterator<Entry<K, V>> iterator() {
return new EntryIterator();
}
@Override
public boolean contains(Object obj) {
if (!(obj instanceof Entry<?, ?>)) {
return false;
}
Entry<?, ?> entry = (Entry<?, ?>) obj;
Node node = map.data.get(entry.getKey());
return (node != null) && (node.getValue().equals(entry.getValue()));
}
@Override
public boolean add(Entry<K, V> entry) {
return (map.putIfAbsent(entry.getKey(), entry.getValue()) == null);
}
@Override
public boolean remove(Object obj) {
if (!(obj instanceof Entry<?, ?>)) {
return false;
}
Entry<?, ?> entry = (Entry<?, ?>) obj;
return map.remove(entry.getKey(), entry.getValue());
}
}
/**
* An adapter to safely externalize the entry iterator.
*/
final class EntryIterator implements Iterator<Entry<K, V>> {
final Iterator<Node> iterator = data.values().iterator();
Node current;
public boolean hasNext() {
return iterator.hasNext();
}
public Entry<K, V> next() {
current = iterator.next();
return new WriteThroughEntry(current);
}
public void remove() {
if (current == null) {
throw new IllegalStateException();
}
SafeLinkedMap.this.remove(current.key);
current = null;
}
}
/**
* An entry that allows updates to write through to the map.
*/
final class WriteThroughEntry extends AbstractMap.SimpleEntry<K, V> {
static final long serialVersionUID = 1;
WriteThroughEntry(Node node) {
super(node.key, node.getValue());
}
@Override
public V setValue(V value) {
put(getKey(), value);
return super.setValue(value);
}
Object writeReplace() {
return new AbstractMap.SimpleEntry<>(this);
}
}
/**
* A weigher that enforces that the weight falls within a valid range.
*/
static final class BoundedWeigher<V> implements Weigher<V>, Serializable {
static final long serialVersionUID = 1;
final Weigher<? super V> weigher;
BoundedWeigher(Weigher<? super V> weigher) {
checkNotNull(weigher);
this.weigher = weigher;
}
public int weightOf(V value) {
int weight = weigher.weightOf(value);
if ((weight < 1) || (weight > MAXIMUM_WEIGHT)) {
throw new IllegalArgumentException("invalid weight");
}
return weight;
}
Object writeReplace() {
return weigher;
}
}
/**
* A task that catches up the page replacement policy.
*/
static final class CatchUpTask implements Runnable {
final WeakReference<SafeLinkedMap<?, ?>> mapRef;
CatchUpTask(SafeLinkedMap<?, ?> map) {
this.mapRef = new WeakReference<SafeLinkedMap<?, ?>>(map);
}
public void run() {
SafeLinkedMap<?, ?> map = mapRef.get();
if (map == null) {
throw new CancellationException();
}
int pendingTasks = 0;
for (int i = 0; i < map.buffers.length; i++) {
pendingTasks += map.bufferLengths.get(i);
}
if (pendingTasks != 0) {
map.tryToDrainBuffers(pendingTasks + BUFFER_THRESHOLD);
}
}
}
/**
* An executor that is always terminated.
*/
static final class DisabledExecutorService extends AbstractExecutorService {
public boolean isShutdown() {
return true;
}
public boolean isTerminated() {
return true;
}
public void shutdown() {
}
public List<Runnable> shutdownNow() {
return Collections.emptyList();
}
public boolean awaitTermination(long timeout, TimeUnit unit) {
return true;
}
public void execute(Runnable command) {
throw new RejectedExecutionException();
}
}
/**
* A queue that discards all additions and is always empty.
*/
static final class DiscardingQueue extends AbstractQueue<Object>