/*
* Copyright (C) 2022-2022 S.Violet
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Project GitHub: https://github.com/shepherdviolet/glacimon
* Email: [email protected]
*/
package com.github.shepherdviolet.glacimon.java.datastruc.cache;
import java.util.LinkedHashMap;
import java.util.Map;
/**
* BEGIN LAYOUTLIB CHANGE
* This is a custom version that doesn't use the non standard LinkedHashMap#eldest.
* END LAYOUTLIB CHANGE
*
* A cache that holds strong references to a limited number of values. Each time
* a value is accessed, it is moved to the head of a queue. When a value is
* added to a full cache, the value at the end of that queue is evicted and may
* become eligible for garbage collection.
*
* If your cached values hold resources that need to be explicitly released,
* override {@link #entryRemoved}.
*
* <p>If a cache miss should be computed on demand for the corresponding keys,
* override {@link #create}. This simplifies the calling code, allowing it to
* assume a value will always be returned, even when there's a cache miss.
*
* <p>By default, the cache size is measured in the number of entries. Override
* {@link #sizeOf} to size the cache in different units. For example, this cache
* is limited to 4MiB of bitmaps:
*
* <pre>   {@code
* int cacheSize = 4 * 1024 * 1024; // 4MiB
* LruCache<String, Bitmap> bitmapCache = new LruCache<String, Bitmap>(cacheSize) {
* protected int sizeOf(String key, Bitmap value) {
* return value.getByteCount();
* }
* }}</pre>
*
* This class is thread-safe. Perform multiple cache operations atomically by
* synchronizing on the cache:
* <pre>   {@code
* synchronized (cache) {
* if (cache.get(key) == null) {
* cache.put(key, value);
* }
* }}</pre>
*
* This class does not allow null to be used as a key or value. A return
* value of null from {@link #get}, {@link #put} or {@link #remove} is
* unambiguous: the key was not in the cache.
*
* <p>This class appeared in Android 3.1 (Honeycomb MR1); it's available as part
* of Android's
* Support Package for earlier releases.
*
* @author Google
*/
public class LruCache<K, V> {
private final LinkedHashMap<K, V> map;
/** Size of this cache in units. Not necessarily the number of elements. */
private int size;
private int maxSize;
private int putCount;
private int createCount;
private int evictionCount;
private int hitCount;
private int missCount;
private boolean trimRecentItem = false;
/**
* @param maxSize for caches that do not override {@link #sizeOf}, this is
* the maximum number of entries in the cache. For all other caches,
* this is the maximum sum of the sizes of the entries in this cache.
*/
public LruCache(int maxSize){
this(maxSize, false);
}
/**
* @param maxSize for caches that do not override {@link #sizeOf}, this is
* the maximum number of entries in the cache. For all other caches,
* this is the maximum sum of the sizes of the entries in this cache.
* @param trimRecentItem if true, the most recently used entry is evicted first;
* if false (the default), the least recently used entry is evicted first
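* <p>For example (illustrative only), with {@code trimRecentItem} enabled the
* entry that a plain LRU cache would keep longest is the first one to go:
* <pre>   {@code
* LruCache<String, String> cache = new LruCache<String, String>(2, true);
* cache.put("a", "1");
* cache.put("b", "2");
* cache.put("c", "3"); // cache overflows; the most recently used entry ("c") is evicted
* }</pre>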
*/
public LruCache(int maxSize, boolean trimRecentItem) {
if (maxSize <= 0) {
throw new IllegalArgumentException("maxSize <= 0");
}
this.maxSize = maxSize;
this.map = new LinkedHashMap<K, V>(0, 0.75f, true);
this.trimRecentItem = trimRecentItem;
}
/**
* Sets the size of the cache.
* @param maxSize The new maximum size.
*/
public void resize(int maxSize) {
if (maxSize <= 0) {
throw new IllegalArgumentException("maxSize <= 0");
}
synchronized (this) {
this.maxSize = maxSize;
}
trimToSize(maxSize);
}
/**
* Returns the value for {@code key} if it exists in the cache or can be
* created by {@code #create}. If a value was returned, it is moved to the
* head of the queue. This returns null if a value is not cached and cannot
* be created.
*/
public V get(K key) {
if (key == null) {
throw new NullPointerException("key == null");
}
V mapValue;
synchronized (this) {
mapValue = map.get(key);
if (mapValue != null) {
hitCount++;
return mapValue;
}
missCount++;
}
/*
* Attempt to create a value. This may take a long time, and the map
* may be different when create() returns. If a conflicting value was
* added to the map while create() was working, we leave that value in
* the map and release the created value.
*/
V createdValue = create(key);
if (createdValue == null) {
return null;
}
synchronized (this) {
createCount++;
mapValue = map.put(key, createdValue);
if (mapValue != null) {
// There was a conflict so undo that last put
map.put(key, mapValue);
} else {
size += safeSizeOf(key, createdValue);
}
}
if (mapValue != null) {
entryRemoved(false, key, createdValue, mapValue);
return mapValue;
} else {
trimToSize(maxSize);
return createdValue;
}
}
/**
* Caches {@code value} for {@code key}. The value is moved to the head of
* the queue.
*
* @return the previous value mapped by {@code key}.
*/
public V put(K key, V value) {
if (key == null || value == null) {
throw new NullPointerException("key == null || value == null");
}
V previous;
synchronized (this) {
putCount++;
size += safeSizeOf(key, value);
previous = map.put(key, value);
if (previous != null) {
size -= safeSizeOf(key, previous);
}
}
if (previous != null) {
entryRemoved(false, key, previous, value);
}
trimToSize(maxSize);
return previous;
}
/**
* @param maxSize the maximum size of the cache before returning. May be -1
* to evict even 0-sized elements.
*/
protected void trimToSize(int maxSize) {
while (true) {
K key;
V value;
synchronized (this) {
if (size < 0 || (map.isEmpty() && size != 0)) {
throw new IllegalStateException(getClass().getName()
+ ".sizeOf() is reporting inconsistent results!");
}
if (size <= maxSize) {
break;
}
// BEGIN LAYOUTLIB CHANGE
// get the last item in the linked list.
// This is not efficient, the goal here is to minimize the changes
// compared to the platform version.
Map.Entry<K, V> toEvict = null;
for (Map.Entry<K, V> entry : map.entrySet()) {
toEvict = entry;
// if trimRecentItem is true, keep iterating so the last (most recently used) entry is evicted;
// if false (the default), stop at the first (least recently used) entry
if (!trimRecentItem){
break;
}
}
// END LAYOUTLIB CHANGE
if (toEvict == null) {
break;
}
key = toEvict.getKey();
value = toEvict.getValue();
map.remove(key);
size -= safeSizeOf(key, value);
evictionCount++;
}
entryRemoved(true, key, value, null);
}
}
/**
* Removes the entry for {@code key} if it exists.
*
* @return the previous value mapped by {@code key}.
*/
public V remove(K key) {
if (key == null) {
throw new NullPointerException("key == null");
}
V previous;
synchronized (this) {
previous = map.remove(key);
if (previous != null) {
size -= safeSizeOf(key, previous);
}
}
if (previous != null) {
entryRemoved(false, key, previous, null);
}
return previous;
}
/**
* Called for entries that have been evicted or removed. This method is
* invoked when a value is evicted to make space, removed by a call to
* {@link #remove}, or replaced by a call to {@link #put}. The default
* implementation does nothing.
*
* The method is called without synchronization: other threads may
* access the cache while this method is executing.
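*
* <p>For illustration only, a sketch of a subclass that releases resources held by
* removed values; the {@code InputStream} value type is just an assumption for this
* example and is not implied by the cache itself:
* <pre>   {@code
* LruCache<String, InputStream> streamCache = new LruCache<String, InputStream>(16) {
* protected void entryRemoved(boolean evicted, String key,
* InputStream oldValue, InputStream newValue) {
* try {
* oldValue.close(); // release the resource held by the removed value
* } catch (IOException ignored) {
* }
* }
* };
* }</pre>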
*
* @param evicted true if the entry is being removed to make space, false
* if the removal was caused by a {@link #put} or {@link #remove}.
* @param newValue the new value for {@code key}, if it exists. If non-null,
* this removal was caused by a {@link #put}. Otherwise it was caused by
* an eviction or a {@link #remove}.
*/
protected void entryRemoved(boolean evicted, K key, V oldValue, V newValue) {}
/**
* Called after a cache miss to compute a value for the corresponding key.
* Returns the computed value or null if no value can be computed. The
* default implementation returns null.
*
* <p>The method is called without synchronization: other threads may
* access the cache while this method is executing.
*
* <p>If a value for {@code key} exists in the cache when this method
* returns, the created value will be released with {@link #entryRemoved}
* and discarded. This can occur when multiple threads request the same key
* at the same time (causing multiple values to be created), or when one
* thread calls {@link #put} while another is creating a value for the same
* key.
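*
* <p>For illustration only, a sketch of a subclass that computes missing values;
* {@code loadFromDatabase} is a hypothetical loader, not part of this class:
* <pre>   {@code
* LruCache<String, byte[]> cache = new LruCache<String, byte[]>(128) {
* protected byte[] create(String key) {
* // return null if nothing can be computed for this key
* return loadFromDatabase(key);
* }
* };
* }</pre>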
*/
protected V create(K key) {
return null;
}
protected int safeSizeOf(K key, V value) {
int result = sizeOf(key, value);
if (result < 0) {
throw new IllegalStateException("Negative size: " + key + "=" + value);
}
return result;
}
/**
* Returns the size of the entry for {@code key} and {@code value} in
* user-defined units. The default implementation returns 1 so that size
* is the number of entries and max size is the maximum number of entries.
*
* <p>An entry's size must not change while it is in the cache.
*/
protected int sizeOf(K key, V value) {
return 1;
}
/**
* Clear the cache, calling {@link #entryRemoved} on each removed entry.
*/
public final void evictAll() {
// -1 will evict 0-sized elements
trimToSize(-1);
}
/**
* For caches that do not override {@link #sizeOf}, this returns the number
* of entries in the cache. For all other caches, this returns the sum of
* the sizes of the entries in this cache.
*/
public synchronized final int size() {
return size;
}
/**
* For caches that do not override {@link #sizeOf}, this returns the maximum
* number of entries in the cache. For all other caches, this returns the
* maximum sum of the sizes of the entries in this cache.
*/
public synchronized final int maxSize() {
return maxSize;
}
/**
* Returns the number of times {@link #get} returned a value that was
* already present in the cache.
*/
public synchronized final int hitCount() {
return hitCount;
}
/**
* Returns the number of times {@link #get} returned null or required a new
* value to be created.
*/
public synchronized final int missCount() {
return missCount;
}
/**
* Returns the number of times {@link #create(Object)} returned a value.
*/
public synchronized final int createCount() {
return createCount;
}
/**
* Returns the number of times {@link #put} was called.
*/
public synchronized final int putCount() {
return putCount;
}
/**
* Returns the number of values that have been evicted.
*/
public synchronized final int evictionCount() {
return evictionCount;
}
/**
* Returns a copy of the current contents of the cache, ordered from least
* recently accessed to most recently accessed.
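*
* <p>For example (assuming a {@code LruCache<String, String>} named {@code cache}),
* the entries can be walked from least to most recently accessed:
* <pre>   {@code
* for (Map.Entry<String, String> entry : cache.snapshot().entrySet()) {
* System.out.println(entry.getKey() + " = " + entry.getValue());
* }
* }</pre>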
*/
public synchronized final Map<K, V> snapshot() {
return new LinkedHashMap<K, V>(map);
}
@Override public synchronized final String toString() {
int accesses = hitCount + missCount;
int hitPercent = accesses != 0 ? (100 * hitCount / accesses) : 0;
return String.format("LruCache[maxSize=%d,hits=%d,misses=%d,hitRate=%d%%]",
maxSize, hitCount, missCount, hitPercent);
}
/////////////////////////////////////////////////////////////////////////////////////
// getter/setter
protected void setSize(int size) {
this.size = size;
}
protected void setEvictionCount(int evictionCount) {
this.evictionCount = evictionCount;
}
protected LinkedHashMap<K, V> getMap() {
return map;
}
protected int getMaxSize(){
return maxSize;
}
protected int getEvictionCount() {
return evictionCount;
}
/**
* @return the number of entries currently in the cache
*/
public int quantity(){
return map.size();
}
}