// com.tangosol.net.cache.ReadWriteBackingMap (from the "coherence" artifact)
/*
* Copyright (c) 2000, 2023, Oracle and/or its affiliates.
*
* Licensed under the Universal Permissive License v 1.0 as shown at
* https://oss.oracle.com/licenses/upl.
*/
package com.tangosol.net.cache;
import com.oracle.coherence.common.base.Blocking;
import com.tangosol.application.ContainerHelper;
import com.tangosol.coherence.config.Config;
import com.tangosol.internal.tracing.Scope;
import com.tangosol.internal.tracing.Span;
import com.tangosol.internal.tracing.SpanContext;
import com.tangosol.internal.tracing.TracingHelper;
import com.tangosol.license.CoherenceCommunityEdition;
import com.tangosol.net.BackingMapManagerContext;
import com.tangosol.net.CacheService;
import com.tangosol.net.Guardian;
import com.tangosol.net.Guardian.GuardContext;
import com.tangosol.net.GuardSupport;
import com.tangosol.util.AbstractKeyBasedMap;
import com.tangosol.util.Base;
import com.tangosol.util.Binary;
import com.tangosol.util.BinaryEntry;
import com.tangosol.util.ClassHelper;
import com.tangosol.util.ConcurrentMap;
import com.tangosol.util.ConverterCollections;
import com.tangosol.util.Daemon;
import com.tangosol.util.EntrySetMap;
import com.tangosol.util.ExternalizableHelper;
import com.tangosol.util.Filter;
import com.tangosol.util.InflatableList;
import com.tangosol.util.LongArray;
import com.tangosol.util.MapEvent;
import com.tangosol.util.MapListener;
import com.tangosol.util.MapListenerSupport;
import com.tangosol.util.NullImplementation;
import com.tangosol.util.ObservableMap;
import com.tangosol.util.RecyclingLinkedList;
import com.tangosol.util.SafeHashMap;
import com.tangosol.util.SafeHashSet;
import com.tangosol.util.SegmentedConcurrentMap;
import com.tangosol.util.SimpleEnumerator;
import com.tangosol.util.SimpleMapEntry;
import com.tangosol.util.SparseArray;
import com.tangosol.util.SubSet;
import com.tangosol.util.WrapperException;
import java.lang.reflect.Array;
import java.util.AbstractCollection;
import java.util.AbstractMap;
import java.util.AbstractSet;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
/**
* Backing Map implementation that provides a size-limited cache of a
* persistent store and supports configurable write-behind and refresh-
* ahead caching.
*
* This implementation is not intended to support null keys or null
* values.
*
* @author cp 2002.11.25
* @author jh 2005.02.08
*/
public class ReadWriteBackingMap
extends AbstractMap
implements CacheMap
{
// ----- constructors ---------------------------------------------------
/**
 * Construct a ReadWriteBackingMap based on a CacheLoader (CacheStore).
 *
 * Delegates to {@link #init} with fReadOnly == true, write-behind disabled
 * (cWriteBehindSeconds == 0) and refresh-ahead disabled
 * (dflRefreshAheadFactor == 0.0).
 *
 * @param ctxService   the context provided by the CacheService
 *                     which is using this backing map
 * @param mapInternal  the ObservableMap used to store the data
 *                     internally in this backing map
 * @param mapMisses    the Map used to cache CacheLoader misses (optional)
 * @param loader       the CacheLoader responsible for the persistence of
 *                     the cached data (optional)
 */
public ReadWriteBackingMap(BackingMapManagerContext ctxService,
ObservableMap mapInternal, Map mapMisses, CacheLoader loader)
{
init(ctxService, mapInternal, mapMisses, loader, null, null, true, 0, 0.0);
}
/**
 * Construct a ReadWriteBackingMap based on a CacheLoader (CacheStore).
 *
 * @param ctxService             the context provided by the CacheService
 *                               which is using this backing map
 * @param mapInternal            the ObservableMap used to store the data
 *                               internally in this backing map
 * @param mapMisses              the Map used to cache CacheStore misses
 *                               (optional)
 * @param loader                 the CacheLoader responsible for the
 *                               persistence of the cached data (optional)
 * @param fReadOnly              pass true if the specified loader is in fact
 *                               a CacheStore that needs to be used only for
 *                               read operations; changes to the cache will
 *                               not be persisted
 * @param cWriteBehindSeconds    number of seconds to write if there is a
 *                               CacheStore; zero disables write-behind
 *                               caching, which (combined with !fReadOnly)
 *                               implies write-through
 * @param dflRefreshAheadFactor  the interval before an entry expiration time
 *                               (expressed as a percentage of the internal
 *                               cache expiration interval) during which an
 *                               asynchronous load request for the
 *                               entry will be scheduled; zero disables
 *                               refresh-ahead; only applicable when
 *                               the mapInternal parameter is an
 *                               instance of {@link ConfigurableCacheMap}
 */
public ReadWriteBackingMap(BackingMapManagerContext ctxService, ObservableMap mapInternal,
Map mapMisses, CacheLoader loader, boolean fReadOnly, int cWriteBehindSeconds,
double dflRefreshAheadFactor)
{
init(ctxService, mapInternal, mapMisses, loader, null, null, fReadOnly,
cWriteBehindSeconds, dflRefreshAheadFactor);
}
/**
 * Construct a ReadWriteBackingMap based on a BinaryEntryStore.
 *
 * @param ctxService             the context provided by the CacheService
 *                               which is using this backing map
 * @param mapInternal            the ObservableMap used to store the data
 *                               internally in this backing map
 * @param mapMisses              the Map used to cache CacheStore misses
 *                               (optional)
 * @param storeBinary            the BinaryEntryStore responsible for the
 *                               persistence of the cached data (optional)
 * @param fReadOnly              pass true if the specified store needs to
 *                               be used only for read operations; changes
 *                               to the cache will not be persisted
 * @param cWriteBehindSeconds    number of seconds to write if there is a
 *                               CacheStore; zero disables write-behind
 *                               caching, which (combined with !fReadOnly)
 *                               implies write-through
 * @param dflRefreshAheadFactor  the interval before an entry expiration time
 *                               (expressed as a percentage of the internal
 *                               cache expiration interval) during which an
 *                               asynchronous load request for the
 *                               entry will be scheduled; zero disables
 *                               refresh-ahead; only applicable when
 *                               the mapInternal parameter is an
 *                               instance of {@link ConfigurableCacheMap}
 * @since Coherence 3.6
 */
public ReadWriteBackingMap(BackingMapManagerContext ctxService, ObservableMap mapInternal,
Map mapMisses, BinaryEntryStore storeBinary, boolean fReadOnly, int cWriteBehindSeconds,
double dflRefreshAheadFactor)
{
init(ctxService, mapInternal, mapMisses, null, storeBinary, null, fReadOnly,
cWriteBehindSeconds, dflRefreshAheadFactor);
}
/**
 * Construct a ReadWriteBackingMap based on a NonBlockingEntryStore.
 *
 * @param ctxService             the context provided by the CacheService
 *                               which is using this backing map
 * @param mapInternal            the ObservableMap used to store the data
 *                               internally in this backing map
 * @param mapMisses              the Map used to cache CacheStore misses
 *                               (optional)
 * @param storeBinary            the NonBlockingEntryStore responsible for the
 *                               persistence of the cached data (optional)
 * @param fReadOnly              pass true if the specified store needs to
 *                               be used only for read operations; changes
 *                               to the cache will not be persisted
 * @param cWriteBehindSeconds    number of seconds to write if there is a
 *                               CacheStore; zero disables write-behind
 *                               caching, which (combined with !fReadOnly)
 *                               implies write-through
 * @param dflRefreshAheadFactor  the interval before an entry expiration time
 *                               (expressed as a percentage of the internal
 *                               cache expiration interval) during which an
 *                               asynchronous load request for the
 *                               entry will be scheduled; zero disables
 *                               refresh-ahead; only applicable when
 *                               the mapInternal parameter is an
 *                               instance of {@link ConfigurableCacheMap}
 * @since Coherence 21.06
 */
public ReadWriteBackingMap(BackingMapManagerContext ctxService, ObservableMap mapInternal,
Map mapMisses, NonBlockingEntryStore storeBinary, boolean fReadOnly, int cWriteBehindSeconds,
double dflRefreshAheadFactor)
{
init(ctxService, mapInternal, mapMisses, null, null, storeBinary, fReadOnly,
cWriteBehindSeconds, dflRefreshAheadFactor);
}
/**
 * Initialize the ReadWriteBackingMap.
 *
 * Exactly one of loader, storeBinary and storeNonBlocking may be non-null;
 * if all are null the backing map operates as a plain in-memory map (no
 * misses cache, no read/write threads).
 *
 * @param ctxService             the context provided by the CacheService
 *                               which is using this backing map
 * @param mapInternal            the ObservableMap used to store the data
 *                               internally in this backing map
 * @param mapMisses              the Map used to cache CacheStore misses
 *                               (optional)
 * @param loader                 the object responsible for the
 *                               persistence of the cached data (optional)
 * @param storeBinary            the BinaryEntryStore to wrap (mutually
 *                               exclusive with storeNonBlocking)
 * @param storeNonBlocking       the NonBlockingEntryStore to wrap
 *                               (mutually exclusive with storeBinary)
 * @param fReadOnly              pass true if the specified loader is in
 *                               fact a CacheStore that needs to be used
 *                               only for read operations; changes to the
 *                               cache will not be persisted
 * @param cWriteBehindSeconds    number of seconds to write if there is a
 *                               CacheStore; zero disables write-behind
 *                               caching, which (combined with !fReadOnly)
 *                               implies write-through
 * @param dflRefreshAheadFactor  the interval before an entry expiration
 *                               time (expressed as a percentage of the
 *                               internal cache expiration interval)
 *                               during which an asynchronous load request
 *                               for the entry will be scheduled; zero
 *                               disables refresh-ahead; only applicable
 *                               when the mapInternal parameter
 *                               is an instance of
 *                               {@link ConfigurableCacheMap}
 */
private void init(BackingMapManagerContext ctxService, ObservableMap mapInternal,
Map mapMisses, CacheLoader loader, BinaryEntryStore storeBinary, NonBlockingEntryStore storeNonBlocking,
boolean fReadOnly, int cWriteBehindSeconds, double dflRefreshAheadFactor)
{
m_ctxService = ctxService;
configureInternalCache(mapInternal);
if (loader != null || storeBinary != null || storeNonBlocking != null)
{
// the misses map is only applicable when there is a valid store
m_mapMisses = mapMisses;
if (loader == null)
{
// wrap whichever of the two (mutually exclusive) entry stores was supplied
configureCacheStore(storeBinary == null
? instantiateCacheStoreWrapper(storeNonBlocking)
: instantiateCacheStoreWrapper(storeBinary),
fReadOnly);
}
else if (loader instanceof CacheStore)
{
configureCacheStore(
instantiateCacheStoreWrapper((CacheStore) loader), fReadOnly);
}
else
{
// a plain CacheLoader cannot write, so force read-only mode
configureCacheStore(
instantiateCacheStoreWrapper(
instantiateCacheLoaderCacheStore(loader)), true);
}
// configure the optional write-behind queue and daemon
configureWriteThread(cWriteBehindSeconds);
// configure the optional refresh-ahead queue and daemon
configureReadThread(dflRefreshAheadFactor);
}
}
// ----- accessors ------------------------------------------------------
/**
 * Get the context information provided by the CacheService.
 *
 * @return the CacheService's {@link BackingMapManagerContext} object that
 *         it provided to the BackingMapManager that created this backing
 *         map
 */
public BackingMapManagerContext getContext()
{
return m_ctxService;
}
/**
 * Obtain the CacheService that this backing map belongs to.
 *
 * @return the CacheService associated with the backing map's context
 */
public CacheService getCacheService()
    {
    BackingMapManagerContext ctx = getContext();
    return ctx.getCacheService();
    }
/**
 * Determine if exceptions caught during synchronous CacheStore operations
 * are rethrown to the calling thread; if false, exceptions are logged
 * instead of being propagated.
 *
 * @return true if CacheStore exceptions are rethrown to the calling thread
 */
public boolean isRethrowExceptions()
{
return m_fRethrowExceptions;
}
/**
 * Set the value of the flag that determines if exceptions caught during
 * synchronous CacheStore operations are rethrown to the calling thread; if
 * false, exceptions are logged.
 *
 * @param fRethrow  true to indicate that exceptions should be rethrown
 */
public void setRethrowExceptions(boolean fRethrow)
{
m_fRethrowExceptions = fRethrow;
}
/**
 * Return the refresh-ahead factor.
 *
 * The refresh-ahead factor is used to calculate the "soft-expiration" time
 * for cache entries. Soft-expiration is the point in time prior to the
 * actual expiration after which any access request for an entry will
 * schedule an asynchronous load request for the entry.
 *
 * The value of this property is expressed as a percentage of the internal
 * cache expiration interval (a double in [0.0, 1.0]). If zero,
 * refresh-ahead scheduling is disabled.
 *
 * @return the refresh-ahead factor
 */
public double getRefreshAheadFactor()
{
return m_dflRefreshAheadFactor;
}
/**
 * Set the refresh-ahead factor, expressed as a percentage of the internal
 * cache expiration interval. Valid values are doubles in the interval
 * [0.0, 1.0].
 *
 * This method has no effect if refresh-ahead is disabled.
 *
 * @param dflRefreshAheadFactor  the new refresh-ahead factor
 *
 * @throws IllegalArgumentException if the factor is outside [0.0, 1.0]
 *
 * @see #getRefreshAheadFactor
 */
public void setRefreshAheadFactor(double dflRefreshAheadFactor)
    {
    if (!isRefreshAhead())
        {
        // refresh-ahead is not configured; silently ignore
        return;
        }

    // note: the negated range test also rejects NaN
    if (!(dflRefreshAheadFactor >= 0.0 && dflRefreshAheadFactor <= 1.0))
        {
        throw new IllegalArgumentException("Invalid refresh-ahead factor: "
                + dflRefreshAheadFactor);
        }

    m_dflRefreshAheadFactor = dflRefreshAheadFactor;
    }
/**
 * Determine if the backing map should send data changes through the
 * CacheStore, or should just keep them in memory.
 *
 * @return false to send changes to CacheStore (a read-write cache), or
 *         true to just keep them in memory (a read-only cache)
 */
public boolean isReadOnly()
{
return m_fReadOnly;
}
/**
 * Determine if the backing map preemptively reads soon-to-be expired
 * entries on a refresh-ahead thread.
 *
 * @return true iff both a cache store and a read queue are configured
 */
public boolean isRefreshAhead()
    {
    if (getCacheStore() == null)
        {
        return false;
        }
    return getReadQueue() != null;
    }
/**
 * Get the maximum size of the write-behind batch.
 *
 * @return the maximum number of entries in the write-behind batch
 */
public int getWriteMaxBatchSize()
{
return m_cWriteMaxBatchSize;
}
/**
 * Set the maximum size of a batch. The size is used to reduce the size
 * of the write-behind batches and the amount of [scratch] memory used to
 * keep de-serialized entries passed to the storeAll operations.
 *
 * @param cWriteMaxBatchSize  the maximum batch size; must be positive
 *
 * @throws IllegalArgumentException if the size is zero or negative
 */
public void setWriteMaxBatchSize(int cWriteMaxBatchSize)
    {
    if (cWriteMaxBatchSize > 0)
        {
        m_cWriteMaxBatchSize = cWriteMaxBatchSize;
        }
    else
        {
        throw new IllegalArgumentException(
                "Invalid batch size: " + cWriteMaxBatchSize);
        }
    }
/**
 * Return the write-batch factor.
 *
 * The write-batch factor is used to calculate the "soft-ripe" time for
 * write-behind queue entries. A queue entry is considered to be "ripe"
 * for a write operation if it has been in the write-behind queue for no
 * less than the write-behind interval. The "soft-ripe" time is the point
 * in time prior to the actual ripe time after which an entry will be
 * included in a batched asynchronous write operation to the CacheStore
 * (along with all other ripe and soft-ripe entries). In other words, a
 * soft-ripe entry is an entry that has been in the write-behind queue
 * for at least the following duration:
 *
 *   D' = (1.0 - F)*D
 * where:
 *
 *   D = write-behind delay
 *   F = write-batch factor
 * Conceptually, the write-behind thread uses the following logic when
 * performing a batched update:
 *
 * 1. The thread waits for a queued entry to become ripe.
 * 2. When an entry becomes ripe, the thread dequeues all ripe and
 *    soft-ripe entries in the queue.
 * 3. The thread then writes all ripe and soft-ripe entries either via
 *    {@link CacheStore#store store()} (if there is only the single ripe
 *    entry) or {@link CacheStore#storeAll storeAll()} (if there are
 *    multiple ripe/soft-ripe entries).
 * 4. The thread then repeats step 1.
 *
 * This property is only applicable if asynchronous writes are enabled and
 * the CacheStore implements the {@link CacheStore#storeAll storeAll()}
 * method.
 *
 * The value of this property is expressed as a percentage of the
 * {@link #getWriteBehindSeconds write-behind} interval. Valid values are
 * doubles in the interval [0.0, 1.0].
 *
 * @return the write-batch factor
 */
public double getWriteBatchFactor()
{
return m_dflWriteBatchFactor;
}
/**
 * Set the write-batch factor, expressed as a percentage of the
 * write-behind interval. Valid values are doubles in the interval
 * [0.0, 1.0].
 *
 * This method has no effect if write-behind is disabled.
 *
 * @param dflWriteBatchFactor  the new write-batch factor
 *
 * @throws IllegalArgumentException if the factor is outside [0.0, 1.0]
 *
 * @see #getWriteBatchFactor
 */
public void setWriteBatchFactor(double dflWriteBatchFactor)
    {
    if (!isWriteBehind())
        {
        // write-behind is not configured; silently ignore
        return;
        }

    // note: the negated range test also rejects NaN
    if (!(dflWriteBatchFactor >= 0.0 && dflWriteBatchFactor <= 1.0))
        {
        throw new IllegalArgumentException("Invalid write-batch factor: "
                + dflWriteBatchFactor);
        }

    m_dflWriteBatchFactor = dflWriteBatchFactor;
    }
/**
 * Determine if the backing map writes changes on a write-behind thread
 * through the CacheStore.
 *
 * @return true implies changes are queued to be written asynchronously
 */
public boolean isWriteBehind()
    {
    if (isReadOnly() || getCacheStore() == null)
        {
        return false;
        }
    return getWriteQueue() != null;
    }
/**
 * Return the number of seconds between write-behind writes to the
 * CacheStore, or 0 if write-behind is not enabled.
 *
 * Any non-zero millisecond delay below one second is reported as 1.
 *
 * @return the number of seconds between write-behind writes
 */
public int getWriteBehindSeconds()
    {
    long cMillis = getWriteBehindMillis();
    if (cMillis == 0)
        {
        return 0;
        }
    return Math.max(1, (int) (cMillis / 1000));
    }
/**
 * Set the number of seconds between write-behind writes to the CacheStore.
 *
 * This method has no effect if write-behind is not enabled.
 *
 * @param cSecs  the new write-behind delay in seconds
 */
public void setWriteBehindSeconds(int cSecs)
{
setWriteBehindMillis(1000L * cSecs);
}
/**
 * Return the number of milliseconds between write-behind writes to the
 * CacheStore or 0 if write-behind is not enabled.
 *
 * @return the number of milliseconds between write-behind writes
 *
 * @since Coherence 3.4
 */
public long getWriteBehindMillis()
{
return m_cWriteBehindMillis;
}
/**
 * Set the number of milliseconds between write-behind writes to the CacheStore.
 *
 * This method has no effect if write-behind is not enabled.
 *
 * If the internal cache has an expiry delay shorter than the requested
 * delay, the delay is clamped down to the expiry and a message is logged.
 *
 * @param cMillis  the new write-behind delay in milliseconds; must be positive
 *
 * @throws IllegalArgumentException if cMillis is zero or negative
 *
 * @since Coherence 3.4
 */
public void setWriteBehindMillis(long cMillis)
{
if (isWriteBehind())
{
if (cMillis > 0)
{
ConfigurableCacheMap cache = getInternalConfigurableCache();
if (cache != null)
{
// make sure that the internal cache expiry is greater or
// equal to the write-behind delay
int cExpiryMillis = cache.getExpiryDelay();
if (cExpiryMillis > 0 && cExpiryMillis < cMillis)
{
// entries would expire before being written; clamp the
// delay down to the expiry and warn the operator
StringBuilder sb = new StringBuilder()
.append("ReadWriteBackingMap internal cache expiry of ")
.append(cExpiryMillis)
.append(" milliseconds is less than the write-delay of ")
.append(cMillis)
.append(" milliseconds; ");
cMillis = cExpiryMillis;
sb.append("decreasing the write-delay to ")
.append(cMillis)
.append(" milliseconds.");
Base.log(sb.toString());
}
}
m_cWriteBehindMillis = cMillis;
getWriteQueue().setDelayMillis(cMillis);
}
else
{
throw new IllegalArgumentException("Invalid write-behind delay: "
+ cMillis);
}
}
}
/**
 * Return the maximum size of the write-behind queue for which failed
 * CacheStore write operations are requeued, or 0 if write-behind
 * requeueing is disabled.
 *
 * @return the write-behind requeue threshold
 */
public int getWriteRequeueThreshold()
{
return m_cWriteRequeueThreshold;
}
/**
 * Set the maximum size of the write-behind queue for which failed CacheStore
 * write operations are requeued.
 *
 * This method has no effect if write-behind is not enabled.
 *
 * @param cThreshold  the new write-behind requeue threshold; must be &gt;= 0
 *
 * @throws IllegalArgumentException if the threshold is negative
 */
public void setWriteRequeueThreshold(int cThreshold)
    {
    if (!isWriteBehind())
        {
        // write-behind is not configured; silently ignore
        return;
        }

    if (cThreshold < 0)
        {
        throw new IllegalArgumentException("Invalid write requeue threshold: "
                + cThreshold);
        }

    m_cWriteRequeueThreshold = cThreshold;
    }
/**
 * Determine if the backing map writes changes immediately through the
 * CacheStore.
 *
 * @return true implies that changes to the backing map are written
 *         synchronously to the CacheStore
 */
public boolean isWriteThrough()
    {
    if (isReadOnly() || getCacheStore() == null)
        {
        return false;
        }
    // a store with no write queue means writes happen synchronously
    return getWriteQueue() == null;
    }
/**
 * Return the timeout used for CacheStore operations, or 0 if no timeout is
 * specified (meaning the service's default guardian timeout applies).
 *
 * @return the CacheStore timeout in milliseconds
 */
public long getCacheStoreTimeoutMillis()
{
return m_cStoreTimeoutMillis;
}
/**
 * Set the timeout used for CacheStore operations. A value of 0 indicates
 * to use the default guardian timeout of the associated service.
 *
 * If the cache service is a {@link Guardian}, the new timeout is pushed
 * onto both the refresh-ahead and write-behind daemons (when present) and
 * they are flagged to refresh their guard context.
 *
 * @param cStoreTimeoutMillis  the CacheStore timeout, or 0 for the default
 *                             guardian timeout
 */
public void setCacheStoreTimeoutMillis(long cStoreTimeoutMillis)
{
m_cStoreTimeoutMillis = cStoreTimeoutMillis;
CacheService service = getContext().getCacheService();
if (service instanceof Guardian)
{
ReadThread daemonRead = getReadThread();
WriteThread daemonWrite = getWriteThread();
if (daemonRead != null)
{
daemonRead.setGuardPolicy((Guardian) service,
cStoreTimeoutMillis, GUARD_RECOVERY);
daemonRead.m_fRefreshContext = true;
}
if (daemonWrite != null)
{
daemonWrite.setGuardPolicy((Guardian) service,
cStoreTimeoutMillis, GUARD_RECOVERY);
daemonWrite.m_fRefreshContext = true;
}
}
}
/**
 * Set the cache name for ReadThread and WriteThread if not already set.
 *
 * A null or blank name is ignored.
 *
 * @param sCacheName  the name of the cache
 */
public void setCacheName(String sCacheName)
    {
    if (sCacheName == null)
        {
        return;
        }
    if (sCacheName.trim().length() == 0)
        {
        return;
        }
    updateThreadName(getReadThread(), sCacheName);
    updateThreadName(getWriteThread(), sCacheName);
    }
/**
 * Return the size of the write-behind queue if configured, or the
 * number of pending writes of the non-blocking store, if configured.
 * Return -1 if neither.
 *
 * @return number of pending writes, or -1 when not applicable
 */
public int getPendingWrites()
    {
    if (isWriteBehind())
        {
        return getWriteQueue().size();
        }

    StoreWrapper store = getCacheStore();
    if (store == null || store.isBlocking())
        {
        // no store, or a blocking store without write-behind: nothing pending
        return -1;
        }
    return (int) store.f_cPendingAsyncStoreOps.get();
    }
// ----- Map interface --------------------------------------------------
/**
 * Remove everything from the Map.
 *
 * Implemented by iterating the entry set and removing each entry through
 * the iterator, so per-entry removal semantics (store erase, events) apply.
 */
public void clear()
    {
    Iterator iter = entrySet().iterator();
    while (iter.hasNext())
        {
        iter.next();
        iter.remove();
        }
    }
/**
 * Returns true if this map contains a mapping for the specified
 * key. Only the internal (in-memory) cache is consulted; the
 * CacheStore is not queried.
 *
 * @param oKey  the key to test for
 *
 * @return true if this map contains a mapping for the specified
 *         key, false otherwise.
 */
public boolean containsKey(Object oKey)
{
return getInternalCache().containsKey(oKey);
}
/**
 * Returns true if this CachingMap maps one or more keys to the
 * specified value. Only the internal (in-memory) cache is consulted;
 * the CacheStore is not queried.
 *
 * @param oValue  the value to test for
 *
 * @return true if this CachingMap maps one or more keys to the
 *         specified value, false otherwise
 */
public boolean containsValue(Object oValue)
{
return getInternalCache().containsValue(oValue);
}
/**
 * Returns the value to which this map maps the specified key.
 *
 * The key is locked in the control map for the duration of the call.
 * A hit in the misses cache short-circuits to null without touching
 * the store.
 *
 * @param oKey  the key object
 *
 * @return the value to which this map maps the specified key,
 *         or null if the map contains no mapping for this key
 */
public Object get(Object oKey)
{
ConcurrentMap mapControl = getControlMap();
Map mapMisses = getMissesCache();
mapControl.lock(oKey, -1L);
try
{
// check the misses cache
if (mapMisses != null && mapMisses.containsKey(oKey))
{
return null;
}
Object oValue = getFromInternalCache(oKey);
// if the value wasn't found in the in-memory cache; if it's owned
// ("get" is not caused by a re-distribution), load the value from
// the CacheStore and cache the value in the in-memory cache
if (oValue == null && getContext().isKeyOwned(oKey))
{
StoreWrapper store = getCacheStore();
if (store != null)
{
// load the data from the CacheStore
Entry entry = store.load(oKey);
oValue = entry == null ? null : entry.getBinaryValue();
// cache the loaded value (or the miss, when oValue is null)
putToInternalCache(oKey, oValue, extractExpiry(entry));
}
}
return oValue;
}
finally
{
mapControl.unlock(oKey);
}
}
/**
 * Associates the specified value with the specified key in this map.
 * Delegates to {@link #putInternal} with the cache's default expiry.
 *
 * @param oKey    key with which the specified value is to be associated
 * @param oValue  value to be associated with the specified key
 *
 * @return previous value associated with specified key, or null
 *         if there was no mapping for key. A null return can
 *         also indicate that the map previously associated null
 *         with the specified key, if the implementation supports
 *         null values.
 */
public Object put(Object oKey, Object oValue)
{
return putInternal(oKey, oValue, 0L);
}
/**
 * Removes the mapping for this key from this map if present.
 * Expensive: updates both the underlying cache and the local cache.
 * Delegates to {@link #removeInternal} in non-blind mode (the previous
 * value is loaded if needed so it can be returned).
 *
 * @param oKey  key whose mapping is to be removed from the map
 *
 * @return previous value associated with specified key, or null
 *         if there was no mapping for key. A null return can
 *         also indicate that the map previously associated null
 *         with the specified key, if the implementation supports
 *         null values.
 */
public Object remove(Object oKey)
{
return removeInternal(oKey, false);
}
/**
 * Associates the specified values with the respective keys in this map.
 *
 * Be aware that the keys will be locked in the order they are returned from
 * iterating over the map passed in and unlocked at the end of the method.
 * This method is called internally within Coherence and the keys will have
 * been locked at the Service level already, so concurrent calls to this method
 * with the same keys will not be an issue.
 * If this method is somehow called directly by application code, which is not
 * recommended, then it is advisable to pass in a sorted map that sorts the keys
 * by their natural ordering.
 *
 * @param map  keys and values which are to be associated in this map
 */
@SuppressWarnings("unchecked")
@Override
public void putAll(Map map)
{
StoreWrapper store = getCacheStore();
// fall back to per-entry put() when batching cannot help: a single
// entry, write-behind configured, no store, no storeAll support, or
// a read-only map
if (map.size() == 1 || getWriteQueue() != null ||
store == null || !store.isStoreAllSupported() || isReadOnly())
{
super.putAll(map);
return;
}
ConcurrentMap mapControl = getControlMap();
try
{
Map mapMisses = getMissesCache();
Map mapInternal = getInternalCache();
Set setEntries = new LinkedHashSet<>(map.size());
BackingMapManagerContext ctx = getContext();
for (Map.Entry entry : (Set) map.entrySet())
{
Object oKey = entry.getKey();
mapControl.lock(oKey, -1L);
// clear the key from the misses cache
if (mapMisses != null)
{
mapMisses.remove(oKey);
}
cancelOutstandingReads(oKey);
// if key is owned this is a regular put as opposed to a put due to fail-over
if (ctx.isKeyOwned(oKey))
{
setEntries.add(instantiateEntry(oKey, entry.getValue(), mapInternal.get(oKey), 0L));
}
}
if (!setEntries.isEmpty())
{
SubSet setEntriesFailed = new SubSet(setEntries);
Set setSuccess = null;
try
{
store.storeAll(setEntriesFailed);
setSuccess = setEntries;
}
catch (Throwable e)
{
// presumably storeAll removes successfully stored entries
// from the SubSet, so getRemoved() holds the successes;
// only those are committed to the internal map below
setSuccess = setEntriesFailed.getRemoved();
throw Base.ensureRuntimeException(e);
}
finally
{
for (Entry entry : setSuccess)
{
long cMillis = 0L;
Binary binValue = entry.getBinaryValue();
if (entry.isChanged())
{
binValue = entry.getChangedBinaryValue();
// due to technical reasons (event handling), synchronous
// removal (binValue == null) is not currently supported
// instead, we schedule the entry for almost
// instant expiry
cMillis = binValue == null ? 1L : extractExpiry(entry);
}
putToInternalMap(entry.getBinaryKey(), binValue, cMillis);
}
}
}
}
finally
{
// unlock is "forgiving": keys that were never locked are ignored
for (Object oKey : map.keySet())
{
mapControl.unlock(oKey);
}
}
}
/**
 * Put the specified key in internal format and value in
 * internal format into the internal backing map.
 * If the cExpiry parameter is greater than the default expiry
 * value CacheMap.EXPIRY_DEFAULT and the internal map is not an
 * instance of {@link CacheMap} then an exception will be thrown.
 *
 * @param binKey    the key in internal format
 * @param binValue  the value in internal format; null if the value should
 *                  be cached as "missing"
 * @param cExpiry   the cache entry expiry value
 *
 * @return any previous value that was mapped to the key.
 *
 * @throws UnsupportedOperationException if the value of cExpiry is
 *         greater than CacheMap.EXPIRY_DEFAULT and the internal map
 *         is not an instance of {@link CacheMap}.
 */
protected Object putToInternalMap(Object binKey, Object binValue, long cExpiry)
    {
    Map mapInternal = getInternalCache();

    // a CacheMap accepts an explicit expiry regardless of its value
    if (mapInternal instanceof CacheMap)
        {
        CacheMap cacheInternal = (CacheMap) mapInternal;
        return cacheInternal.put(binKey, binValue, cExpiry);
        }

    // a plain Map is usable only when no custom expiry was requested
    if (cExpiry <= CacheMap.EXPIRY_DEFAULT)
        {
        return mapInternal.put(binKey, binValue);
        }

    throw new UnsupportedOperationException(
            "Class \"" + mapInternal.getClass().getName() +
            "\" does not implement CacheMap interface");
    }
/**
 * Implementation of the remove() API.
 *
 * The key is locked in the control map for the duration of the call.
 *
 * @param oKey    key whose mapping is to be removed from the map
 * @param fBlind  if true, the return value will be ignored, so the
 *                previous value is not loaded from the store
 *
 * @return previous value associated with specified key, or null
 */
protected Object removeInternal(Object oKey, boolean fBlind)
{
ConcurrentMap mapControl = getControlMap();
Map mapMisses = getMissesCache();
mapControl.lock(oKey, -1L);
try
{
// clear the key from the misses cache
if (mapMisses != null)
{
mapMisses.remove(oKey);
}
cancelOutstandingReads(oKey);
// similar to put(), but removes cannot be queued, so there are only
// two possibilities:
// (1) read-only: remove in memory only; no CacheStore ops
// (2) write-through or write-behind: immediate erase through
//     CacheStore
// the remove is a potential CacheStore operation even if there is
// no entry in the internal cache except if it's caused by the
// CacheService transferring the entry from this backing map;
// make sure it is owned by this node before delegating to the store
Object oValue = getCachedOrPending(oKey);
StoreWrapper store = getCacheStore();
if (store != null)
{
boolean fOwned = getContext().isKeyOwned(oKey);
// check if the value needs to be loaded
if (!fBlind && oValue == null && fOwned)
{
Entry entry = store.load(oKey);
oValue = entry == null ? null : entry.getBinaryValue();
// if there is nothing to remove, then return immediately
if (oValue == null)
{
return null;
}
}
// remove from the store only if is a read/write store
if (!isReadOnly())
{
removeFromWriteQueue(oKey);
if (fOwned)
{
store.erase(instantiateEntry(oKey, null, oValue));
}
}
}
// the remove from the internal cache comes last
getInternalCache().remove(oKey);
return oValue;
}
finally
{
mapControl.unlock(oKey);
}
}
/**
 * Returns the number of key-value mappings in this map (as held by the
 * internal cache). If the map contains more than Integer.MAX_VALUE
 * elements, returns Integer.MAX_VALUE.
 *
 * @return the number of key-value mappings in this map
 */
public int size()
{
return getInternalCache().size();
}
/**
 * Returns a set view of the mappings contained in this map.
 *
 * The view is created lazily on first access and cached thereafter.
 *
 * @return a set view of the mappings contained in this map
 */
public Set entrySet()
    {
    EntrySet set = m_entryset;
    if (set != null)
        {
        return set;
        }
    set = instantiateEntrySet();
    m_entryset = set;
    return set;
    }
/**
 * Returns a set view of the keys contained in this map.
 *
 * The view is created lazily on first access and cached thereafter.
 *
 * @return a set view of the keys contained in this map
 */
public Set keySet()
    {
    KeySet set = m_keyset;
    if (set != null)
        {
        return set;
        }
    set = instantiateKeySet();
    m_keyset = set;
    return set;
    }
/**
 * Returns a collection view of the values contained in this map.
 *
 * The view is created lazily on first access and cached thereafter.
 *
 * @return a collection view of the values contained in this map
 */
public Collection values()
    {
    ValuesCollection colValues = m_values;
    if (colValues != null)
        {
        return colValues;
        }
    colValues = instantiateValuesCollection();
    m_values = colValues;
    return colValues;
    }
// ----- CacheMap interface ---------------------------------------------
/**
 * Associates the specified value with the specified key in this map.
 * Delegates to {@link #putInternal}.
 *
 * @param oKey     key with which the specified value is to be associated
 * @param oValue   value to be associated with the specified key
 * @param cMillis  the number of milliseconds until the entry will expire;
 *                 pass zero to use the cache's default ExpiryDelay settings;
 *                 pass -1 to indicate that the entry should never expire
 *
 * @return previous value associated with specified key, or null
 *         if there was no mapping for key
 */
public Object put(Object oKey, Object oValue, long cMillis)
{
return putInternal(oKey, oValue, cMillis);
}
/**
* Retrieve values for all the specified keys.
* <p>
* Keys found in neither the internal cache nor the misses cache are
* bulk-loaded from the configured store via {@code loadAll}; keys the
* store fails to return are recorded as misses.
*
* @param colKeys a collection of keys that may be in the named cache
*
* @return a Map of keys to values for the specified keys passed in
*         colKeys
*/
public Map getAll(Collection colKeys)
    {
    ConcurrentMap mapControl = getControlMap();
    BackingMapManagerContext ctx = getContext();
    if (!(colKeys instanceof SortedSet))
        {
        // assume "natural" sort; all keys must be Comparable
        // (sorting gives a deterministic lock-acquisition order,
        // presumably to avoid deadlock between concurrent callers --
        // TODO confirm)
        colKeys = new TreeSet(colKeys);
        }
    // to simplify the code, we lock all keys (that could potentially
    // be loaded) within the "try-finally" block, relying on the
    // "forgiving" behavior of the unlock API that would just ignore
    // the keys that were not locked
    try
        {
        Map mapMisses = getMissesCache();
        Map mapResult = new HashMap();
        Set setLoad = new HashSet();
        for (Object oKey : colKeys)
            {
            // we assume that unlike "get", the "getAll" is never called during
            // re-distribution; technically speaking we should not even need to
            // make the "isKeyOwned" check, but let's play it safe
            if (ctx.isKeyOwned(oKey))
                {
                mapControl.lock(oKey, -1L);
                }
            else
                {
                throw new IllegalStateException("Key is not owned: " + oKey);
                }
            if (mapMisses != null && mapMisses.containsKey(oKey))
                {
                // known to be missing; skip
                continue;
                }
            Object oValue = getFromInternalCache(oKey);
            if (oValue == null)
                {
                // add the key to the set of keys that should be loaded
                setLoad.add(oKey);
                }
            else
                {
                // the entry is found in the internal cache
                mapResult.put(oKey, oValue);
                }
            }
        if (setLoad.isEmpty())
            {
            // everything is found
            return mapResult;
            }
        StoreWrapper store = getCacheStore();
        if (store != null)
            {
            // bulk-load the remaining keys from the store
            Set setLoaded = store.loadAll(setLoad);
            Set setMissed = new SubSet(setLoad);
            // iterate over the loaded entries and insert them
            for (Object oEntry : setLoaded)
                {
                Entry entry = (Entry) oEntry;
                Binary binKey = entry.getBinaryKey();
                putToInternalCache(entry);
                // merge the loaded keys with the entries already found
                mapResult.put(binKey, entry.getBinaryValue());
                setMissed.remove(binKey);
                }
            // need to update the "misses" for the keys that could not be loaded
            for (Object oKey : setMissed)
                {
                putToInternalCache(oKey, null, CacheMap.EXPIRY_DEFAULT);
                }
            }
        return mapResult;
        }
    finally
        {
        // unlock everything; unlocking a key that was never locked is a no-op
        for (Object oKey : colKeys)
            {
            mapControl.unlock(oKey);
            }
        }
    }
// ----- internal -------------------------------------------------------
/**
* Remove the collection of keys from this ReadWriteBackingMap.
*
* This method will ensure the configured CacheStore.eraseAll method is
* called with all owned keys in addition to removing the relevant entries
* from the internal Map.
*
* @param colKeys a collection of keys to remove, that may be in the map
*
* @return true if any of the provided keys were successfully removed from
*         this map
*/
protected boolean removeAll(Collection colKeys)
    {
    StoreWrapper store = getCacheStore();
    ConcurrentMap mapControl = getControlMap();
    Map mapInternal = getInternalCache();
    // tracks the keys actually erased; shrinks if eraseAll partially fails
    Collection colKeysProcessed = colKeys;
    try
        {
        Map mapMisses = getMissesCache();
        Set setEntries = new HashSet<>(colKeys.size());
        boolean fReadOnly = isReadOnly();
        BackingMapManagerContext ctx = getContext();
        for (Object oKey : colKeys)
            {
            mapControl.lock(oKey, -1L);
            // clear the key from the misses cache
            if (mapMisses != null)
                {
                mapMisses.remove(oKey);
                }
            cancelOutstandingReads(oKey);
            if (store != null)
                {
                // whether entry exists or not, erase/eraseAll
                // needs to be called: fetch existing value or use null
                Object oValue = getCachedOrPending(oKey);
                boolean fOwned = ctx.isKeyOwned(oKey);
                // remove from the store only if is a read/write store
                if (!fReadOnly)
                    {
                    removeFromWriteQueue(oKey);
                    if (fOwned)
                        {
                        setEntries.add(instantiateEntry(oKey, null, oValue, 0L));
                        }
                    }
                }
            }
        if (!setEntries.isEmpty())
            {
            // SubSet lets the store report partial success: entries it
            // removes from the set are the ones it successfully erased
            SubSet setEntriesFailed = new SubSet<>(setEntries);
            Set setSuccess = null;
            try
                {
                if (store != null)
                    {
                    store.eraseAll(setEntriesFailed);
                    }
                setSuccess = setEntries;
                }
            catch (Throwable e)
                {
                // on failure only the entries the store managed to remove
                // from the SubSet count as successfully erased
                setSuccess = setEntriesFailed.getRemoved();
                throw Base.ensureRuntimeException(e);
                }
            finally
                {
                colKeysProcessed = ConverterCollections.getCollection(
                        setSuccess, Entry::getBinaryKey, NullImplementation.getConverter());
                }
            }
        return !colKeysProcessed.isEmpty();
        }
    finally
        {
        // evict only the successfully processed keys from the internal map,
        // then release every lock taken above
        for (Object binKey : colKeysProcessed)
            {
            mapInternal.remove(binKey);
            }
        for (Object oKey : colKeys)
            {
            mapControl.unlock(oKey);
            }
        }
    }
/**
* Add the key and value pair to the internal cache in such a way that the
* resulting map event would be marked as "synthetic".
* <p>
* Convenience overload that applies the cache's default expiry.
*
* @param oKey  the key in internal format
* @param oVal  the value in internal format; null if the value should be
*              cached as "missing"
*/
protected void putToInternalCache(Object oKey, Object oVal)
    {
    putToInternalCache(oKey, oVal, CacheMap.EXPIRY_DEFAULT);
    }
/**
* Add the key and value pair from the passed entry to the internal cache
* in such a way that the resulting map event would be marked as
* "synthetic".
*
* @param entry  cache entry supplying the binary key, binary value and
*               expiry
*/
protected void putToInternalCache(Entry entry)
    {
    Binary binKey   = entry.getBinaryKey();
    Binary binValue = entry.getBinaryValue();
    putToInternalCache(binKey, binValue, extractExpiry(entry));
    }
/**
* Add the key and value pair to the internal cache in such a way that the
* resulting map event would be marked as "synthetic".
*
* @param oKey     the key in internal format
* @param oVal     the value in internal format; null if the value should be
*                 cached as "missing"
* @param cMillis  the cache entry expiry value
*/
protected void putToInternalCache(Object oKey, Object oVal, long cMillis)
    {
    Map mapInternal = getInternalCache();
    if (oVal == null)
        {
        // a null value means "known missing": record it in the misses
        // cache (if configured) instead of the internal map
        Map mapMisses = getMissesCache();
        if (mapMisses != null)
            {
            mapMisses.put(oKey, oKey);
            }
        }
    else
        {
        // mark the key so the resulting insert/update event is flagged
        // as synthetic by the internal listener
        Map mapSynthetic = getSyntheticEventsMap();
        mapSynthetic.put(oKey, oKey);
        try
            {
            // only use the expiring put when a non-default expiry was
            // requested and the internal map supports it
            if (cMillis != CacheMap.EXPIRY_DEFAULT && mapInternal instanceof CacheMap)
                {
                ((CacheMap) mapInternal).put(oKey, oVal, cMillis);
                }
            else
                {
                mapInternal.put(oKey, oVal);
                }
            }
        finally
            {
            // always clear the synthetic marker, even if the put throws
            mapSynthetic.remove(oKey);
            }
        }
    }
/**
* Cancel any outstanding asynchronous reads for a key.
* <p>
* Only applies when refresh-ahead is enabled on a read/write map: the key
* is removed from the read queue and any in-flight load (represented by a
* ReadLatch in the control map) is cancelled.
*
* @param oKey  the key in internal format
*/
protected void cancelOutstandingReads(Object oKey)
    {
    if (isRefreshAhead() && !isReadOnly())
        {
        Map mapControl = getControlMap();
        // prevent a queued (not yet started) refresh-ahead load
        getReadQueue().remove(oKey);
        // cancel an in-progress load, if any
        ReadLatch latch = (ReadLatch) mapControl.get(oKey);
        if (latch != null)
            {
            latch.cancel();
            mapControl.remove(oKey);
            }
        }
    }
/**
* Get the the value for a given key. If the entry is present in the
* internal cache and refresh-ahead is configured, check if a reload
* operation needs to be scheduled. If the entry is missing, check for a
* potentially pending refresh-ahead operation and potentially pending
* write-behind.
*
* @param oKey the key in internal format
*
* @return the value or null if the value is not found
*/
protected Object getFromInternalCache(Object oKey)
    {
    if (isRefreshAhead())
        {
        ConfigurableCacheMap cache = getInternalConfigurableCache();
        ConfigurableCacheMap.Entry entry = cache.getCacheEntry(oKey);
        Map mapControl = getControlMap();
        if (entry == null)
            {
            // cache miss
            if (!getContext().isKeyOwned(oKey))
                {
                // not owned (quite likely re-distribution); skip
                return null;
                }
            // check to see if the value for the given key is currently
            // being loaded by the refresh-ahead thread
            ReadLatch latch = (ReadLatch) mapControl.get(oKey);
            if (latch == null)
                {
                // remove the key from the queue to prevent double loads
                getReadQueue().remove(oKey);
                }
            else
                {
                try
                    {
                    synchronized (latch)
                        {
                        // wait for the load operation to complete,
                        // if necessary
                        while (!latch.isComplete())
                            {
                            Blocking.wait(latch);
                            }
                        }
                    }
                catch (InterruptedException e)
                    {
                    // preserve the interrupt status for the caller
                    Thread.currentThread().interrupt();
                    }
                finally
                    {
                    // remove the latch from the control map
                    mapControl.remove(oKey);
                    }
                // this method may rethrow an exception thrown by the
                // asynchronous load operation
                Object oVal = latch.getValue();
                putToInternalCache(oKey, oVal);
                return oVal;
                }
            }
        else
            {
            // cache hit:
            // if the entry is ripe for an asynchronous load and the
            // refresh-ahead thread is not currently loading the key,
            // add the key to the refresh-ahead queue
            long lExpiryMillis = entry.getExpiryMillis();
            if (lExpiryMillis != 0)
                {
                // schedule a soft refresh once the entry enters the last
                // "refresh-ahead factor" fraction of its expiry window
                long lInterval = (long) (cache.getExpiryDelay()
                        * getRefreshAheadFactor());
                if (Base.getSafeTimeMillis() >= lExpiryMillis - lInterval)
                    {
                    ReadLatch latch = (ReadLatch) mapControl.get(oKey);
                    if (latch == null)
                        {
                        getReadQueue().add(oKey);
                        }
                    }
                }
            return entry.getValue();
            }
        }
    // no refresh-ahead (or a fall-through miss): consult the internal map
    // and any pending write-behind value
    return getCachedOrPending(oKey);
    }
/**
* Get a value from the internal cache in a way that respects a potentially
* pending write-behind operation.
*
* @param oKey  the key
*
* @return the corresponding value, or null if the key is known to neither
*         the internal cache nor the write-behind queue
*/
protected Object getCachedOrPending(Object oKey)
    {
    Object oValue = getInternalCache().get(oKey);
    if (oValue != null)
        {
        return oValue;
        }
    // a rare case: the entry may have been evicted at the exact moment it
    // ripened and is currently being processed by the write-behind thread
    // (see COH-1234); in that case the queue still knows the value
    WriteQueue queue = getWriteQueue();
    return queue == null ? null : queue.checkPending(oKey);
    }
/**
* An actual implementation for the extended put() method.
*
* @param oKey     key with which the specified value is to be associated
* @param oValue   value to be associated with the specified key
* @param cMillis  the number of milliseconds until the entry will expire
*
* @return previous value associated with specified key, or null
*         if there was no mapping for key.
*/
protected Object putInternal(Object oKey, Object oValue, long cMillis)
    {
    ConcurrentMap mapControl = getControlMap();
    Map mapMisses = getMissesCache();
    Map mapInternal = getInternalCache();
    mapControl.lock(oKey, -1L);
    try
        {
        // clear the key from the misses cache
        if (mapMisses != null)
            {
            mapMisses.remove(oKey);
            }
        cancelOutstandingReads(oKey);
        BackingMapManagerContext ctx = getContext();
        // there are three possibilities:
        // (1) read-only: keep it in memory; no CacheStore ops
        // (2) write-through: immediate write to CacheStore
        // (3) write-behind: queued write to CacheStore or failover
        StoreWrapper store = getCacheStore();
        WriteQueue queue = getWriteQueue();
        if (store != null && !isReadOnly())
            {
            // the "owned" flag indicates whether or not this put is a
            // regular (client driven) operation or an effect of a failover
            boolean fOwned = ctx.isKeyOwned(oKey);
            if (queue == null)
                {
                // write-through path
                boolean fDecorated = !fOwned && !store.isBlocking() &&
                        ExternalizableHelper.isDecorated((Binary) oValue,
                                BackingMapManagerContext.DECO_STORE);
                if (fOwned || fDecorated)
                    {
                    if (!store.isBlocking() && !fDecorated)
                        {
                        // non-blocking entry store;
                        // decorate the entry with a "store deferred" flag
                        oValue = ExternalizableHelper.decorate((Binary) oValue,
                                BackingMapManagerContext.DECO_STORE, BIN_STORE_PENDING);
                        }
                    Entry entry =
                            instantiateEntry(oKey, oValue, mapInternal.get(oKey), cMillis);
                    store.store(entry, false);
                    if (entry.isChanged())
                        {
                        // the store mutated the entry; honor its new value
                        oValue = entry.getChangedBinaryValue();
                        // due to technical reasons (event handling), synchronous
                        // removal (oValue == null) is not currently supported
                        // instead, we schedule the entry for almost
                        // instant expiry
                        cMillis = oValue == null ? 1L : extractExpiry(entry);
                        }
                    }
                }
            else
                {
                // write-behind path
                if (fOwned)
                    {
                    // regular operation;
                    // decorate the entry with a "store deferred" flag
                    oValue = ExternalizableHelper.decorate((Binary) oValue,
                            BackingMapManagerContext.DECO_STORE, BIN_STORE_PENDING);
                    }
                else
                    {
                    // failover; check if the value has already been stored
                    if (!ExternalizableHelper.isDecorated((Binary) oValue,
                            BackingMapManagerContext.DECO_STORE))
                        {
                        // absence of the decoration means that it has
                        // already been stored; no requeueing is necessary
                        queue = null;
                        }
                    else
                        {
                        // if we ever need to add statistics regarding a
                        // number of items that were re-queued due to a
                        // failover redistribution
                        }
                    }
                }
            }
        // update the in-memory cache and queue if necessary
        if (queue != null)
            {
            queue.add(
                    instantiateEntry(oKey, oValue, mapInternal.get(oKey), cMillis), 0L);
            }
        return putToInternalMap(oKey, oValue, cMillis);
        }
    finally
        {
        mapControl.unlock(oKey);
        }
    }
/**
* Wait for notification on the specified object for no longer than
* the specified wait time.
*
* Note: the caller must hold synchronization on the object being waited
* on.
*
* @param o        the object to wait for notification on
* @param cMillis  the maximum time in milliseconds to wait;
*                 pass 0 for forever
*
* @return true iff notification was received, the timeout has
*         passed, or the thread was spuriously wakened;
*         false if this thread was interrupted
*/
protected boolean waitFor(Object o, long cMillis)
    {
    try
        {
        Blocking.wait(o, cMillis);
        }
    catch (InterruptedException e)
        {
        // restore the interrupt status so code further up the stack can
        // still observe the interruption (previously it was swallowed)
        Thread.currentThread().interrupt();
        if (isActive())
            {
            throw Base.ensureRuntimeException(e);
            }
        return false;
        }
    return true;
    }
/**
* Issue a service guardian "heartbeat" for the current thread.
*/
protected void heartbeat()
    {
    long cTimeoutMillis = getCacheStoreTimeoutMillis();
    if (cTimeoutMillis != 0)
        {
        // an explicit store timeout is configured; heartbeat with it
        GuardSupport.heartbeat(cTimeoutMillis);
        }
    else
        {
        // no configured timeout; use the guardian's default
        GuardSupport.heartbeat();
        }
    }
/**
* Return the expiration value for the given entry.
*
* @param entry  the entry; may be null
*
* @return the entry's expiry, or {@link CacheMap#EXPIRY_DEFAULT} when the
*         entry is null
*/
protected long extractExpiry(Entry entry)
    {
    if (entry == null)
        {
        return CacheMap.EXPIRY_DEFAULT;
        }
    return entry.getExpiry();
    }
/**
* Append the provided name to the Daemon's thread name if not already appended.
*
* @param daemon  the Daemon to be modified; ignored if null
* @param sName   the name to append to the Daemon's thread name
*/
protected void updateThreadName(Daemon daemon, String sName)
    {
    if (daemon == null)
        {
        return;
        }
    Thread thread   = daemon.getThread();
    String sCurrent = thread.getName();
    String sSuffix  = ":" + sName;
    // idempotent: only append the suffix once
    if (!sCurrent.endsWith(sSuffix))
        {
        thread.setName(sCurrent + sSuffix);
        }
    }
// ----- inner class: EntrySet ------------------------------------------
/**
* Factory pattern: instantiate an entry set for this backing map.
* <p>
* Subclasses may override to provide a specialized view.
*
* @return a new EntrySet object
*/
protected EntrySet instantiateEntrySet()
    {
    return new EntrySet();
    }
/**
* A set of entries corresponding to this backing map.
* <p>
* The view is live: all mutating operations are routed back to the
* enclosing ReadWriteBackingMap (and therefore to the cache store).
*
* @author cp 2002.10.22
*/
protected class EntrySet
        extends AbstractSet
    {
    // ----- Set interface ------------------------------------------
    /**
    * Returns an iterator over the elements contained in this collection.
    * <p>
    * The iterator operates over a snapshot of the keys (taken via
    * toArray), so it does not fail on concurrent modification; entry
    * values are resolved lazily on each next() call.
    *
    * @return an iterator over the elements contained in this collection
    */
    public Iterator iterator()
        {
        if (ReadWriteBackingMap.this.isEmpty())
            {
            return NullImplementation.getIterator();
            }
        return new SimpleEnumerator(getInternalCache().keySet().toArray())
            {
            public Object next()
                {
                m_entryPrev = instantiateEntry(super.next());
                return m_entryPrev;
                }
            public void remove()
                {
                if (m_entryPrev == null)
                    {
                    throw new IllegalStateException();
                    }
                else
                    {
                    // route the removal through the backing map so the
                    // store's erase is invoked as well
                    ReadWriteBackingMap.this.
                            removeInternal(m_entryPrev.getKey(), true);
                    m_entryPrev = null;
                    }
                }
            // last entry returned by next(); null once removed
            private Map.Entry m_entryPrev;
            };
        }
    /**
    * Returns the number of elements in this collection. If the collection
    * contains more than Integer.MAX_VALUE elements, returns
    * Integer.MAX_VALUE.
    *
    * @return the number of elements in this collection
    */
    public int size()
        {
        return ReadWriteBackingMap.this.size();
        }
    /**
    * Returns true if this collection contains the specified
    * element.  More formally, returns true if and only if this
    * collection contains at least one element e such that
    * (o==null ? e==null : o.equals(e)).
    *
    * @param o object to be checked for containment in this collection
    *
    * @return true if this collection contains the specified element
    */
    public boolean contains(Object o)
        {
        if (o instanceof Map.Entry)
            {
            Map.Entry entry = (Map.Entry) o;
            Object oKey = entry.getKey();
            ReadWriteBackingMap map = ReadWriteBackingMap.this;
            // the trailing containsKey guards against the entry being
            // evicted between the value comparison and the return
            return map.containsKey(oKey)
                    && Base.equals(entry.getValue(), map.get(oKey))
                    && map.containsKey(oKey); // verify not evicted
            }
        return false;
        }
    /**
    * Removes the specified element from this Set of entries if it is
    * present by removing the associated entry from the underlying
    * Map.
    *
    * @param o object to be removed from this set, if present
    *
    * @return true if the set contained the specified element
    */
    public boolean remove(Object o)
        {
        ReadWriteBackingMap map = ReadWriteBackingMap.this;
        Object oKey = ((Map.Entry) o).getKey();
        boolean fExists = map.containsKey(oKey);
        // whether or not the entry exists; store.erase() should be called
        // in the same way as by the remove's behavior
        map.removeInternal(oKey, true);
        return fExists;
        }
    /**
    * Removes all of the elements from this set of Keys by clearing the
    * underlying Map.
    */
    public void clear()
        {
        ReadWriteBackingMap.this.clear();
        }
    /**
    * Returns an array containing all of the elements in this collection. If
    * the collection makes any guarantees as to what order its elements are
    * returned by its iterator, this method must return the elements in the
    * same order. The returned array will be "safe" in that no references to
    * it are maintained by the collection. (In other words, this method must
    * allocate a new array even if the collection is backed by an Array).
    * The caller is thus free to modify the returned array.
    *
    * @return an array containing all of the elements in this collection
    */
    public Object[] toArray()
        {
        return toArray((Object[]) null);
        }
    /**
    * Returns an array with a runtime type is that of the specified array and
    * that contains all of the elements in this collection. If the
    * collection fits in the specified array, it is returned therein.
    * Otherwise, a new array is allocated with the runtime type of the
    * specified array and the size of this collection.
    *
    * If the collection fits in the specified array with room to spare (i.e.,
    * the array has more elements than the collection), the element in the
    * array immediately following the end of the collection is set to
    * null. This is useful in determining the length of the
    * collection only if the caller knows that the collection does
    * not contain any null elements.)
    *
    * @param ao the array into which the elements of the collection are
    *           to be stored, if it is big enough; otherwise, a new
    *           array of the same runtime type is allocated for this
    *           purpose
    *
    * @return an array containing the elements of the collection
    *
    * @throws ArrayStoreException if the runtime type of the specified array
    *         is not a supertype of the runtime type of every element in this
    *         collection
    */
    public Object[] toArray(Object[] ao)
        {
        // snapshot the keys, then materialize a lazy entry per key
        Object[] aoKey = getInternalCache().keySet().toArray();
        int cKeys = aoKey.length;
        // create the array to store the map contents
        if (ao == null)
            {
            // implied Object[] type, see toArray()
            ao = new Object[cKeys];
            }
        else if (ao.length < cKeys)
            {
            // if it is not big enough, a new array of the same runtime
            // type is allocated
            ao = (Object[]) Array.newInstance(ao.getClass().getComponentType(), cKeys);
            }
        else if (ao.length > cKeys)
            {
            // if the collection fits in the specified array with room to
            // spare, the element in the array immediately following the
            // end of the collection is set to null
            ao[cKeys] = null;
            }
        for (int i = 0; i < cKeys; ++i)
            {
            ao[i] = instantiateEntry(aoKey[i]);
            }
        return ao;
        }
    /**
    * Factory pattern: instantiate a Map.Entry object for the specified
    * key.
    * <p>
    * The returned entry is "live": getValue/setValue delegate to the
    * enclosing backing map at call time.
    *
    * @param oKey the key
    *
    * @return a Map.Entry for the specified key
    */
    protected Map.Entry instantiateEntry(Object oKey)
        {
        return new SimpleMapEntry(oKey)
            {
            public Object getValue()
                {
                return ReadWriteBackingMap.this.get(getKey());
                }
            public Object setValue(Object oValue)
                {
                return ReadWriteBackingMap.this.put(getKey(), oValue);
                }
            };
        }
    }
// ----- inner class: KeySet --------------------------------------------
/**
* Factory pattern: instantiate a key set for this backing map.
* <p>
* Subclasses may override to provide a specialized view.
*
* @return a new KeySet object
*/
protected KeySet instantiateKeySet()
    {
    return new KeySet();
    }
/**
* A set of entries backed by this backing map.
* <p>
* The view is live: removals are routed back to the enclosing
* ReadWriteBackingMap (and therefore to the cache store).
*/
protected class KeySet
        extends AbstractSet
    {
    // ----- Set interface ------------------------------------------
    /**
    * Returns an iterator over the elements contained in this collection.
    * <p>
    * Backed directly by the internal cache's key iterator; remove()
    * delegates to the backing map so the store's erase is invoked.
    *
    * @return an iterator over the elements contained in this collection
    */
    public Iterator iterator()
        {
        return new Iterator()
            {
            public boolean hasNext()
                {
                return m_iter.hasNext();
                }
            public Object next()
                {
                Object oKey = m_iter.next();
                m_oKeyPrev = oKey;
                return oKey;
                }
            public void remove()
                {
                if (m_oKeyPrev == null)
                    {
                    throw new IllegalStateException();
                    }
                else
                    {
                    ReadWriteBackingMap.this.removeInternal(m_oKeyPrev, true);
                    m_oKeyPrev = null;
                    }
                }
            // underlying key iterator of the internal cache
            private Iterator m_iter = getInternalCache().keySet().iterator();
            // last key returned by next(); null once removed
            private Object m_oKeyPrev;
            };
        }
    /**
    * Determine the number of keys in the Set.
    *
    * @return the number of keys in the Set, which is the same as the
    *         number of entries in the underlying Map
    */
    public int size()
        {
        return ReadWriteBackingMap.this.getInternalCache().keySet().size();
        }
    /**
    * Determine if a particular key is present in the Set.
    *
    * @return true iff the passed key object is in the key Set
    */
    public boolean contains(Object oKey)
        {
        return ReadWriteBackingMap.this.containsKey(oKey);
        }
    /**
    * Removes the specified element from this Set of keys if it is
    * present by removing the associated entry from the underlying
    * Map.
    *
    * @param o object to be removed from this set, if present
    *
    * @return true if the set contained the specified element
    */
    public boolean remove(Object o)
        {
        ReadWriteBackingMap map = ReadWriteBackingMap.this;
        boolean fExists = map.containsKey(o);
        // whether or not the entry exists; store.erase() should be called
        // in the same way as by the remove's behavior
        map.removeInternal(o, true);
        return fExists;
        }
    /**
    * Removes the provided collection from this Set of keys by removing
    * the associated entries from the underlying Map.
    *
    * @param colKeys objects to be removed from this set, if present
    *
    * @return true if the Map was modified as a result of this call
    */
    public boolean removeAll(Collection colKeys)
        {
        return ReadWriteBackingMap.this.removeAll(colKeys);
        }
    /**
    * Removes all of the elements from this set of Keys by clearing the
    * underlying Map.
    */
    public void clear()
        {
        ReadWriteBackingMap.this.clear();
        }
    /**
    * Returns an array containing all of the keys in this set.
    *
    * @return an array containing all of the keys in this set
    */
    public Object[] toArray()
        {
        return getInternalCache().keySet().toArray();
        }
    /**
    * Returns an array with a runtime type is that of the specified array
    * and that contains all of the keys in this Set.  If the Set fits
    * in the specified array, it is returned therein. Otherwise, a new
    * array is allocated with the runtime type of the specified array
    * and the size of this collection.
    *
    * If the Set fits in the specified array with room to spare (i.e.,
    * the array has more elements than the Set), the element in the
    * array immediately following the end of the Set is set to
    * null. This is useful in determining the length of the
    * Set only if the caller knows that the Set does
    * not contain any null keys.)
    *
    * @param ao the array into which the elements of the Set are to
    *           be stored, if it is big enough; otherwise, a new array
    *           of the same runtime type is allocated for this purpose
    *
    * @return an array containing the elements of the Set
    *
    * @throws ArrayStoreException if the runtime type of the specified
    *         array is not a supertype of the runtime type of every
    *         element in this Set of keys
    */
    public Object[] toArray(Object ao[])
        {
        return getInternalCache().keySet().toArray(ao);
        }
    }
// ----- inner class: ValuesCollection ----------------------------------
/**
* Factory pattern.
* <p>
* Subclasses may override to provide a specialized view.
*
* @return a new instance of the ValuesCollection class (or subclass
*         thereof)
*/
protected ValuesCollection instantiateValuesCollection()
    {
    return new ValuesCollection();
    }
/**
* A collection of values backed by this map.
* <p>
* The view is live: iteration resolves values through the backing map,
* and removals are routed back to it.
*/
protected class ValuesCollection
        extends AbstractCollection
    {
    // ----- Collection interface -----------------------------------
    /**
    * Obtain an iterator over the values in the Map.
    *
    * @return an Iterator that provides a live view of the values in the
    *         underlying Map object
    */
    public Iterator iterator()
        {
        return new Iterator()
            {
            public boolean hasNext()
                {
                return m_iter.hasNext();
                }
            public Object next()
                {
                // resolve the value through the backing map (honors
                // read-through and pending write-behind values)
                return ReadWriteBackingMap.this.get(m_iter.next());
                }
            public void remove()
                {
                // delegates to the key-set iterator, which routes the
                // removal through the backing map
                m_iter.remove();
                }
            // iterator over the backing map's key-set view
            private Iterator m_iter =
                    ReadWriteBackingMap.this.keySet().iterator();
            };
        }
    /**
    * Determine the number of values in the Collection.
    *
    * @return the number of values in the Collection, which is the same
    *         as the number of entries in the underlying Map
    */
    public int size()
        {
        return ReadWriteBackingMap.this.size();
        }
    /**
    * Removes all of the elements from this Collection of values by
    * clearing the underlying Map.
    */
    public void clear()
        {
        ReadWriteBackingMap.this.clear();
        }
    /**
    * Returns an array containing all of the keys in this collection.
    *
    * @return an array containing all of the keys in this collection
    */
    public Object[] toArray()
        {
        return getInternalCache().values().toArray();
        }
    /**
    * Returns an array with a runtime type is that of the specified array
    * and that contains all of the keys in this Collection.  If the
    * Collection fits in the specified array, it is returned therein.
    * Otherwise, a new array is allocated with the runtime type of the
    * specified array and the size of this collection.
    *
    * If the Collection fits in the specified array with room to spare
    * (i.e., the array has more elements than the Collection), the
    * element in the array immediately following the end of the
    * Collection is set to null. This is useful in determining
    * the length of the Collection only if the caller knows that
    * the Collection does not contain any null elements.)
    *
    * @param ao the array into which the elements of the Collection are
    *           to be stored, if it is big enough; otherwise, a new
    *           array of the same runtime type is allocated for this
    *           purpose
    *
    * @return an array containing the elements of the Collection
    *
    * @throws ArrayStoreException if the runtime type of the specified
    *         array is not a supertype of the runtime type of every
    *         element in this Collection of values
    */
    public Object[] toArray(Object ao[])
        {
        return getInternalCache().values().toArray(ao);
        }
    }
// ----- ObservableMap interface ----------------------------------------
/**
* {@inheritDoc}
*/
public void addMapListener(MapListener listener)
    {
    // null resolves to the Filter overload (the most specific applicable
    // method), i.e. listen to all entries with full (non-lite) events
    addMapListener(listener, null, false);
    }
/**
* {@inheritDoc}
*/
public void removeMapListener(MapListener listener)
    {
    // null resolves to the Filter overload, matching the registration
    // performed by addMapListener(MapListener)
    removeMapListener(listener, null);
    }
/**
* {@inheritDoc}
*/
public synchronized void addMapListener(MapListener listener, Object oKey, boolean fLite)
    {
    Base.azzert(listener != null);
    // lazily create the listener support (guarded by "synchronized")
    if (m_listenerSupport == null)
        {
        m_listenerSupport = new MapListenerSupport();
        }
    m_listenerSupport.addListener(listener, oKey, fLite);
    }
/**
* {@inheritDoc}
*/
public synchronized void removeMapListener(MapListener listener, Object oKey)
    {
    Base.azzert(listener != null);
    MapListenerSupport support = m_listenerSupport;
    if (support == null)
        {
        return;
        }
    support.removeListener(listener, oKey);
    // release the support object once the last listener is gone
    if (support.isEmpty())
        {
        m_listenerSupport = null;
        }
    }
/**
* {@inheritDoc}
*/
public synchronized void addMapListener(MapListener listener, Filter filter, boolean fLite)
    {
    Base.azzert(listener != null);
    // lazily create the listener support (guarded by "synchronized")
    if (m_listenerSupport == null)
        {
        m_listenerSupport = new MapListenerSupport();
        }
    m_listenerSupport.addListener(listener, filter, fLite);
    }
/**
* {@inheritDoc}
*/
public synchronized void removeMapListener(MapListener listener, Filter filter)
    {
    Base.azzert(listener != null);
    MapListenerSupport support = m_listenerSupport;
    if (support == null)
        {
        return;
        }
    support.removeListener(listener, filter);
    // release the support object once the last listener is gone
    if (support.isEmpty())
        {
        m_listenerSupport = null;
        }
    }
// ----- Object methods -------------------------------------------------
/**
* Compares the specified object with this map for equality.  Returns
* true if the given object is also a map and the two maps
* represent the same mappings.
*
* @param o object to be compared for equality with this map
*
* @return true if the specified object is equal to this map
*/
public boolean equals(Object o)
    {
    if (o == this)
        {
        return true;
        }
    // delegate mapping comparison to the internal cache
    return o instanceof Map && getInternalCache().equals(o);
    }
/**
* Returns the hash code value for this map.
* <p>
* Consistent with {@link #equals}, which also delegates to the internal
* cache.
*
* @return the hash code value for this map
*/
public int hashCode()
    {
    return getInternalCache().hashCode();
    }
/**
* For debugging purposes, format the contents of the Map in a human-
* readable format.
*
* @return a String representation of this ReadWriteBackingMap
*/
public String toString()
    {
    StringBuilder sb = new StringBuilder(ClassHelper.getSimpleName(getClass()));
    sb.append('{').append(getInternalCache()).append('}');
    return sb.toString();
    }
// ----- internal cache -------------------------------------------------
/**
* Get the representative of the "in-memory" storage for this backing map.
*
* @return the ObservableMap object (never null) that this backing map
*         uses to store entries
*/
public ObservableMap getInternalCache()
    {
    return m_mapInternal;
    }
/**
* Get the map that provides internal storage for this backing map. If the
* internal storage is a ConfigurableCacheMap, then this accessor returns the
* same reference as {@link #getInternalCache}; otherwise it returns null.
* The refresh-ahead implementation relies on the internal storage
* providing the ConfigurableCacheMap interface, so this method will
* always return a non-null value if refresh-ahead is enabled.
*
* @return the cache for this backing map or null if the internal map is
*         not an implementation of the ConfigurableCacheMap interface
*/
protected ConfigurableCacheMap getInternalConfigurableCache()
    {
    Map mapInternal = m_mapInternal;
    if (mapInternal instanceof ConfigurableCacheMap)
        {
        return (ConfigurableCacheMap) mapInternal;
        }
    return null;
    }
/**
* Configure the internal cache that this backing map uses to store its
* "in-memory" data.
* <p>
* Also creates the concurrency control map and registers the internal
* routing listener on the supplied map.
*
* @param mapInternal  the internal map
*/
protected void configureInternalCache(ObservableMap mapInternal)
    {
    m_mapInternal = mapInternal;
    m_mapControl = instantiateControlMap();
    m_listenerInternal = instantiateInternalListener();
    // route the internal map's events through this backing map
    mapInternal.addMapListener(getInternalListener());
    }
/**
* Get the optional map used to cache CacheLoader (or CacheStore) misses.
*
* @return the Map that this backing map uses to cache CacheLoader (or
*         CacheStore) misses or null if misses are not cached
*/
public Map getMissesCache()
    {
    return m_mapMisses;
    }
/**
* Get the concurrency control map for this backing map.
*
* @return the ConcurrentMap object (never null) that this backing map
*         uses for per-key locking and latch bookkeeping
*/
public ConcurrentMap getControlMap()
    {
    return m_mapControl;
    }
/**
* Factory pattern: Create the concurrency control map for this backing map.
* <p>
* Subclasses may override to supply an alternative implementation.
*
* @return a new concurrency control map
*/
protected ConcurrentMap instantiateControlMap()
    {
    return new SegmentedConcurrentMap();
    }
/**
* Get the map of keys for which the events should be marked as
* synthetic (internal).
* <p>
* Lazily initialized using double-checked locking.
* NOTE(review): safe double-checked locking normally requires the field
* to be volatile; confirm m_mapSyntheticEvents is declared volatile (or
* that visibility is guaranteed elsewhere).
*
* @return the map of keys to mark events as internal
*/
protected Map getSyntheticEventsMap()
    {
    Map map = m_mapSyntheticEvents;
    if (map == null)
        {
        synchronized (this)
            {
            // re-check under the lock to avoid creating two maps
            map = m_mapSyntheticEvents;
            if (map == null)
                {
                map = m_mapSyntheticEvents = new SafeHashMap();
                }
            }
        }
    return map;
    }
// ----- inner class: InternalMapListener -------------------------------
/**
* Obtain the MapListener that listens to the internal cache and routes
* those events to anyone listening to this ReadWriteBackingMap, creating
* such a listener if one does not already exist.
*
* @return a routing MapListener
*/
protected MapListener getInternalListener()
    {
    // created once in configureInternalCache
    return m_listenerInternal;
    }
/**
* Factory pattern: Create a MapListener that listens to the internal
* cache and routes those events to anyone listening to this
* ReadWriteBackingMap.
* <p>
* Subclasses may override to supply a specialized listener.
*
* @return a new routing MapListener
*/
protected MapListener instantiateInternalListener()
    {
    return new InternalMapListener();
    }
/**
* A MapListener implementation that listens to the internal cache and
* routes those events to anyone listening to this ReadWriteBackingMap.
*
* @author cp 2002.10.22
*/
protected class InternalMapListener
extends Base
implements MapListener
{
/**
* Invoked when a map entry has been inserted.
*
* @param evt  the insert event raised by the internal map
*/
public void entryInserted(MapEvent evt)
    {
    // notify any listeners listening to this backing map
    dispatch(evt);
    }
/**
* Invoked when a map entry has been updated.
*
* @param evt  the update event raised by the internal map
*/
public void entryUpdated(MapEvent evt)
    {
    // notify any listeners listening to this backing map
    dispatch(evt);
    }
/**
* Invoked when a map entry has been removed.
* <p>
* In write-behind mode an eviction may race with a pending store; this
* method ensures any not-yet-persisted value is persisted (or the
* eviction is cancelled) before the event is dispatched.
*
* @param evt  the delete event raised by the internal map
*/
public void entryDeleted(MapEvent evt)
    {
    if (isWriteBehind())
        {
        // most commonly, the installed eviction approver would not allow
        // eviction of not-yet-persisted entries; however in a very rare
        // case when the internal map is not configurable, or the approver
        // has been changed outside of our control we may need to ensure
        // that the corresponding entry has been persisted
        ConfigurableCacheMap mapInternal = getInternalConfigurableCache();
        ConfigurableCacheMap.EvictionApprover approver = mapInternal == null ? null : mapInternal.getEvictionApprover();
        boolean fApproverChanged = mapInternal != null &&
                approver != ConfigurableCacheMap.EvictionApprover.DISAPPROVER &&
                approver != f_writeBehindDisapprover;
        if (mapInternal == null || fApproverChanged)
            {
            if (fApproverChanged)
                {
                err(String.format("The internal map of a ReadWriteBackingMap has an " +
                        "unexpected EvictionApprover(type=%s); custom maps should accept " +
                        "and use the supplied approver.", (approver == null ? null : approver.getClass().getName())));
                }
            ConcurrentMap mapControl = getControlMap();
            Object oKey = evt.getKey();
            // bounded lock attempt: do not stall the eviction path forever
            if (mapControl.lock(oKey, 500L))
                {
                try
                    {
                    if (getContext().isKeyOwned(oKey))
                        {
                        processDeletedEntry(oKey, evt.getOldValue());
                        }
                    }
                finally
                    {
                    mapControl.unlock(oKey);
                    }
                }
            else
                {
                // could not acquire the lock in time: cancel the eviction
                // by restoring the old value into the internal cache
                Object oValueOld = evt.getOldValue();
                Object oValue = getInternalCache().put(oKey, oValueOld);
                if (oValue != null && !equals(oValue, oValueOld))
                    {
                    String sCulprit = getCacheStore() == null ?
                            "backing map" : "cache store";
                    err("Due to an exceptionally long " + sCulprit +
                            " operation an eviction event cannot be processed" +
                            " in order. Canceling the eviction: " + evt);
                    }
                // eviction cancelled; do not dispatch the delete event
                return;
                }
            }
        }
    // notify any listeners listening to this backing map
    dispatch(evt);
    }
// ----- helpers ----------------------------------------------------
/**
* Process an entry that is about to be removed from the internal cache.
* This method is called after the entry has been successfully locked, but
* before any listeners are notified.
*
* If the entry is queued to be inserted or updated, then that must occur
* (be persisted) before we notify any listeners that it has been
* removed from the internal cache, otherwise (for example) if this server
* dies and it has the only copy of the pending update then the update
* will be lost!
*
* @param oKey the key
* @param oValueOld the old value
*/
protected void processDeletedEntry(Object oKey, Object oValueOld)
{
StoreWrapper store = getCacheStore();
if (store != null && !isReadOnly())
{
Entry entry = removeFromWriteQueue(oKey);
if (entry != null)
{
// the store operation may throw an exception if the
// RWBM is configured to do so; to preserve the
// behavior of the RWBM prior to COH-125, catch the
// exception and log a message
try
{
store.store(entry, false);
}
catch (WrapperException e)
{
log(e);
}
}
}
}
/**
* Dispatch the event to the corresponding listeners.
*
* @param evt the MapEvent object
*/
protected void dispatch(final MapEvent evt)
{
MapListenerSupport support = m_listenerSupport;
if (support != null)
{
Object oKey = evt.getKey();
boolean fSynthetic =
(evt instanceof CacheEvent && ((CacheEvent)evt).isSynthetic()) ||
getSyntheticEventsMap().containsKey(oKey);
MapEvent evtNew = new CacheEvent(ReadWriteBackingMap.this, evt.getId(),
oKey, null, null, fSynthetic,
CacheEvent.TransformationState.TRANSFORMABLE, false,
(evt instanceof CacheEvent && ((CacheEvent)evt).isExpired()))
{
public Object getOldValue()
{
return evt.getOldValue();
}
public Object getNewValue()
{
return evt.getNewValue();
}
};
support.fireEvent(evtNew, true);
}
}
}
// ----- life cycle -----------------------------------------------------
/**
* Release the backing map when it is no longer being used.
*/
public void release()
{
if (isActive())
{
try
{
getInternalCache().removeMapListener(getInternalListener());
}
catch (Exception e)
{
Base.err("An exception occurred while removing an internal"
+ " listener during release:");
Base.err(e);
}
if (isRefreshAhead())
{
terminateReadThread();
}
if (isWriteBehind())
{
terminateWriteThread();
}
m_store = null;
m_fActive = false;
}
}
    /**
    * Determine if the backing map is still active.
    *
    * @return true if the backing map is still active
    */
    public boolean isActive()
        {
        // life-cycle flag; cleared by release()
        return m_fActive;
        }
// ----- inner class: ReadLatch -----------------------------------------
/**
* Factory pattern: Instantiate a new read latch the given key.
*
* @param oKey the key
*
* @return the read latch
*/
protected ReadLatch instantiateReadLatch(Object oKey)
{
return new ReadLatch(oKey);
}
/**
* A synchronization construct used to coordinate asynchronous loads by the
* refresh-ahead thread with cache accesses by other threads.
*
* The refresh-ahead thread places a new ReadLatch in the control
* map before performing a load operation on the cache store. The presence of
* the latch signals to a thread executing the {@link ReadWriteBackingMap#get}
* method that an asynchronous load is in progress. This thread can then
* wait on the latch to get the results of the asynchronous load. This thread
* is then responsible for removing the latch from the control map.
*
* Additionally, a thread performing a {@link ReadWriteBackingMap#put} or
* {@link ReadWriteBackingMap#remove} operation can cancel an ongoing
* asynchronous load using the latch. This thread is also responsible for
* removing the latch from the control map.
*
* @author jh 2005.02.11
*/
protected static class ReadLatch
{
// ----- constructors -------------------------------------------
/**
* Create a new ReadLatch for the specified key.
*
* @param oKey the key that is being loaded by the refresh-ahead thread
*/
protected ReadLatch(Object oKey)
{
m_oKey = oKey;
}
// ----- latch operations ---------------------------------------
/**
* Cancel the load operation. This method has no effect if the operation
* has already been completed or canceled.
*/
public synchronized void cancel()
{
cancel(null);
}
/**
* Cancel the load operation due to an exception. This method has no
* effect if the operation has already been completed or canceled.
*
* @param t the exception responsible for cancelling the load
*/
public synchronized void cancel(Throwable t)
{
if (!m_fCanceled && !m_fComplete)
{
m_oValue = null;
m_throwable = t;
m_fCanceled = true;
m_fComplete = true;
notifyAll();
}
}
/**
* Complete the load operation. The specified value and entry is the
* result of the load operation. This method has no effect if the
* operation has already been completed or canceled.
*
* @param oValue the result of the load operation
*/
public synchronized void complete(Object oValue)
{
if (!m_fCanceled && !m_fComplete)
{
m_oValue = oValue;
m_fComplete = true;
notifyAll();
}
}
// ----- accessors ----------------------------------------------
/**
* Return true if the load operation is complete. The results
* of the load operation can be retrieved using the {@link #getValue}
* method.
*
* @return true if the load operation is complete
*/
public boolean isComplete()
{
return m_fComplete;
}
/**
* Return true if the load operation has been canceled.
*
* @return true if the load operation has been canceled
*/
public boolean isCanceled()
{
return m_fCanceled;
}
/**
* Return the key that is being loaded by the refresh-ahead thread.
*
* @return the key that is being loaded
*/
public Object getKey()
{
return m_oKey;
}
/**
* Return the result of the load operation.
*
* Note: this method should not be called by the refresh-ahead daemon
* thread
*
* @return the result of the load operation
*/
public synchronized Object getValue()
{
Throwable throwable = m_throwable;
if (throwable != null && m_fCanceled)
{
throw Base.ensureRuntimeException(throwable);
}
return m_oValue;
}
// ----- data members -------------------------------------------
/**
* Flag that indicates whether or not the load operation has completed.
*/
private volatile boolean m_fComplete;
/**
* Flag that indicates whether or not the load operation has been
* canceled.
*/
private volatile boolean m_fCanceled;
/**
* The key that is being loaded by the refresh-ahead thread.
*/
private Object m_oKey;
/**
* The result of the load operation.
*/
private Object m_oValue;
/**
* A Throwable associated with a canceled operation.
*/
private Throwable m_throwable;
}
    // ----- inner class: ReadQueue (refresh-ahead queue) -------------------

    /**
    * Get the queue of keys that are yet to be read.
    * <p>
    * Note: presumably created during configuration (see
    * {@link #instantiateReadQueue()}); may be null when refresh-ahead is not
    * enabled -- confirm against the initialization path.
    *
    * @return the refresh-ahead queue object
    */
    public ReadQueue getReadQueue()
        {
        return m_queueRead;
        }
/**
* Factory pattern: Instantiate a new ReadQueue object.
*
* @return a new ReadQueue object
*/
protected ReadQueue instantiateReadQueue()
{
return new ReadQueue();
}
/**
* A queue of keys that should be read from the underlying
* CacheStore.
*
* @author jh 2005.02.08
*/
public class ReadQueue
extends CoherenceCommunityEdition
{
        // ----- constructors -------------------------------------------

        /**
        * Construct a ReadQueue.
        */
        protected ReadQueue()
            {
            }
// ----- Queue API ----------------------------------------------
/**
* Add a key to the queue. This method has no effect if the key is
* already queued.
*
* @param oKey the key object
*
* @return true if the key was added to the queue; false otherwise
*/
public synchronized boolean add(Object oKey)
{
Map map = getKeyMap();
if (map.get(oKey) == null)
{
map.put(oKey, oKey);
List list = getKeyList();
boolean fWasEmpty = list.isEmpty();
list.add(oKey);
if (fWasEmpty)
{
this.notify(); // @see peek()
}
return true;
}
return false;
}
        /**
        * Wait for a key to be added to the queue and return it without
        * removing it from the queue.
        *
        * @return the next item in the queue (it will only return null when the
        *         backing map is no longer active)
        */
        public Object peek()
            {
            // -1 means wait indefinitely; the overload internally wakes at
            // least once a second to recheck the active flag during shutdown
            return peek(-1);
            }
        /**
        * Wait for a key (up to the specified wait time) to be added to the
        * queue and return it without removing it from the queue, or null if
        * the specified wait time has passed).
        *
        * @param cMillis  the number of ms to wait for a key in the queue; pass
        *                 -1 to wait indefinitely or 0 for no wait
        *
        * @return the next item in the queue, or null if the wait time has passed
        *         or if the backing map is no longer active
        */
        public synchronized Object peek(long cMillis)
            {
            List list = getKeyList();
            while (true)
                {
                // shutdown check: release() clears the active flag, and the
                // bounded waits below guarantee we re-test it periodically
                if (!isActive())
                    {
                    return null;
                    }

                if (!list.isEmpty())
                    {
                    // return (but do not remove) the head of the queue
                    return list.get(0);
                    }
                else if (cMillis == 0L)
                    {
                    // no time left (or no-wait was requested)
                    return null;
                    }

                // cap the wait time so that during shutdown the
                // thread will eventually recheck the active flag
                long cWait = (cMillis < 0L || cMillis > 1000L) ? 1000L : cMillis;
                waitFor(this, cWait);

                if (cMillis > 0L)
                    {
                    // if we run out of time, set cMillis to 0 so we
                    // check the list one last time before giving up
                    cMillis = Math.max(0L, cMillis - cWait);
                    }
                }
            }
/**
* Remove a key from the queue if the key is in the queue.
*
* @param oKey the key object
*
* @return true if the key was removed from the queue; false otherwise
*/
public synchronized boolean remove(Object oKey)
{
if (getKeyMap().remove(oKey) != null)
{
getKeyList().remove(oKey);
return true;
}
return false;
}
        /**
        * Select the next key from the refresh-ahead queue that is a candidate
        * for an asynchronous load. A key is a candidate if it can be locked
        * "quickly".
        * <p>
        * This method performs the selection process by iterating through the
        * refresh-ahead queue starting with the first key in the queue. If the
        * queue is empty, this method will block until a key is added to the
        * queue. A key is skipped if it cannot be locked within the specified
        * wait time.
        * <p>
        * If a candidate key is found, a new ReadLatch for the key is
        * placed in the control map and returned; otherwise, null is
        * returned.
        * <p>
        * NOTE(review): when cWaitMillis is -1 (indefinite), the first pass
        * through the inner loop drives cWaitMillis negative and breaks, so
        * the outer loop degenerates into repeated zero-wait lock attempts
        * starting from the head of the list -- presumably an intentional
        * busy-retry, but confirm.
        *
        * @param cWaitMillis  the maximum amount of time (in milliseconds) to
        *                     wait to select a key and acquire a latch on it;
        *                     pass -1 to wait indefinitely
        *
        * @return a ReadLatch for the selected key or null if
        *         a candidate key was not found and latched within the specified
        *         time
        */
        protected ReadLatch select(long cWaitMillis)
            {
            List          listKeys   = getKeyList();
            ConcurrentMap mapControl = getControlMap();

            Object oKey;
            if (cWaitMillis == -1L)
                {
                // indefinite wait for the first candidate key
                oKey = peek(-1);
                }
            else
                {
                // deduct the time spent blocked in peek() from the budget
                long ldtStart = Base.getSafeTimeMillis();
                oKey = peek(cWaitMillis);
                cWaitMillis -= Math.max(0L, Base.getSafeTimeMillis() - ldtStart);
                }

            if (oKey == null)
                {
                // couldn't find an entry within the wait time
                return null;
                }

            do
                {
                // cWaitLatch grows by 10ms per skipped key, so later keys
                // are given progressively longer lock attempts
                long cWaitLatch = 0L;
                int  index      = 0;
                while (oKey != null)
                    {
                    boolean fRemoved = false;

                    // make sure the key can be locked within the specified time
                    if (mapControl.lock(oKey, cWaitLatch))
                        {
                        try
                            {
                            // make sure the key is still in the queue; it could
                            // have been removed by a call to get(), put(), or
                            // remove() before we had a chance to lock the key
                            if (remove(oKey))
                                {
                                // publish the latch in the control map so
                                // get() can discover the in-flight load
                                ReadLatch latch = instantiateReadLatch(oKey);
                                mapControl.put(oKey, latch);
                                return latch;
                                }
                            else
                                {
                                // someone else dequeued the key; the list
                                // shifted left, so do not advance the index
                                fRemoved = true;
                                }
                            }
                        finally
                            {
                            mapControl.unlock(oKey);
                            }
                        }

                    // adjust the wait time
                    cWaitMillis -= cWaitLatch;
                    if (cWaitMillis < 0)
                        {
                        // ran out of time
                        break;
                        }

                    // get the next key in the queue
                    try
                        {
                        oKey = listKeys.get(fRemoved ? index : ++index);
                        }
                    catch (IndexOutOfBoundsException e)
                        {
                        // reached the end of the (concurrently shrinking) list
                        break;
                        }
                    cWaitLatch += 10L;
                    }
                }
            while (cWaitMillis != 0L);

            return null;
            }
/**
* Remove all keys from the queue.
*/
public synchronized void clear()
{
getKeyMap().clear();
getKeyList().clear();
}
        /**
        * Return the length of the queue.
        * <p>
        * Note: unlike most other queue operations, this accessor is not
        * synchronized; it delegates directly to the underlying list.
        *
        * @return the length of the queue
        */
        public int size()
            {
            return getKeyList().size();
            }
/**
* For debugging purposes, present the queue in human-readable format.
*
* @return a String representation of this object
*/
public String toString()
{
return "ReadQueue: " + getKeyList();
}
        // ----- internal -----------------------------------------------

        /**
        * Return a list of keys in the queue.
        * <p>
        * The list preserves the order in which keys were queued; it is kept
        * in sync with the key map by add(), remove() and clear().
        *
        * @return a list of keys in the queue
        */
        protected List getKeyList()
            {
            return m_listQueued;
            }
/**
* Return a map of keys in the queue.
*
* Note: The map returned from this method is not thread-safe; therefore,
* a lock on this ReadQueue must be obtained before accessing
* the map
*
* @return a map of keys in the queue
*/
protected Map