net.spy.memcached.MemcachedClient
/**
* Copyright (C) 2006-2009 Dustin Sallings
* Copyright (C) 2009-2013 Couchbase, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
* IN THE SOFTWARE.
*
*
* Portions Copyright (C) 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* SPDX-License-Identifier: Apache-2.0
*/
package net.spy.memcached;
import net.spy.memcached.auth.AuthDescriptor;
import net.spy.memcached.auth.AuthThreadMonitor;
import net.spy.memcached.compat.SpyObject;
import net.spy.memcached.config.ClusterConfiguration;
import net.spy.memcached.ConfigurationPoller;
import net.spy.memcached.config.NodeEndPoint;
import net.spy.memcached.internal.BulkFuture;
import net.spy.memcached.internal.BulkGetFuture;
import net.spy.memcached.internal.GetConfigFuture;
import net.spy.memcached.internal.GetFuture;
import net.spy.memcached.internal.OperationFuture;
import net.spy.memcached.internal.SingleElementInfiniteIterator;
import net.spy.memcached.ops.CASOperationStatus;
import net.spy.memcached.ops.CancelledOperationStatus;
import net.spy.memcached.ops.ConcatenationType;
import net.spy.memcached.ops.ConfigurationType;
import net.spy.memcached.ops.DeleteConfigOperation;
import net.spy.memcached.ops.DeleteOperation;
import net.spy.memcached.ops.GetAndTouchOperation;
import net.spy.memcached.ops.GetConfigOperation;
import net.spy.memcached.ops.GetOperation;
import net.spy.memcached.ops.GetsOperation;
import net.spy.memcached.ops.Mutator;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.ops.OperationCallback;
import net.spy.memcached.ops.OperationErrorType;
import net.spy.memcached.ops.OperationException;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.OperationStatus;
import net.spy.memcached.ops.StatsOperation;
import net.spy.memcached.ops.StatusCode;
import net.spy.memcached.ops.StoreOperation;
import net.spy.memcached.ops.StoreType;
import net.spy.memcached.ops.TimedOutOperationStatus;
import net.spy.memcached.protocol.binary.BinaryOperationFactory;
import net.spy.memcached.transcoders.SerializingTranscoder;
import net.spy.memcached.transcoders.TranscodeService;
import net.spy.memcached.transcoders.Transcoder;
import net.spy.memcached.util.StringUtils;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
/**
* Client to a memcached server.
*
* Basic usage
*
*
 * The client can be run in static mode or dynamic mode. In basic usage the mode is determined
 * automatically from the endpoint specified: if the endpoint has a ".cfg." subdomain, the client is
 * initialized in dynamic mode.
*
 * // Use dynamic mode to leverage the ElastiCache Auto Discovery feature.
* // In dynamic mode, the number of servers in the cluster and their endpoint details are automatically picked up
* // using the configuration endpoint of the elasticache cluster.
* MemcachedClient c = new MemcachedClient(
* new InetSocketAddress("configurationEndpoint", portNum));
*
* // Store a value (async) for one hour
* c.set("someKey", 3600, someObject);
* // Retrieve a value.
* Object myObject = c.get("someKey");
*
*
 * In basic usage without a connection factory, the client mode is determined automatically.
*
* Advanced Usage
*
*
* MemcachedClient may be processing a great deal of asynchronous messages or
* possibly dealing with an unreachable memcached, which may delay processing.
* If a memcached is disabled, for example, MemcachedConnection will continue to
* attempt to reconnect and replay pending operations until it comes back up. To
* prevent this from causing your application to hang, you can use one of the
* asynchronous mechanisms to time out a request and cancel the operation to the
* server.
*
*
*
* // Get a memcached client connected over the binary protocol
* // The number of servers in the cluster and their endpoint details are automatically picked up
* // using the configuration endpoint of the elasticache cluster.
* MemcachedClient c = new MemcachedClient(new BinaryConnectionFactory(ClientMode.Dynamic),
* AddrUtil.getAddresses("configurationEndpoint:11211"));
* // or //
 * // To operate without the autodiscovery feature, use static mode (ClientMode.Static)
* MemcachedClient c = new MemcachedClient(new BinaryConnectionFactory(ClientMode.Static),
* AddrUtil.getAddresses("configurationEndpoint:11211"));
*
* // Try to get a value, for up to 5 seconds, and cancel if it
* // doesn't return
* Object myObj = null;
* Future<Object> f = c.asyncGet("someKey");
* try {
* myObj = f.get(5, TimeUnit.SECONDS);
* // throws expecting InterruptedException, ExecutionException
* // or TimeoutException
 * } catch (Exception e) {
* // Since we don't need this, go ahead and cancel the operation.
* // This is not strictly necessary, but it'll save some work on
* // the server. It is okay to cancel it if running.
* f.cancel(true);
* // Do other timeout related stuff
* }
*
*
 * Optionally, it is possible to activate a check that makes sure that
 * the node is alive and responding before running actual operations (even
 * before authentication). Only enable this if you are sure that you do not
* run into issues during connection (some memcached services have problems
* with it). You can enable it by setting the net.spy.verifyAliveOnConnect
* System Property to "true".
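 *
 * For example (an illustrative snippet, not part of this class):
 *
 * System.setProperty("net.spy.verifyAliveOnConnect", "true");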
*/
public class MemcachedClient extends SpyObject implements MemcachedClientIF,
ConnectionObserver {
protected final ClientMode clientMode;
protected volatile boolean shuttingDown;
protected final long operationTimeout;
protected MemcachedConnection mconn;
protected final OperationFactory opFact;
protected final Transcoder<Object> transcoder;
protected final TranscodeService tcService;
protected final AuthDescriptor authDescriptor;
protected final ConnectionFactory connFactory;
protected final AuthThreadMonitor authMonitor = new AuthThreadMonitor();
protected final ExecutorService executorService;
private NodeEndPoint configurationNode;
//Set default value to true to attempt config API first. The value is set to false if
//OperationNotSupportedException is thrown.
private boolean isConfigurationProtocolSupported = true;
//This is used in dynamic mode to track whether the client has been initialized with the set of cache nodes for the first time.
private boolean isConfigurationInitialized = false;
private Transcoder<Object> configTranscoder = new SerializingTranscoder();
private ConfigurationPoller configPoller;
/**
* Get a memcache client operating on the specified memcached locations.
*
* @param addrs the memcached locations
 * @throws IOException if connections cannot be established
*/
public MemcachedClient(InetSocketAddress... addrs) throws IOException {
//The connectionFactory is created later based on client mode.
this(null, Arrays.asList(addrs), true);
}
/**
* Get a memcache client over the specified memcached locations.
*
* @param addrs the socket addrs
* @throws IOException if connections cannot be established
*/
public MemcachedClient(List<InetSocketAddress> addrs) throws IOException {
//The connectionFactory is created later based on client mode.
this(null, addrs, true);
}
public MemcachedClient(ConnectionFactory cf, List<InetSocketAddress> addrs) throws IOException {
this(cf, addrs, cf != null && cf.getClientMode() == ClientMode.Unset);
}
/**
* Get a memcache client over the specified memcached locations.
*
* @param cf the connection factory to configure connections for this client
* @param addrs the socket addresses
* @throws IOException if connections cannot be established
*/
private MemcachedClient(ConnectionFactory cf, List<InetSocketAddress> addrs, boolean determineClientMode) throws IOException {
if (addrs == null) {
throw new NullPointerException("Server list required");
}
if (addrs.isEmpty()) {
throw new IllegalArgumentException("You must have at least one server to"
+ " connect to");
}
//A convenience check that determines the client mode from the DNS name
// when only one endpoint is specified.
if(determineClientMode){
boolean isClientModeDetermined = false;
if(addrs.size() == 1){
if(addrs.get(0) == null){
throw new NullPointerException("Socket address is null");
}
String hostName = addrs.get(0).getHostName();
//All config endpoints have a ".cfg." subdomain in their DNS name.
if(hostName != null && hostName.contains(".cfg.")){
cf = updateClientMode(cf, ClientMode.Dynamic);
isClientModeDetermined = true;
}
}
//Fallback to static mode
if (!isClientModeDetermined) {
cf = updateClientMode(cf, ClientMode.Static);
isClientModeDetermined = true;
}
}
if (cf == null) {
throw new NullPointerException("Connection factory required");
}
if (cf.getOperationTimeout() <= 0) {
throw new IllegalArgumentException("Operation timeout must be positive.");
}
if(cf.getClientMode() == ClientMode.Dynamic && addrs.size() > 1){
throw new IllegalArgumentException("Only one configuration endpoint is valid with dynamic client mode.");
}
connFactory = cf;
clientMode = cf.getClientMode();
tcService = new TranscodeService(cf.isDaemon());
transcoder = cf.getDefaultTranscoder();
opFact = cf.getOperationFactory();
assert opFact != null : "Connection factory failed to make op factory";
if(clientMode == ClientMode.Dynamic){
initializeClientUsingConfigEndPoint(cf, addrs.get(0));
} else {
setupConnection(cf, addrs);
}
operationTimeout = cf.getOperationTimeout();
authDescriptor = cf.getAuthDescriptor();
executorService = cf.getListenerExecutorService();
if (authDescriptor != null) {
addObserver(this);
}
}
private ConnectionFactory updateClientMode(ConnectionFactory f, ClientMode mode) {
if (f == null) {
f = new DefaultConnectionFactory(mode);
} else {
f.setClientMode(mode);
}
return f;
}
/**
* Establish a connection to the configuration endpoint and get the list of cache node endpoints. Then initialize the
* memcached client with the cache node endpoints list.
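 *
 * The configuration value returned by the endpoint is expected to look roughly like the
 * following sketch (the version number, hostnames and addresses are illustrative placeholders):
 *
 *   1
 *   host-1.example.com|10.0.0.1|11211 host-2.example.com|10.0.0.2|11211
 *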
 * @param cf the connection factory used to create the connection
 * @param configurationEndPoint the configuration endpoint of the cluster
 * @throws IOException if the connection cannot be established
*/
private void initializeClientUsingConfigEndPoint(ConnectionFactory cf, InetSocketAddress configurationEndPoint)
throws IOException{
configurationNode = new NodeEndPoint(configurationEndPoint.getHostName(), configurationEndPoint.getPort());
setupConnection(cf, Collections.singletonList(configurationEndPoint));
boolean checkKey = false;
String configResult = null;
try{
try{
//GetConfig
configResult = (String)this.getConfig(configurationEndPoint, ConfigurationType.CLUSTER, configTranscoder);
}catch(OperationNotSupportedException e){
checkKey = true;
}
if(checkKey || configResult == null || configResult.trim().isEmpty()){
configResult = (String)this.get(configurationEndPoint, ConfigurationType.CLUSTER.getValueWithNameSpace(), configTranscoder);
if(configResult != null && ! configResult.trim().isEmpty()){
isConfigurationProtocolSupported = false;
}
}
if(configResult != null && ! configResult.trim().isEmpty()){
//Parse configuration to get the list of cache servers.
ClusterConfiguration clusterConfiguration = AddrUtil.parseClusterTypeConfiguration(configResult);
//Initialize client with the actual set of endpoints.
mconn.notifyUpdate(clusterConfiguration);
mconn.waitForInitialConfigApplied();
isConfigurationInitialized = true;
}
}catch(OperationTimeoutException e){
getLogger().warn("Configuration endpoint timed out for config call. Leaving the initialization work to configuration poller.");
}
//Initialize and start the poller.
configPoller = new ConfigurationPoller(this, cf.getDynamicModePollingInterval(), cf.isDaemon());
configPoller.subscribeForClusterConfiguration(mconn);
}
private void setupConnection(ConnectionFactory cf, List<InetSocketAddress> addrs)
throws IOException {
mconn = cf.createConnection(addrs);
assert mconn != null : "Connection factory failed to make a connection";
}
public NodeEndPoint getConfigurationNode(){
return configurationNode;
}
/**
* Get the addresses of available servers.
*
*
 * This is based on a snapshot in time so shouldn't be considered completely
 * accurate, but it is useful for getting a feel for what's working and what's
 * not working.
*
*
* @return point-in-time view of currently available servers
*/
@Override
public Collection<SocketAddress> getAvailableServers() {
ArrayList<SocketAddress> rv = new ArrayList<SocketAddress>();
for (MemcachedNode node : mconn.getLocator().getAll()) {
if (node.isActive()) {
rv.add(node.getSocketAddress());
}
}
return rv;
}
/**
* Get the endpoints of available servers.
* Use this method instead of "getAvailableServers" if details about hostname, ipAddress and port of the servers
* are required.
*
*
 * This is based on a snapshot in time so shouldn't be considered completely
 * accurate, but it is useful for getting a feel for what's working and what's
 * not working.
*
*
* @return point-in-time view of currently available servers
*/
public Collection<NodeEndPoint> getAvailableNodeEndPoints() {
ArrayList<NodeEndPoint> rv = new ArrayList<NodeEndPoint>();
for (MemcachedNode node : mconn.getLocator().getAll()) {
if (node.isActive()) {
rv.add(node.getNodeEndPoint());
}
}
return rv;
}
/**
* Get the endpoints of all servers.
*
* @return point-in-time view of current list of servers
*/
public Collection<NodeEndPoint> getAllNodeEndPoints() {
ArrayList<NodeEndPoint> rv = new ArrayList<NodeEndPoint>();
for (MemcachedNode node : mconn.getLocator().getAll()) {
rv.add(node.getNodeEndPoint());
}
return rv;
}
/**
* Get the addresses of unavailable servers.
*
*
 * This is based on a snapshot in time so shouldn't be considered completely
 * accurate, but it is useful for getting a feel for what's working and what's
 * not working.
*
*
 * @return point-in-time view of currently unavailable servers
*/
@Override
public Collection<SocketAddress> getUnavailableServers() {
ArrayList<SocketAddress> rv = new ArrayList<SocketAddress>();
for (MemcachedNode node : mconn.getLocator().getAll()) {
if (!node.isActive()) {
rv.add(node.getSocketAddress());
}
}
return rv;
}
/**
* Get a read-only wrapper around the node locator wrapping this instance.
*
* @return this instance's NodeLocator
*/
@Override
public NodeLocator getNodeLocator() {
return mconn.getLocator().getReadonlyCopy();
}
/**
* Get the default transcoder that's in use.
*
* @return this instance's Transcoder
*/
@Override
public Transcoder getTranscoder() {
return transcoder;
}
@Override
public CountDownLatch broadcastOp(final BroadcastOpFactory of) {
return broadcastOp(of, mconn.getLocator().getAll(), true);
}
@Override
public CountDownLatch broadcastOp(final BroadcastOpFactory of,
Collection<MemcachedNode> nodes) {
return broadcastOp(of, nodes, true);
}
private CountDownLatch broadcastOp(BroadcastOpFactory of,
Collection<MemcachedNode> nodes, boolean checkShuttingDown) {
checkState();
if (checkShuttingDown && shuttingDown) {
throw new IllegalStateException("Shutting down");
}
return mconn.broadcastOperation(of, nodes);
}
private <T> OperationFuture<Boolean> asyncStore(StoreType storeType,
String key, int exp, T value, Transcoder<T> tc) {
CachedData co = tc.encode(value);
final CountDownLatch latch = new CountDownLatch(1);
final OperationFuture<Boolean> rv =
new OperationFuture<Boolean>(key, latch, operationTimeout,
executorService);
Operation op = opFact.store(storeType, key, co.getFlags(), exp,
co.getData(), new StoreOperation.Callback() {
@Override
public void receivedStatus(OperationStatus val) {
rv.set(val.isSuccess(), val);
}
@Override
public void gotData(String key, long cas) {
rv.setCas(cas);
}
@Override
public void complete() {
latch.countDown();
rv.signalComplete();
}
});
rv.setOperation(op);
enqueueOperation(key, op);
return rv;
}
private OperationFuture<Boolean> asyncStore(StoreType storeType, String key,
int exp, Object value) {
return asyncStore(storeType, key, exp, value, transcoder);
}
private <T> OperationFuture<Boolean> asyncCat(ConcatenationType catType,
long cas, String key, T value, Transcoder<T> tc) {
CachedData co = tc.encode(value);
final CountDownLatch latch = new CountDownLatch(1);
final OperationFuture<Boolean> rv = new OperationFuture<Boolean>(key,
latch, operationTimeout, executorService);
Operation op = opFact.cat(catType, cas, key, co.getData(),
new OperationCallback() {
@Override
public void receivedStatus(OperationStatus val) {
rv.set(val.isSuccess(), val);
}
@Override
public void complete() {
latch.countDown();
rv.signalComplete();
}
});
rv.setOperation(op);
enqueueOperation(key, op);
return rv;
}
/**
* Touch the given key to reset its expiration time with the default
* transcoder.
*
* @param key the key to fetch
* @param exp the new expiration to set for the given key
* @return a future that will hold the return value of whether or not the
* fetch succeeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> OperationFuture<Boolean> touch(final String key, final int exp) {
return touch(key, exp, transcoder);
}
/**
* Touch the given key to reset its expiration time.
*
* @param key the key to fetch
* @param exp the new expiration to set for the given key
* @param tc the transcoder to serialize and unserialize value
* @return a future that will hold the return value of whether or not the
* fetch succeeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> OperationFuture<Boolean> touch(final String key, final int exp,
final Transcoder<T> tc) {
final CountDownLatch latch = new CountDownLatch(1);
final OperationFuture<Boolean> rv =
new OperationFuture<Boolean>(key, latch, operationTimeout,
executorService);
Operation op = opFact.touch(key, exp, new OperationCallback() {
@Override
public void receivedStatus(OperationStatus status) {
rv.set(status.isSuccess(), status);
}
@Override
public void complete() {
latch.countDown();
rv.signalComplete();
}
});
rv.setOperation(op);
enqueueOperation(key, op);
return rv;
}
/**
* Append to an existing value in the cache.
*
* If 0 is passed in as the CAS identifier, it will override the value
* on the server without performing the CAS check.
*
*
* Note that the return will be false any time a mutation has not occurred.
*
*
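 * For example (an illustrative call; the key and appended value are placeholders):
 *
 * OperationFuture<Boolean> ok = client.append(0, "someKey", "-suffix");
 *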
* @param cas cas identifier (ignored in the ascii protocol)
* @param key the key to whose value will be appended
* @param val the value to append
* @return a future indicating success, false if there was no change to the
* value
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public OperationFuture<Boolean> append(long cas, String key, Object val) {
return append(cas, key, val, transcoder);
}
/**
* Append to an existing value in the cache.
*
*
* Note that the return will be false any time a mutation has not occurred.
*
*
* @param key the key to whose value will be appended
* @param val the value to append
* @return a future indicating success, false if there was no change to the
* value
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public OperationFuture<Boolean> append(String key, Object val) {
return append(0, key, val, transcoder);
}
/**
* Append to an existing value in the cache.
*
* If 0 is passed in as the CAS identifier, it will override the value
* on the server without performing the CAS check.
*
*
* Note that the return will be false any time a mutation has not occurred.
*
*
 * @param <T>
* @param cas cas identifier (ignored in the ascii protocol)
* @param key the key to whose value will be appended
* @param val the value to append
* @param tc the transcoder to serialize and unserialize the value
* @return a future indicating success
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> OperationFuture<Boolean> append(long cas, String key, T val,
Transcoder<T> tc) {
return asyncCat(ConcatenationType.append, cas, key, val, tc);
}
/**
* Append to an existing value in the cache.
*
* If 0 is passed in as the CAS identifier, it will override the value
* on the server without performing the CAS check.
*
*
* Note that the return will be false any time a mutation has not occurred.
*
*
 * @param <T>
* @param key the key to whose value will be appended
* @param val the value to append
* @param tc the transcoder to serialize and unserialize the value
* @return a future indicating success
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> OperationFuture<Boolean> append(String key, T val,
Transcoder<T> tc) {
return asyncCat(ConcatenationType.append, 0, key, val, tc);
}
/**
* Prepend to an existing value in the cache.
*
* If 0 is passed in as the CAS identifier, it will override the value
* on the server without performing the CAS check.
*
*
* Note that the return will be false any time a mutation has not occurred.
*
*
* @param cas cas identifier (ignored in the ascii protocol)
* @param key the key to whose value will be prepended
 * @param val the value to prepend
* @return a future indicating success
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public OperationFuture<Boolean> prepend(long cas, String key, Object val) {
return prepend(cas, key, val, transcoder);
}
/**
* Prepend to an existing value in the cache.
*
*
* Note that the return will be false any time a mutation has not occurred.
*
*
* @param key the key to whose value will be prepended
 * @param val the value to prepend
* @return a future indicating success
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public OperationFuture<Boolean> prepend(String key, Object val) {
return prepend(0, key, val, transcoder);
}
/**
* Prepend to an existing value in the cache.
*
* If 0 is passed in as the CAS identifier, it will override the value
* on the server without performing the CAS check.
*
*
* Note that the return will be false any time a mutation has not occurred.
*
*
 * @param <T>
* @param cas cas identifier (ignored in the ascii protocol)
* @param key the key to whose value will be prepended
 * @param val the value to prepend
* @param tc the transcoder to serialize and unserialize the value
* @return a future indicating success
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> OperationFuture<Boolean> prepend(long cas, String key, T val,
Transcoder<T> tc) {
return asyncCat(ConcatenationType.prepend, cas, key, val, tc);
}
/**
* Prepend to an existing value in the cache.
*
*
* Note that the return will be false any time a mutation has not occurred.
*
*
 * @param <T>
* @param key the key to whose value will be prepended
 * @param val the value to prepend
* @param tc the transcoder to serialize and unserialize the value
* @return a future indicating success
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> OperationFuture<Boolean> prepend(String key, T val,
Transcoder<T> tc) {
return asyncCat(ConcatenationType.prepend, 0, key, val, tc);
}
/**
* Asynchronous CAS operation.
*
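 * A typical optimistic-update flow (an illustrative sketch; the key and the updated
 * value are placeholders):
 *
 * CASValue<Object> casv = client.gets("someKey");
 * if (casv != null) {
 *   client.asyncCAS("someKey", casv.getCas(), updatedValue);
 * }
 *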
 * @param <T>
* @param key the key
* @param casId the CAS identifier (from a gets operation)
* @param value the new value
* @param tc the transcoder to serialize and unserialize the value
* @return a future that will indicate the status of the CAS
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> OperationFuture<CASResponse>
asyncCAS(String key, long casId, T value, Transcoder<T> tc) {
return asyncCAS(key, casId, 0, value, tc);
}
/**
* Asynchronous CAS operation.
*
 * @param <T>
* @param key the key
* @param casId the CAS identifier (from a gets operation)
* @param exp the expiration of this object
* @param value the new value
* @param tc the transcoder to serialize and unserialize the value
* @return a future that will indicate the status of the CAS
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> OperationFuture<CASResponse>
asyncCAS(String key, long casId, int exp, T value, Transcoder<T> tc) {
CachedData co = tc.encode(value);
final CountDownLatch latch = new CountDownLatch(1);
final OperationFuture<CASResponse> rv =
new OperationFuture<CASResponse>(key, latch, operationTimeout,
executorService);
Operation op = opFact.cas(StoreType.set, key, casId, co.getFlags(), exp,
co.getData(), new StoreOperation.Callback() {
@Override
public void receivedStatus(OperationStatus val) {
if (val instanceof CASOperationStatus) {
rv.set(((CASOperationStatus) val).getCASResponse(), val);
} else if (val instanceof CancelledOperationStatus) {
getLogger().debug("CAS operation cancelled");
} else if (val instanceof TimedOutOperationStatus) {
getLogger().debug("CAS operation timed out");
} else {
throw new RuntimeException("Unhandled state: " + val);
}
}
@Override
public void gotData(String key, long cas) {
rv.setCas(cas);
}
@Override
public void complete() {
latch.countDown();
rv.signalComplete();
}
});
rv.setOperation(op);
enqueueOperation(key, op);
return rv;
}
/**
* Asynchronous CAS operation using the default transcoder.
*
* @param key the key
* @param casId the CAS identifier (from a gets operation)
* @param value the new value
* @return a future that will indicate the status of the CAS
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public OperationFuture<CASResponse>
asyncCAS(String key, long casId, Object value) {
return asyncCAS(key, casId, value, transcoder);
}
/**
* Asynchronous CAS operation using the default transcoder with expiration.
*
* @param key the key
* @param casId the CAS identifier (from a gets operation)
* @param exp the expiration of this object
* @param value the new value
* @return a future that will indicate the status of the CAS
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public OperationFuture<CASResponse>
asyncCAS(String key, long casId, int exp, Object value) {
return asyncCAS(key, casId, exp, value, transcoder);
}
/**
* Perform a synchronous CAS operation.
*
 * @param <T>
* @param key the key
* @param casId the CAS identifier (from a gets operation)
* @param value the new value
* @param tc the transcoder to serialize and unserialize the value
* @return a CASResponse
* @throws OperationTimeoutException if global operation timeout is exceeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> CASResponse cas(String key, long casId, T value,
Transcoder<T> tc) {
return cas(key, casId, 0, value, tc);
}
/**
* Perform a synchronous CAS operation.
*
 * @param <T>
* @param key the key
* @param casId the CAS identifier (from a gets operation)
* @param exp the expiration of this object
* @param value the new value
* @param tc the transcoder to serialize and unserialize the value
* @return a CASResponse
* @throws OperationTimeoutException if global operation timeout is exceeded
* @throws CancellationException if operation was canceled
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> CASResponse cas(String key, long casId, int exp, T value,
Transcoder<T> tc) {
CASResponse casr;
try {
OperationFuture<CASResponse> casOp = asyncCAS(key,
casId, exp, value, tc);
casr = casOp.get(operationTimeout,
TimeUnit.MILLISECONDS);
return casr;
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted waiting for value", e);
} catch (ExecutionException e) {
if(e.getCause() instanceof CancellationException) {
throw (CancellationException) e.getCause();
} else {
throw new RuntimeException("Exception waiting for value", e);
}
} catch (TimeoutException e) {
throw new OperationTimeoutException("Timeout waiting for value: "
+ buildTimeoutMessage(operationTimeout, TimeUnit.MILLISECONDS), e);
}
}
/**
* Perform a synchronous CAS operation with the default transcoder.
*
* @param key the key
* @param casId the CAS identifier (from a gets operation)
* @param value the new value
* @return a CASResponse
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public CASResponse cas(String key, long casId, Object value) {
return cas(key, casId, value, transcoder);
}
/**
* Perform a synchronous CAS operation with the default transcoder.
*
* @param key the key
* @param casId the CAS identifier (from a gets operation)
* @param exp the expiration of this object
* @param value the new value
* @return a CASResponse
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public CASResponse cas(String key, long casId, int exp, Object value) {
return cas(key, casId, exp, value, transcoder);
}
/**
* Add an object to the cache iff it does not exist already.
*
*
* The {@code exp} value is passed along to memcached exactly as given,
* and will be processed per the memcached protocol specification:
*
*
*
* Note that the return will be false any time a mutation has not occurred.
*
*
*
*
* The actual value sent may either be Unix time (number of seconds since
* January 1, 1970, as a 32-bit value), or a number of seconds starting from
* current time. In the latter case, this number of seconds may not exceed
* 60*60*24*30 (number of seconds in 30 days); if the number sent by a client
* is larger than that, the server will consider it to be real Unix time value
* rather than an offset from current time.
*
*
*
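 * For example (illustrative; c is a connected client as in the examples above):
 *
 * c.add("someKey", 300, someObject);        // expires five minutes from now
 * c.add("someKey", 1700000000, someObject); // exceeds 60*60*24*30, treated as absolute Unix time
 *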
 * @param <T>
* @param key the key under which this object should be added.
* @param exp the expiration of this object
* @param o the object to store
* @param tc the transcoder to serialize and unserialize the value
* @return a future representing the processing of this operation
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> OperationFuture<Boolean> add(String key, int exp, T o,
Transcoder<T> tc) {
return asyncStore(StoreType.add, key, exp, o, tc);
}
/**
* Add an object to the cache (using the default transcoder) iff it does not
* exist already.
*
*
* The {@code exp} value is passed along to memcached exactly as given,
* and will be processed per the memcached protocol specification:
*
*
*
* Note that the return will be false any time a mutation has not occurred.
*
*
*
*
* The actual value sent may either be Unix time (number of seconds since
* January 1, 1970, as a 32-bit value), or a number of seconds starting from
* current time. In the latter case, this number of seconds may not exceed
* 60*60*24*30 (number of seconds in 30 days); if the number sent by a client
* is larger than that, the server will consider it to be real Unix time value
* rather than an offset from current time.
*
*
*
* @param key the key under which this object should be added.
* @param exp the expiration of this object
* @param o the object to store
* @return a future representing the processing of this operation
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public OperationFuture<Boolean> add(String key, int exp, Object o) {
return asyncStore(StoreType.add, key, exp, o, transcoder);
}
/**
* Set an object in the cache regardless of any existing value.
*
*
* The {@code exp} value is passed along to memcached exactly as given,
* and will be processed per the memcached protocol specification:
*
*
*
* Note that the return will be false any time a mutation has not occurred.
*
*
*
*
* The actual value sent may either be Unix time (number of seconds since
* January 1, 1970, as a 32-bit value), or a number of seconds starting from
* current time. In the latter case, this number of seconds may not exceed
* 60*60*24*30 (number of seconds in 30 days); if the number sent by a client
* is larger than that, the server will consider it to be real Unix time value
* rather than an offset from current time.
*
*
*
 * @param <T>
* @param key the key under which this object should be added.
* @param exp the expiration of this object
* @param o the object to store
* @param tc the transcoder to serialize and unserialize the value
* @return a future representing the processing of this operation
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> OperationFuture<Boolean> set(String key, int exp, T o,
Transcoder<T> tc) {
return asyncStore(StoreType.set, key, exp, o, tc);
}
/**
* Set an object in the cache (using the default transcoder) regardless of any
* existing value.
*
*
* The {@code exp} value is passed along to memcached exactly as given,
* and will be processed per the memcached protocol specification:
*
*
*
* Note that the return will be false any time a mutation has not occurred.
*
*
*
*
* The actual value sent may either be Unix time (number of seconds since
* January 1, 1970, as a 32-bit value), or a number of seconds starting from
* current time. In the latter case, this number of seconds may not exceed
* 60*60*24*30 (number of seconds in 30 days); if the number sent by a client
* is larger than that, the server will consider it to be real Unix time value
* rather than an offset from current time.
*
*
*
* @param key the key under which this object should be added.
* @param exp the expiration of this object
* @param o the object to store
* @return a future representing the processing of this operation
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public OperationFuture<Boolean> set(String key, int exp, Object o) {
return asyncStore(StoreType.set, key, exp, o, transcoder);
}
/**
* Replace an object with the given value iff there is already a value for the
* given key.
*
*
* The {@code exp} value is passed along to memcached exactly as given,
* and will be processed per the memcached protocol specification:
*
*
*
* Note that the return will be false any time a mutation has not occurred.
*
*
*
*
* The actual value sent may either be Unix time (number of seconds since
* January 1, 1970, as a 32-bit value), or a number of seconds starting from
* current time. In the latter case, this number of seconds may not exceed
* 60*60*24*30 (number of seconds in 30 days); if the number sent by a client
* is larger than that, the server will consider it to be real Unix time value
* rather than an offset from current time.
*
*
*
 * @param <T>
* @param key the key under which this object should be added.
* @param exp the expiration of this object
* @param o the object to store
* @param tc the transcoder to serialize and unserialize the value
* @return a future representing the processing of this operation
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> OperationFuture<Boolean> replace(String key, int exp, T o,
Transcoder<T> tc) {
return asyncStore(StoreType.replace, key, exp, o, tc);
}
/**
* Replace an object with the given value (transcoded with the default
* transcoder) iff there is already a value for the given key.
*
*
* The {@code exp} value is passed along to memcached exactly as given,
* and will be processed per the memcached protocol specification:
*
*
*
* Note that the return will be false any time a mutation has not occurred.
*
*
*
*
* The actual value sent may either be Unix time (number of seconds since
* January 1, 1970, as a 32-bit value), or a number of seconds starting from
* current time. In the latter case, this number of seconds may not exceed
* 60*60*24*30 (number of seconds in 30 days); if the number sent by a client
* is larger than that, the server will consider it to be real Unix time value
* rather than an offset from current time.
*
*
*
* @param key the key under which this object should be added.
* @param exp the expiration of this object
* @param o the object to store
* @return a future representing the processing of this operation
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public OperationFuture<Boolean> replace(String key, int exp, Object o) {
return asyncStore(StoreType.replace, key, exp, o, transcoder);
}
/**
* Get the given key asynchronously.
*
 * @param <T>
* @param key the key to fetch
* @param tc the transcoder to serialize and unserialize value
* @return a future that will hold the return value of the fetch
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> GetFuture<T> asyncGet(final String key, final Transcoder<T> tc) {
final CountDownLatch latch = new CountDownLatch(1);
final GetFuture<T> rv = new GetFuture<T>(latch, operationTimeout, key,
executorService);
Operation op = opFact.get(key, new GetOperation.Callback() {
private Future<T> val;
@Override
public void receivedStatus(OperationStatus status) {
rv.set(val, status);
}
@Override
public void gotData(String k, int flags, byte[] data) {
assert key.equals(k) : "Wrong key returned";
val =
tcService.decode(tc, new CachedData(flags, data, tc.getMaxSize()));
}
@Override
public void complete() {
latch.countDown();
rv.signalComplete();
}
});
rv.setOperation(op);
enqueueOperation(key, op);
return rv;
}
/**
* Get with a single key from the specified node.
*
 * @param <T>
* @param key the key to get
* @param tc the transcoder to serialize and unserialize value
* @return the result from the cache (null if there is none)
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
<T> T get(InetSocketAddress sa, final String key, final Transcoder<T> tc) {
try{
return asyncGet(sa, key, tc).get(operationTimeout, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted waiting for value", e);
} catch (ExecutionException e) {
throw new RuntimeException("Exception waiting for value", e);
} catch (TimeoutException e) {
throw new OperationTimeoutException("Timeout waiting for value", e);
}
}
/**
* Get the given key from the specified node.
 * @param <T>
 * @param sa - The InetSocketAddress of the node from which to fetch the key
 * @param key the key to fetch
 * @param tc the transcoder to serialize and unserialize value
* @return a future that will hold the return value of the fetch
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
<T> GetFuture<T> asyncGet(InetSocketAddress sa, final String key, final Transcoder<T> tc) {
final CountDownLatch latch = new CountDownLatch(1);
final GetFuture<T> rv = new GetFuture<T>(latch, operationTimeout, key, executorService);
Operation op = opFact.get(key, new GetOperation.Callback() {
private Future<T> val = null;
public void receivedStatus(OperationStatus status) {
rv.set(val, status);
}
public void gotData(String k, int flags, byte[] data) {
assert key.equals(k) : "Wrong key returned";
val =
tcService.decode(tc, new CachedData(flags, data, transcoder.getMaxSize()));
}
public void complete() {
latch.countDown();
}
});
rv.setOperation(op);
mconn.enqueueOperation(sa, op);
return rv;
}
/**
* Get the given key asynchronously and decode with the default transcoder.
*
* @param key the key to fetch
* @return a future that will hold the return value of the fetch
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public GetFuture<Object> asyncGet(final String key) {
return asyncGet(key, transcoder);
}
/**
* Gets (with CAS support) the given key asynchronously.
*
 * @param <T>
* @param key the key to fetch
* @param tc the transcoder to serialize and unserialize value
* @return a future that will hold the return value of the fetch
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> OperationFuture<CASValue<T>> asyncGets(final String key,
final Transcoder<T> tc) {
final CountDownLatch latch = new CountDownLatch(1);
final OperationFuture<CASValue<T>> rv =
new OperationFuture<CASValue<T>>(key, latch, operationTimeout,
executorService);
Operation op = opFact.gets(key, new GetsOperation.Callback() {
private CASValue<T> val;
@Override
public void receivedStatus(OperationStatus status) {
rv.set(val, status);
}
@Override
public void gotData(String k, int flags, long cas, byte[] data) {
assert key.equals(k) : "Wrong key returned";
val =
new CASValue<T>(cas, tc.decode(new CachedData(flags, data,
tc.getMaxSize())));
}
@Override
public void complete() {
latch.countDown();
rv.signalComplete();
}
});
rv.setOperation(op);
enqueueOperation(key, op);
return rv;
}
/**
* Gets (with CAS support) the given key asynchronously and decode using the
* default transcoder.
*
* @param key the key to fetch
* @return a future that will hold the return value of the fetch
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public OperationFuture<CASValue<Object>> asyncGets(final String key) {
return asyncGets(key, transcoder);
}
/**
* Gets (with CAS support) with a single key.
*
 * @param <T>
* @param key the key to get
* @param tc the transcoder to serialize and unserialize value
* @return the result from the cache and CAS id (null if there is none)
* @throws OperationTimeoutException if global operation timeout is exceeded
* @throws CancellationException if operation was canceled
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> CASValue<T> gets(String key, Transcoder<T> tc) {
try {
return asyncGets(key, tc).get(operationTimeout, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted waiting for value", e);
} catch (ExecutionException e) {
if(e.getCause() instanceof CancellationException) {
throw (CancellationException) e.getCause();
} else {
throw new RuntimeException("Exception waiting for value", e);
}
} catch (TimeoutException e) {
throw new OperationTimeoutException("Timeout waiting for value", e);
}
}
/**
* Get with a single key and reset its expiration.
*
 * @param <T>
* @param key the key to get
* @param exp the new expiration for the key
* @param tc the transcoder to serialize and unserialize value
* @return the result from the cache (null if there is none)
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws CancellationException if operation was canceled
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> CASValue<T> getAndTouch(String key, int exp, Transcoder<T> tc) {
try {
return asyncGetAndTouch(key, exp, tc).get(operationTimeout,
TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted waiting for value", e);
} catch (ExecutionException e) {
if(e.getCause() instanceof CancellationException) {
throw (CancellationException) e.getCause();
} else {
throw new RuntimeException("Exception waiting for value", e);
}
} catch (TimeoutException e) {
throw new OperationTimeoutException("Timeout waiting for value", e);
}
}
/**
* Get a single key and reset its expiration using the default transcoder.
*
* @param key the key to get
* @param exp the new expiration for the key
* @return the result from the cache and CAS id (null if there is none)
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public CASValue<Object> getAndTouch(String key, int exp) {
return getAndTouch(key, exp, transcoder);
}
/**
* Gets (with CAS support) with a single key using the default transcoder.
*
* @param key the key to get
* @return the result from the cache and CAS id (null if there is none)
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public CASValue<Object> gets(String key) {
return gets(key, transcoder);
}
/**
* Get with a single key.
*
 * @param <T>
* @param key the key to get
* @param tc the transcoder to serialize and unserialize value
* @return the result from the cache (null if there is none)
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws CancellationException if operation was canceled
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> T get(String key, Transcoder<T> tc) {
try {
return asyncGet(key, tc).get(operationTimeout, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted waiting for value", e);
} catch (ExecutionException e) {
if(e.getCause() instanceof CancellationException) {
throw (CancellationException) e.getCause();
} else {
throw new RuntimeException("Exception waiting for value", e);
}
} catch (TimeoutException e) {
throw new OperationTimeoutException("Timeout waiting for value: "
+ buildTimeoutMessage(operationTimeout, TimeUnit.MILLISECONDS), e);
}
}
/**
* Get with a single key and decode using the default transcoder.
*
* @param key the key to get
* @return the result from the cache (null if there is none)
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public Object get(String key) {
return get(key, transcoder);
}
/**
* Asynchronously get a bunch of objects from the cache.
*
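 * An illustrative pairing of keys with per-key transcoders (tc1 and tc2 are placeholder
 * transcoder instances):
 *
 * List<String> keys = Arrays.asList("k1", "k2");
 * List<Transcoder<Object>> tcs = Arrays.asList(tc1, tc2);
 * BulkFuture<Map<String, Object>> f = client.asyncGetBulk(keys.iterator(), tcs.iterator());
 *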
 * @param <T>
* @param keyIter Iterator that produces keys.
* @param tcIter an iterator of transcoders to serialize and unserialize
* values; the transcoders are matched with the keys in the same
* order. The minimum of the key collection length and number of
* transcoders is used and no exception is thrown if they do not
* match
* @return a Future result of that fetch
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> BulkFuture<Map<String, T>> asyncGetBulk(Iterator<String> keyIter,
Iterator<Transcoder<T>> tcIter) {
final Map<String, Future<T>> m = new ConcurrentHashMap<String, Future<T>>();
// This map does not need to be a ConcurrentHashMap
// because it is fully populated when it is used and
// used only to read the transcoder for a key.
final Map<String, Transcoder<T>> tcMap =
new HashMap<String, Transcoder<T>>();
// Break the gets down into groups by key
final Map<MemcachedNode, Collection<String>> chunks =
new HashMap<MemcachedNode, Collection<String>>();
final NodeLocator locator = mconn.getLocator();
while (keyIter.hasNext() && tcIter.hasNext()) {
String key = keyIter.next();
tcMap.put(key, tcIter.next());
StringUtils.validateKey(key, opFact instanceof BinaryOperationFactory);
final MemcachedNode primaryNode = locator.getPrimary(key);
MemcachedNode node = null;
if (primaryNode.isActive()) {
node = primaryNode;
} else {
for (Iterator<MemcachedNode> i = locator.getSequence(key); node == null
&& i.hasNext();) {
MemcachedNode n = i.next();
if (n.isActive()) {
node = n;
}
}
if (node == null) {
node = primaryNode;
}
}
assert node != null : "Didn't find a node for " + key;
Collection<String> ks = chunks.get(node);
if (ks == null) {
ks = new ArrayList<String>();
chunks.put(node, ks);
}
ks.add(key);
}
final AtomicInteger pendingChunks = new AtomicInteger(chunks.size());
int initialLatchCount = chunks.isEmpty() ? 0 : 1;
final CountDownLatch latch = new CountDownLatch(initialLatchCount);
final Collection<Operation> ops = new ArrayList<Operation>(chunks.size());
final BulkGetFuture<T> rv = new BulkGetFuture<T>(m, ops, latch, executorService);
GetOperation.Callback cb = new GetOperation.Callback() {
@Override
@SuppressWarnings("synthetic-access")
public void receivedStatus(OperationStatus status) {
if (status.getStatusCode() == StatusCode.ERR_NOT_MY_VBUCKET) {
pendingChunks.addAndGet(Integer.parseInt(status.getMessage()));
}
rv.setStatus(status);
}
@Override
public void gotData(String k, int flags, byte[] data) {
Transcoder<T> tc = tcMap.get(k);
m.put(k,
tcService.decode(tc, new CachedData(flags, data, tc.getMaxSize())));
}
@Override
public void complete() {
if (pendingChunks.decrementAndGet() <= 0) {
latch.countDown();
rv.signalComplete();
}
}
};
// Now that we know how many servers it breaks down into, and the latch
// is all set up, convert all of these strings collections to operations
final Map<MemcachedNode, Operation> mops =
new HashMap<MemcachedNode, Operation>();
for (Map.Entry<MemcachedNode, Collection<String>> me : chunks.entrySet()) {
Operation op = opFact.get(me.getValue(), cb);
mops.put(me.getKey(), op);
ops.add(op);
}
assert mops.size() == chunks.size();
mconn.checkState();
mconn.addOperations(mops);
return rv;
}
/**
* Asynchronously get a bunch of objects from the cache.
*
 * @param <T>
* @param keys the keys to request
* @param tcIter an iterator of transcoders to serialize and unserialize
* values; the transcoders are matched with the keys in the same
* order. The minimum of the key collection length and number of
* transcoders is used and no exception is thrown if they do not
* match
* @return a Future result of that fetch
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> BulkFuture<Map<String, T>> asyncGetBulk(Collection<String> keys,
Iterator<Transcoder<T>> tcIter) {
return asyncGetBulk(keys.iterator(), tcIter);
}
/**
* Asynchronously get a bunch of objects from the cache.
*
 * @param <T>
* @param keyIter Iterator for the keys to request
* @param tc the transcoder to serialize and unserialize values
* @return a Future result of that fetch
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> BulkFuture<Map<String, T>> asyncGetBulk(Iterator<String> keyIter,
Transcoder<T> tc) {
return asyncGetBulk(keyIter,
new SingleElementInfiniteIterator<Transcoder<T>>(tc));
}
/**
* Asynchronously get a bunch of objects from the cache.
*
 * @param <T>
* @param keys the keys to request
* @param tc the transcoder to serialize and unserialize values
* @return a Future result of that fetch
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> BulkFuture<Map<String, T>> asyncGetBulk(Collection<String> keys,
Transcoder<T> tc) {
return asyncGetBulk(keys, new SingleElementInfiniteIterator<Transcoder<T>>(
tc));
}
/**
* Asynchronously get a bunch of objects from the cache and decode them with
* the given transcoder.
*
* @param keyIter Iterator that produces the keys to request
* @return a Future result of that fetch
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public BulkFuture<Map<String, Object>> asyncGetBulk(
Iterator<String> keyIter) {
return asyncGetBulk(keyIter, transcoder);
}
/**
* Asynchronously get a bunch of objects from the cache and decode them with
* the given transcoder.
*
* @param keys the keys to request
* @return a Future result of that fetch
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public BulkFuture<Map<String, Object>> asyncGetBulk(Collection<String> keys) {
return asyncGetBulk(keys, transcoder);
}
/**
* Varargs wrapper for asynchronous bulk gets.
*
 * @param <T>
* @param tc the transcoder to serialize and unserialize value
 * @param keys one or more keys to get
* @return the future values of those keys
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> BulkFuture<Map<String, T>> asyncGetBulk(Transcoder<T> tc,
String... keys) {
return asyncGetBulk(Arrays.asList(keys), tc);
}
/**
* Varargs wrapper for asynchronous bulk gets with the default transcoder.
*
 * @param keys one or more keys to get
* @return the future values of those keys
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public BulkFuture<Map<String, Object>> asyncGetBulk(String... keys) {
return asyncGetBulk(Arrays.asList(keys), transcoder);
}
/**
* Get the given key to reset its expiration time.
*
* @param key the key to fetch
* @param exp the new expiration to set for the given key
* @return a future that will hold the return value of the fetch
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public OperationFuture<CASValue<Object>> asyncGetAndTouch(final String key,
final int exp) {
return asyncGetAndTouch(key, exp, transcoder);
}
/**
* Get the given key to reset its expiration time.
*
* @param key the key to fetch
* @param exp the new expiration to set for the given key
* @param tc the transcoder to serialize and unserialize value
* @return a future that will hold the return value of the fetch
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> OperationFuture<CASValue<T>> asyncGetAndTouch(final String key,
final int exp, final Transcoder<T> tc) {
final CountDownLatch latch = new CountDownLatch(1);
final OperationFuture<CASValue<T>> rv = new OperationFuture<CASValue<T>>(
key, latch, operationTimeout, executorService);
Operation op = opFact.getAndTouch(key, exp,
new GetAndTouchOperation.Callback() {
private CASValue<T> val;
@Override
public void receivedStatus(OperationStatus status) {
rv.set(val, status);
}
@Override
public void complete() {
latch.countDown();
rv.signalComplete();
}
@Override
public void gotData(String k, int flags, long cas, byte[] data) {
assert k.equals(key) : "Wrong key returned";
val =
new CASValue<T>(cas, tc.decode(new CachedData(flags, data,
tc.getMaxSize())));
}
});
rv.setOperation(op);
enqueueOperation(key, op);
return rv;
}
/**
* Get the values for multiple keys from the cache.
*
 * @param <T>
* @param keyIter Iterator that produces the keys
* @param tc the transcoder to serialize and unserialize value
* @return a map of the values (for each value that exists)
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws CancellationException if operation was canceled
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> Map<String, T> getBulk(Iterator<String> keyIter,
Transcoder<T> tc) {
try {
return asyncGetBulk(keyIter, tc).get(operationTimeout,
TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted getting bulk values", e);
} catch (ExecutionException e) {
if(e.getCause() instanceof CancellationException) {
throw (CancellationException) e.getCause();
} else {
throw new RuntimeException("Exception waiting for bulk values", e);
}
} catch (TimeoutException e) {
throw new OperationTimeoutException("Timeout waiting for bulk values: "
+ buildTimeoutMessage(operationTimeout, TimeUnit.MILLISECONDS), e);
}
}
/**
* Get the values for multiple keys from the cache.
*
* @param keyIter Iterator that produces the keys
* @return a map of the values (for each value that exists)
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public Map<String, Object> getBulk(Iterator<String> keyIter) {
return getBulk(keyIter, transcoder);
}
/**
* Get the values for multiple keys from the cache.
*
* @param <T>
* @param keys the keys
* @param tc the transcoder to serialize and unserialize value
* @return a map of the values (for each value that exists)
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> Map<String, T> getBulk(Collection<String> keys,
Transcoder<T> tc) {
return getBulk(keys.iterator(), tc);
}
/**
* Get the values for multiple keys from the cache.
*
* @param keys the keys
* @return a map of the values (for each value that exists)
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public Map<String, Object> getBulk(Collection<String> keys) {
return getBulk(keys, transcoder);
}
/**
* Get the values for multiple keys from the cache.
*
* @param <T>
* @param tc the transcoder to serialize and unserialize value
* @param keys the keys
* @return a map of the values (for each value that exists)
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public <T> Map<String, T> getBulk(Transcoder<T> tc, String... keys) {
return getBulk(Arrays.asList(keys), tc);
}
/**
* Get the values for multiple keys from the cache.
*
* @param keys the keys
* @return a map of the values (for each value that exists)
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public Map<String, Object> getBulk(String... keys) {
return getBulk(Arrays.asList(keys), transcoder);
}
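/*
 * Usage sketch for the bulk-get overloads above (illustrative only, not part of
 * this class; assumes a constructed MemcachedClient named "client"). Keys absent
 * from the cache are simply omitted from the returned map rather than mapped to
 * null; exception handling omitted:
 *
 *   Map<String, Object> values = client.getBulk("user:1", "user:2", "user:3");
 *   Object user1 = values.get("user:1"); // null here means the key was not found
 *
 *   // Non-blocking variant with the default transcoder:
 *   BulkFuture<Map<String, Object>> f = client.asyncGetBulk("user:1", "user:2");
 *   Map<String, Object> some = f.getSome(500, TimeUnit.MILLISECONDS); // partial results on timeout
 */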
private void enqueueOperation(String key, Operation op){
checkState();
mconn.enqueueOperation(key, op);
}
private void checkState() {
if (clientMode == ClientMode.Dynamic && !isConfigurationInitialized) {
throw new IllegalStateException("Client is not initialized");
}
}
/**
* Get the config from the given node using the default transcoder.
*
* @param addr - The node from which to retrieve the configuration
* @param type - config to get
* @return the result from the server.
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
public Object getConfig(InetSocketAddress addr, ConfigurationType type) {
return getConfig(addr, type, transcoder);
}
/**
* Get the config using the config protocol.
* The command format is "config get <type>"
* @param addr - The node from which to retrieve the configuration
* @param type config to get
* @param tc the transcoder to serialize and unserialize value
* @return the result from the server (null if there is none)
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
public <T> T getConfig(InetSocketAddress addr, ConfigurationType type, Transcoder<T> tc) {
try {
return asyncGetConfig(addr, type, tc).get(operationTimeout, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted waiting for config", e);
} catch(OperationNotSupportedException e){
throw e;
} catch (ExecutionException e) {
if(e.getCause() instanceof OperationException){
OperationException exp = (OperationException)e.getCause();
if(OperationErrorType.GENERAL.equals(exp.getType())){
throw new OperationNotSupportedException("This version of getConfig command is not supported.");
}
}
throw new RuntimeException("Exception waiting for config", e);
} catch (TimeoutException e) {
throw new OperationTimeoutException("Timeout waiting for config", e);
}
}
/**
* Get the given configurationType asynchronously.
*
* @param addr - The node from which to retrieve the configuration
* @param type the configurationType to fetch
* @param tc the transcoder to serialize and unserialize value
* @return a future that will hold the return value of the fetch
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
public <T> GetConfigFuture<T> asyncGetConfig(InetSocketAddress addr, final ConfigurationType type, final Transcoder<T> tc) {
final CountDownLatch latch = new CountDownLatch(1);
final GetConfigFuture<T> rv = new GetConfigFuture<T>(latch, operationTimeout, type, executorService);
Operation op = opFact.getConfig(type, new GetConfigOperation.Callback() {
private Future<T> val = null;
public void receivedStatus(OperationStatus status) {
rv.set(val, status);
}
public void gotData(ConfigurationType configurationType, int flags, byte[] data) {
assert type.equals(configurationType) : "Wrong type returned";
val =
tcService.decode(tc, new CachedData(flags, data, tc.getMaxSize()));
}
public void complete() {
latch.countDown();
}
});
rv.setOperation(op);
mconn.enqueueOperation(addr, op);
return rv;
}
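/*
 * Usage sketch for the configuration getters above (illustrative only, not part of
 * this class; assumes a constructed MemcachedClient named "client" and a reachable
 * node address; the host name is hypothetical). Engines that do not understand the
 * "config get" command surface an OperationNotSupportedException from the
 * synchronous variant:
 *
 *   InetSocketAddress addr = new InetSocketAddress("my-node.example.com", 11211);
 *   Object clusterConfig = client.getConfig(addr, ConfigurationType.CLUSTER);
 */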
/**
* Sets the configuration in the cache node for the specified configurationType.
*
* @param addr - The node where the configuration is set.
* @param configurationType the type under which this configuration should be added.
* @param o the configuration to store
* @return a future representing the processing of this operation
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
public OperationFuture<Boolean> setConfig(InetSocketAddress addr, ConfigurationType configurationType, Object o) {
return asyncSetConfig(addr, configurationType, o, transcoder);
}
/**
* Sets the configuration in the cache node for the specified configurationType.
*
* @param addr - The node where the configuration is set.
* @param configurationType the type under which this configuration should be added.
* @param o the configuration to store
* @param tc the transcoder to serialize and unserialize the configuration
* @return a future representing the processing of this operation
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
public <T> OperationFuture<Boolean> setConfig(InetSocketAddress addr, ConfigurationType configurationType, T o, Transcoder<T> tc) {
return asyncSetConfig(addr, configurationType, o, tc);
}
private <T> OperationFuture<Boolean> asyncSetConfig(InetSocketAddress addr,
ConfigurationType configurationType, T value, Transcoder<T> tc) {
CachedData co = tc.encode(value);
final CountDownLatch latch = new CountDownLatch(1);
final OperationFuture<Boolean> rv =
new OperationFuture<Boolean>(configurationType.getValue(), latch, operationTimeout, executorService);
Operation op = opFact.setConfig(configurationType, co.getFlags(), co.getData(),
new OperationCallback() {
public void receivedStatus(OperationStatus val) {
rv.set(val.isSuccess(), val);
}
public void complete() {
latch.countDown();
}
});
rv.setOperation(op);
mconn.enqueueOperation(addr, op);
return rv;
}
/**
* Delete the given configurationType from the cache server.
*
* @param addr - The node in which the configuration is deleted.
* @param configurationType the configurationType to delete
* @return whether or not the operation was performed
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
public OperationFuture<Boolean> deleteConfig(InetSocketAddress addr, ConfigurationType configurationType) {
final CountDownLatch latch = new CountDownLatch(1);
final OperationFuture<Boolean> rv = new OperationFuture<Boolean>(configurationType.getValue(),
latch, operationTimeout, executorService);
DeleteConfigOperation op = opFact.deleteConfig(configurationType, new OperationCallback() {
public void receivedStatus(OperationStatus s) {
rv.set(s.isSuccess(), s);
}
public void complete() {
latch.countDown();
}
});
rv.setOperation(op);
mconn.enqueueOperation(addr, op);
return rv;
}
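/*
 * Usage sketch for the configuration setters above (illustrative only, not part of
 * this class; assumes a constructed MemcachedClient named "client", a node address
 * "addr", and a configuration payload already serialized as a String; exception
 * handling omitted):
 *
 *   OperationFuture<Boolean> stored = client.setConfig(addr, ConfigurationType.CLUSTER, "config-payload");
 *   boolean ok = stored.get(2, TimeUnit.SECONDS);   // true if the node accepted the config
 *   OperationFuture<Boolean> removed = client.deleteConfig(addr, ConfigurationType.CLUSTER);
 */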
/**
* Get the versions of all of the connected memcacheds.
*
* @return a Map of SocketAddress to String for connected servers
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public Map<SocketAddress, String> getVersions() {
final Map<SocketAddress, String> rv =
new ConcurrentHashMap<SocketAddress, String>();
CountDownLatch blatch = broadcastOp(new BroadcastOpFactory() {
@Override
public Operation newOp(final MemcachedNode n,
final CountDownLatch latch) {
final SocketAddress sa = n.getSocketAddress();
return opFact.version(new OperationCallback() {
@Override
public void receivedStatus(OperationStatus s) {
rv.put(sa, s.getMessage());
}
@Override
public void complete() {
latch.countDown();
}
});
}
});
try {
blatch.await(operationTimeout, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted waiting for versions", e);
}
return rv;
}
/**
* Get all of the stats from all of the connections.
*
* @return a Map of a Map of stats replies by SocketAddress
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public Map<SocketAddress, Map<String, String>> getStats() {
return getStats(null);
}
/**
* Get a set of stats from all connections.
*
* @param arg which stats to get
* @return a Map of the server SocketAddress to a map of String stat keys to
* String stat values.
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public Map<SocketAddress, Map<String, String>> getStats(final String arg) {
final Map<SocketAddress, Map<String, String>> rv =
new HashMap<SocketAddress, Map<String, String>>();
CountDownLatch blatch = broadcastOp(new BroadcastOpFactory() {
@Override
public Operation newOp(final MemcachedNode n,
final CountDownLatch latch) {
final SocketAddress sa = n.getSocketAddress();
rv.put(sa, new HashMap<String, String>());
return opFact.stats(arg, new StatsOperation.Callback() {
@Override
public void gotStat(String name, String val) {
rv.get(sa).put(name, val);
}
@Override
@SuppressWarnings("synthetic-access")
public void receivedStatus(OperationStatus status) {
if (!status.isSuccess()) {
getLogger().warn("Unsuccessful stat fetch: %s", status);
}
}
@Override
public void complete() {
latch.countDown();
}
});
}
});
try {
blatch.await(operationTimeout, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted waiting for stats", e);
}
return rv;
}
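/*
 * Usage sketch for the broadcast calls above (illustrative only, not part of this
 * class; assumes a constructed MemcachedClient named "client"). Both calls fan out
 * one operation per connected node and wait up to the operation timeout for replies:
 *
 *   Map<SocketAddress, String> versions = client.getVersions();
 *   Map<SocketAddress, Map<String, String>> stats = client.getStats();
 *   for (Map.Entry<SocketAddress, Map<String, String>> e : stats.entrySet()) {
 *     System.out.println(e.getKey() + " curr_items=" + e.getValue().get("curr_items"));
 *   }
 */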
private long mutate(Mutator m, String key, long by, long def, int exp) {
final AtomicLong rv = new AtomicLong();
final CountDownLatch latch = new CountDownLatch(1);
enqueueOperation(key, opFact.mutate(m, key, by, def, exp,
new OperationCallback() {
@Override
public void receivedStatus(OperationStatus s) {
// XXX: Potential abstraction leak.
// The handling of incr/decr in the binary protocol
// allows us to avoid string processing.
rv.set(Long.parseLong(s.isSuccess() ? s.getMessage() : "-1"));
}
@Override
public void complete() {
latch.countDown();
}
}));
try {
if (!latch.await(operationTimeout, TimeUnit.MILLISECONDS)) {
throw new OperationTimeoutException("Mutate operation timed out,"
+ "unable to modify counter [" + key + ']');
}
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted", e);
}
getLogger().debug("Mutation returned %s", rv);
return rv.get();
}
/**
* Increment the given key by the given amount.
*
* Due to the way the memcached server operates on items, incremented and
* decremented items will be returned as Strings with any operations that
* return a value.
*
* @param key the key
* @param by the amount to increment
* @return the new value (-1 if the key doesn't exist)
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public long incr(String key, long by) {
return mutate(Mutator.incr, key, by, 0, -1);
}
/**
* Increment the given key by the given amount.
*
* Due to the way the memcached server operates on items, incremented and
* decremented items will be returned as Strings with any operations that
* return a value.
*
* @param key the key
* @param by the amount to increment
* @return the new value (-1 if the key doesn't exist)
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public long incr(String key, int by) {
return mutate(Mutator.incr, key, by, 0, -1);
}
/**
* Decrement the given key by the given value.
*
* Due to the way the memcached server operates on items, incremented and
* decremented items will be returned as Strings with any operations that
* return a value.
*
* @param key the key
* @param by the value
* @return the new value (-1 if the key doesn't exist)
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public long decr(String key, long by) {
return mutate(Mutator.decr, key, by, 0, -1);
}
/**
* Decrement the given key by the given value.
*
* Due to the way the memcached server operates on items, incremented and
* decremented items will be returned as Strings with any operations that
* return a value.
*
* @param key the key
* @param by the value
* @return the new value (-1 if the key doesn't exist)
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public long decr(String key, int by) {
return mutate(Mutator.decr, key, by, 0, -1);
}
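/*
 * Usage sketch for the synchronous counter calls above (illustrative only, not part
 * of this class; assumes a constructed MemcachedClient named "client"). These
 * overloads do not create the key, so -1 signals that the counter does not exist:
 *
 *   long v = client.incr("page:hits", 1);   // -1 if "page:hits" has never been stored
 *   long w = client.decr("page:hits", 2);   // counters are clamped at zero on the server
 */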
/**
* Increment the given counter, returning the new value.
*
* Due to the way the memcached server operates on items, incremented and
* decremented items will be returned as Strings with any operations that
* return a value.
*
* @param key the key
* @param by the amount to increment
* @param def the default value (if the counter does not exist)
* @param exp the expiration of this object
* @return the new value, or -1 if we were unable to increment or add
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public long incr(String key, long by, long def, int exp) {
return mutateWithDefault(Mutator.incr, key, by, def, exp);
}
/**
* Increment the given counter, returning the new value.
*
* Due to the way the memcached server operates on items, incremented and
* decremented items will be returned as Strings with any operations that
* return a value.
*
* @param key the key
* @param by the amount to increment
* @param def the default value (if the counter does not exist)
* @param exp the expiration of this object
* @return the new value, or -1 if we were unable to increment or add
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public long incr(String key, int by, long def, int exp) {
return mutateWithDefault(Mutator.incr, key, by, def, exp);
}
/**
* Decrement the given counter, returning the new value.
*
* Due to the way the memcached server operates on items, incremented and
* decremented items will be returned as Strings with any operations that
* return a value.
*
* @param key the key
* @param by the amount to decrement
* @param def the default value (if the counter does not exist)
* @param exp the expiration of this object
* @return the new value, or -1 if we were unable to decrement or add
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public long decr(String key, long by, long def, int exp) {
return mutateWithDefault(Mutator.decr, key, by, def, exp);
}
/**
* Decrement the given counter, returning the new value.
*
* Due to the way the memcached server operates on items, incremented and
* decremented items will be returned as Strings with any operations that
* return a value.
*
* @param key the key
* @param by the amount to decrement
* @param def the default value (if the counter does not exist)
* @param exp the expiration of this object
* @return the new value, or -1 if we were unable to decrement or add
* @throws OperationTimeoutException if the global operation timeout is
* exceeded
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public long decr(String key, int by, long def, int exp) {
return mutateWithDefault(Mutator.decr, key, by, def, exp);
}
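/*
 * Usage sketch for the default-seeding counter calls above (illustrative only, not
 * part of this class; assumes a constructed MemcachedClient named "client"). When
 * the counter is missing, the client adds it with the given default and expiration
 * and returns that default; if the add loses a race, the mutation is retried:
 *
 *   long hits = client.incr("page:hits", 1, 0, 3600); // returns 0 on the first call if the key was absent
 */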
private long mutateWithDefault(Mutator t, String key, long by, long def,
int exp) {
long rv = mutate(t, key, by, def, exp);
// The ascii protocol doesn't support defaults, so I added them
// manually here.
if (rv == -1) {
Future<Boolean> f = asyncStore(StoreType.add, key, exp,
String.valueOf(def));
try {
if (f.get(operationTimeout, TimeUnit.MILLISECONDS)) {
rv = def;
} else {
rv = mutate(t, key, by, 0, exp);
assert rv != -1 : "Failed to mutate or init value";
}
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted waiting for store", e);
} catch (ExecutionException e) {
if(e.getCause() instanceof CancellationException) {
throw (CancellationException) e.getCause();
} else {
throw new RuntimeException("Failed waiting for store", e);
}
} catch (TimeoutException e) {
throw new OperationTimeoutException("Timeout waiting to mutate or init"
+ " value" + buildTimeoutMessage(operationTimeout,
TimeUnit.MILLISECONDS), e);
}
}
return rv;
}
private OperationFuture<Long> asyncMutate(Mutator m, String key, long by,
long def, int exp) {
if (!(opFact instanceof BinaryOperationFactory) && (def != 0 || exp != -1)) {
throw new UnsupportedOperationException("Default value or expiration "
+ "time are not supported on the async mutate methods. Use either the "
+ "binary protocol or the sync variant.");
}
final CountDownLatch latch = new CountDownLatch(1);
final OperationFuture<Long> rv =
new OperationFuture<Long>(key, latch, operationTimeout, executorService);
Operation op = opFact.mutate(m, key, by, def, exp,
new OperationCallback() {
@Override
public void receivedStatus(OperationStatus s) {
rv.set(Long.valueOf(s.isSuccess() ? s.getMessage() : "-1"), s);
}
@Override
public void complete() {
latch.countDown();
rv.signalComplete();
}
});
enqueueOperation(key, op);
rv.setOperation(op);
return rv;
}
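/*
 * Usage sketch for the asynchronous counter calls that follow (illustrative only,
 * not part of this class; assumes a constructed MemcachedClient named "client";
 * exception handling omitted). Default values and expirations are only honored here
 * when the client uses the binary protocol; with the ascii protocol those overloads
 * throw UnsupportedOperationException, as checked above:
 *
 *   OperationFuture<Long> f = client.asyncIncr("page:hits", 1);
 *   long v = f.get(2, TimeUnit.SECONDS); // -1 if the increment failed
 */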
/**
* Asynchronous increment.
*
* @param key key to increment
* @param by the amount to increment the value by
* @return a future with the incremented value, or -1 if the increment failed.
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public OperationFuture<Long> asyncIncr(String key, long by) {
return asyncMutate(Mutator.incr, key, by, 0, -1);
}
/**
* Asynchronous increment.
*
* @param key key to increment
* @param by the amount to increment the value by
* @return a future with the incremented value, or -1 if the increment failed.
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public OperationFuture<Long> asyncIncr(String key, int by) {
return asyncMutate(Mutator.incr, key, by, 0, -1);
}
/**
* Asynchronous decrement.
*
* @param key key to decrement
* @param by the amount to decrement the value by
* @return a future with the decremented value, or -1 if the decrement failed.
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public OperationFuture<Long> asyncDecr(String key, long by) {
return asyncMutate(Mutator.decr, key, by, 0, -1);
}
/**
* Asynchronous decrement.
*
* @param key key to decrement
* @param by the amount to decrement the value by
* @return a future with the decremented value, or -1 if the decrement failed.
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public OperationFuture<Long> asyncDecr(String key, int by) {
return asyncMutate(Mutator.decr, key, by, 0, -1);
}
/**
* Asynchronous increment.
*
* @param key key to increment
* @param by the amount to increment the value by
* @param def the default value (if the counter does not exist)
* @param exp the expiration of this object
* @return a future with the incremented value, or -1 if the increment failed.
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public OperationFuture<Long> asyncIncr(String key, long by, long def,
int exp) {
return asyncMutate(Mutator.incr, key, by, def, exp);
}
/**
* Asynchronous increment.
*
* @param key key to increment
* @param by the amount to increment the value by
* @param def the default value (if the counter does not exist)
* @param exp the expiration of this object
* @return a future with the incremented value, or -1 if the increment failed.
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public OperationFuture<Long> asyncIncr(String key, int by, long def,
int exp) {
return asyncMutate(Mutator.incr, key, by, def, exp);
}
/**
* Asynchronous decrement.
*
* @param key key to decrement
* @param by the amount to decrement the value by
* @param def the default value (if the counter does not exist)
* @param exp the expiration of this object
* @return a future with the decremented value, or -1 if the decrement failed.
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public OperationFuture<Long> asyncDecr(String key, long by, long def,
int exp) {
return asyncMutate(Mutator.decr, key, by, def, exp);
}
/**
* Asynchronous decrement.
*
* @param key key to decrement
* @param by the amount to decrement the value by
* @param def the default value (if the counter does not exist)
* @param exp the expiration of this object
* @return a future with the decremented value, or -1 if the decrement failed.
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public OperationFuture<Long> asyncDecr(String key, int by, long def,
int exp) {
return asyncMutate(Mutator.decr, key, by, def, exp);
}
/**
* Asynchronous increment.
*
* @param key key to increment
* @param by the amount to increment the value by
* @param def the default value (if the counter does not exist)
* @return a future with the incremented value, or -1 if the increment failed.
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public OperationFuture<Long> asyncIncr(String key, long by, long def) {
return asyncMutate(Mutator.incr, key, by, def, 0);
}
/**
* Asynchronous increment.
*
* @param key key to increment
* @param by the amount to increment the value by
* @param def the default value (if the counter does not exist)
* @return a future with the incremented value, or -1 if the increment failed.
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public OperationFuture<Long> asyncIncr(String key, int by, long def) {
return asyncMutate(Mutator.incr, key, by, def, 0);
}
/**
* Asynchronous decrement.
*
* @param key key to decrement
* @param by the amount to decrement the value by
* @param def the default value (if the counter does not exist)
* @return a future with the decremented value, or -1 if the decrement failed.
* @throws IllegalStateException in the rare circumstance where queue is too
* full to accept any more requests
*/
@Override
public OperationFuture<Long>