All Downloads are FREE. Search and download functionalities are using the official Maven repository.

net.spy.memcached.MemcachedClient Maven / Gradle / Ivy

Go to download

Amazon ElastiCache Cluster Client is an enhanced Java library to connect to ElastiCache clusters. This client library has been built upon Spymemcached and is released under the Amazon Software License.

There is a newer version: 1.2.2
Show newest version
/**
 * Copyright (C) 2006-2009 Dustin Sallings
 * Copyright (C) 2009-2011 Couchbase, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
 * IN THE SOFTWARE.
 * 
 * 
 * Portions Copyright (C) 2012-2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * 
 * Licensed under the Amazon Software License (the "License"). You may not use this 
 * file except in compliance with the License. A copy of the License is located at
 *  http://aws.amazon.com/asl/
 * or in the "license" file accompanying this file. This file is distributed on 
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
 * implied. See the License for the specific language governing permissions and 
 * limitations under the License. 
 */

package net.spy.memcached;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;

import net.spy.memcached.auth.AuthDescriptor;
import net.spy.memcached.auth.AuthThreadMonitor;
import net.spy.memcached.compat.SpyObject;
import net.spy.memcached.config.ClusterConfiguration;
import net.spy.memcached.ConfigurationPoller;
import net.spy.memcached.config.NodeEndPoint;
import net.spy.memcached.internal.BulkFuture;
import net.spy.memcached.internal.BulkGetFuture;
import net.spy.memcached.internal.GetConfigFuture;
import net.spy.memcached.internal.GetFuture;
import net.spy.memcached.internal.OperationFuture;
import net.spy.memcached.internal.SingleElementInfiniteIterator;
import net.spy.memcached.ops.CASOperationStatus;
import net.spy.memcached.ops.CancelledOperationStatus;
import net.spy.memcached.ops.ConcatenationType;
import net.spy.memcached.ops.ConfigurationType;
import net.spy.memcached.ops.DeleteConfigOperation;
import net.spy.memcached.ops.DeleteOperation;
import net.spy.memcached.ops.GetAndTouchOperation;
import net.spy.memcached.ops.GetConfigOperation;
import net.spy.memcached.ops.GetOperation;
import net.spy.memcached.ops.GetsOperation;
import net.spy.memcached.ops.Mutator;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.ops.OperationCallback;
import net.spy.memcached.ops.OperationErrorType;
import net.spy.memcached.ops.OperationException;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.OperationStatus;
import net.spy.memcached.ops.StatsOperation;
import net.spy.memcached.ops.StoreType;
import net.spy.memcached.ops.TimedOutOperationStatus;
import net.spy.memcached.transcoders.SerializingTranscoder;
import net.spy.memcached.transcoders.TranscodeService;
import net.spy.memcached.transcoders.Transcoder;
import net.spy.memcached.util.StringUtils;

/**
 * Client to a memcached server.
 *
 * <h2>Basic usage</h2>
 *
 * <pre>{@code
 * // The client can be run in static mode or dynamic mode. In basic usage the
 * // mode is determined automatically from the endpoint specified: if the
 * // endpoint host name contains the ".cfg." subdomain, the client is
 * // initialized in dynamic mode.
 * //
 * // Use dynamic mode to leverage the ElastiCache Auto Discovery feature.
 * // In dynamic mode, the number of servers in the cluster and their endpoint
 * // details are automatically picked up using the configuration endpoint of
 * // the ElastiCache cluster.
 * MemcachedClient c = new MemcachedClient(
 *     new InetSocketAddress("configurationEndpoint", portNum));
 *
 * // Store a value (async) for one hour
 * c.set("someKey", 3600, someObject);
 * // Retrieve a value.
 * Object myObject = c.get("someKey");
 * }</pre>
 *
 * <h2>Advanced Usage</h2>
 *
 * <p>MemcachedClient may be processing a great deal of asynchronous messages or
 * possibly dealing with an unreachable memcached, which may delay processing.
 * If a memcached is disabled, for example, MemcachedConnection will continue to
 * attempt to reconnect and replay pending operations until it comes back up. To
 * prevent this from causing your application to hang, you can use one of the
 * asynchronous mechanisms to time out a request and cancel the operation to the
 * server.</p>
 *
 * <pre>{@code
 * // Get a memcached client connected over the binary protocol.
 * // The number of servers in the cluster and their endpoint details are
 * // automatically picked up using the configuration endpoint of the
 * // ElastiCache cluster.
 * MemcachedClient c = new MemcachedClient(new BinaryConnectionFactory(ClientMode.Dynamic),
 *     AddrUtil.getAddresses("configurationEndpoint:11211"));
 * // or, for operating without the Auto Discovery feature, use static mode:
 * MemcachedClient c = new MemcachedClient(new BinaryConnectionFactory(ClientMode.Static),
 *     AddrUtil.getAddresses("configurationEndpoint:11211"));
 *
 * // Try to get a value, for up to 5 seconds, and cancel if it
 * // doesn't return
 * Object myObj = null;
 * Future<Object> f = c.asyncGet("someKey");
 * try {
 *     myObj = f.get(5, TimeUnit.SECONDS);
 * // expecting InterruptedException, ExecutionException or TimeoutException
 * } catch (Exception e) {
 *     // Since we don't need this, go ahead and cancel the operation.
 *     // This is not strictly necessary, but it'll save some work on
 *     // the server. It is okay to cancel it if running.
 *     f.cancel(true);
 *     // Do other timeout related stuff
 * }
 * }</pre>
 *
*/ public class MemcachedClient extends SpyObject implements MemcachedClientIF, ConnectionObserver { protected final ClientMode clientMode; protected volatile boolean shuttingDown = false; protected final long operationTimeout; protected MemcachedConnection mconn; protected final OperationFactory opFact; protected final Transcoder transcoder; protected final TranscodeService tcService; protected final AuthDescriptor authDescriptor; protected final ConnectionFactory connFactory; protected final AuthThreadMonitor authMonitor = new AuthThreadMonitor(); private NodeEndPoint configurationNode; //Set default value to true to attempt config API first. The value is set to false if //OperationNotSupportedException is thrown. private boolean isConfigurationProtocolSupported = true; //This is used to dynamic mode to track whether the client is initialized with set of cache nodes for the first time. private boolean isConfigurationInitialized = false; private Transcoder configTranscoder = new SerializingTranscoder(); private ConfigurationPoller configPoller; /** * Get a memcache client operating on the specified memcached locations. * * @param addrs the memcached locations * @throws IOException */ public MemcachedClient(InetSocketAddress... addrs) throws IOException { //The connectionFactory is created later based on client mode. this(null, Arrays.asList(addrs), true); } /** * Get a memcache client over the specified memcached locations. * * @param addrs the socket addrs * @throws IOException if connections cannot be established */ public MemcachedClient(List addrs) throws IOException { //The connectionFactory is created later based on client mode. this(null, addrs, true); } public MemcachedClient(ConnectionFactory cf, List addrs) throws IOException{ this(cf, addrs, false); } /** * Get a memcache client over the specified memcached locations. 
* * @param cf the connection factory to configure connections for this client * @param addrs the socket addresses * @throws IOException if connections cannot be established */ private MemcachedClient(ConnectionFactory cf, List addrs, boolean determineClientMode) throws IOException{ if (addrs == null) { throw new NullPointerException("Server list required"); } if (addrs.isEmpty()) { throw new IllegalArgumentException("You must have at least one server to" + " connect to"); } //An internal customer convenience check to determine whether the client mode based on // the DNS name if only one endpoint is specified. if(determineClientMode){ if(addrs.size() == 1){ if(addrs.get(0) == null){ throw new NullPointerException("Socket address is null"); } String hostName = addrs.get(0).getHostName(); //All config endpoints has ".cfg." subdomain in the DNS name. if(hostName != null && hostName.contains(".cfg.")){ cf = new DefaultConnectionFactory(ClientMode.Dynamic); } } //Fallback to static mode if(cf == null){ cf = new DefaultConnectionFactory(ClientMode.Static); } } if (cf == null) { throw new NullPointerException("Connection factory required"); } if (cf.getOperationTimeout() <= 0) { throw new IllegalArgumentException("Operation timeout must be positive."); } if(cf.getClientMode() == ClientMode.Dynamic && addrs.size() > 1){ throw new IllegalArgumentException("Only one configuration endpoint is valid with dynamic client mode."); } connFactory = cf; clientMode = cf.getClientMode(); tcService = new TranscodeService(cf.isDaemon()); transcoder = cf.getDefaultTranscoder(); opFact = cf.getOperationFactory(); assert opFact != null : "Connection factory failed to make op factory"; operationTimeout = cf.getOperationTimeout(); authDescriptor = cf.getAuthDescriptor(); if (authDescriptor != null) { addObserver(this); } if(clientMode == ClientMode.Dynamic){ initializeClientUsingConfigEndPoint(cf, addrs.get(0)); } else { setupConnection(cf, addrs); } } /** * Establish a connection to the 
configuration endpoint and get the list of cache node endpoints. Then initialize the * memcached client with the cache node endpoints list. * @param cf * @param addrs * @throws IOException */ private void initializeClientUsingConfigEndPoint(ConnectionFactory cf, InetSocketAddress configurationEndPoint) throws IOException{ configurationNode = new NodeEndPoint(configurationEndPoint.getHostName(), configurationEndPoint.getPort()); setupConnection(cf, Collections.singletonList(configurationEndPoint)); String configResult; try{ try{ //GetConfig configResult = (String)this.getConfig(configurationEndPoint, ConfigurationType.CLUSTER, configTranscoder); }catch(OperationNotSupportedException e){ configResult = (String)this.get(configurationEndPoint, ConfigurationType.CLUSTER.getValueWithNameSpace(), configTranscoder); isConfigurationProtocolSupported = false; } if(configResult != null && ! configResult.trim().isEmpty()){ //Parse configuration to get the list of cache servers. ClusterConfiguration clusterConfiguration = AddrUtil.parseClusterTypeConfiguration(configResult); //Initialize client with the actual set of endpoints. mconn.notifyUpdate(clusterConfiguration); isConfigurationInitialized = true; } }catch(OperationTimeoutException e){ getLogger().warn("Configuration endpoint timed out for config call. Leaving the initialization work to configuration poller."); } //Initialize and start the poller. configPoller = new ConfigurationPoller(this, cf.getDynamicModePollingInterval()); configPoller.subscribeForClusterConfiguration(mconn); } private void setupConnection(ConnectionFactory cf, List addrs) throws IOException { mconn = cf.createConnection(addrs); assert mconn != null : "Connection factory failed to make a connection"; } public NodeEndPoint getConfigurationNode(){ return configurationNode; } /** * Get the addresses of available servers. * *

* This is based on a snapshot in time so shouldn't be considered completely * accurate, but is a useful for getting a feel for what's working and what's * not working. *

* * @return point-in-time view of currently available servers */ public Collection getAvailableServers() { ArrayList rv = new ArrayList(); for (MemcachedNode node : mconn.getLocator().getAll()) { if (node.isActive()) { rv.add(node.getSocketAddress()); } } return rv; } /** * Get the endpoints of available servers. * Use this method instead of "getAvailableServers" if details about hostname, ipAddress and port of the servers * are required. * *

* This is based on a snapshot in time so shouldn't be considered completely * accurate, but is a useful for getting a feel for what's working and what's * not working. *

* * @return point-in-time view of currently available servers */ public Collection getAvailableNodeEndPoints() { ArrayList rv = new ArrayList(); for (MemcachedNode node : mconn.getLocator().getAll()) { if (node.isActive()) { rv.add(node.getNodeEndPoint()); } } return rv; } /** * Get the endpoints of all servers. * * @return point-in-time view of current list of servers */ public Collection getAllNodeEndPoints() { ArrayList rv = new ArrayList(); for (MemcachedNode node : mconn.getLocator().getAll()) { rv.add(node.getNodeEndPoint()); } return rv; } /** * Get the addresses of unavailable servers. * *

* This is based on a snapshot in time so shouldn't be considered completely * accurate, but is a useful for getting a feel for what's working and what's * not working. *

* * @return point-in-time view of currently available servers */ public Collection getUnavailableServers() { ArrayList rv = new ArrayList(); for (MemcachedNode node : mconn.getLocator().getAll()) { if (!node.isActive()) { rv.add(node.getSocketAddress()); } } return rv; } /** * Get a read-only wrapper around the node locator wrapping this instance. * * @return this instance's NodeLocator */ public NodeLocator getNodeLocator() { return mconn.getLocator().getReadonlyCopy(); } /** * Get the default transcoder that's in use. * * @return this instance's Transcoder */ public Transcoder getTranscoder() { return transcoder; } CountDownLatch broadcastOp(final BroadcastOpFactory of) { return broadcastOp(of, mconn.getLocator().getAll(), true); } CountDownLatch broadcastOp(final BroadcastOpFactory of, Collection nodes) { return broadcastOp(of, nodes, true); } private CountDownLatch broadcastOp(BroadcastOpFactory of, Collection nodes, boolean checkShuttingDown) { checkState(); if (checkShuttingDown && shuttingDown) { throw new IllegalStateException("Shutting down"); } return mconn.broadcastOperation(of, nodes); } private OperationFuture asyncStore(StoreType storeType, String key, int exp, T value, Transcoder tc) { CachedData co = tc.encode(value); final CountDownLatch latch = new CountDownLatch(1); final OperationFuture rv = new OperationFuture(key, latch, operationTimeout); Operation op = opFact.store(storeType, key, co.getFlags(), exp, co.getData(), new OperationCallback() { public void receivedStatus(OperationStatus val) { rv.set(val.isSuccess(), val); } public void complete() { latch.countDown(); } }); rv.setOperation(op); enqueueOperation(key, op); return rv; } private OperationFuture asyncStore(StoreType storeType, String key, int exp, Object value) { return asyncStore(storeType, key, exp, value, transcoder); } private OperationFuture asyncCat(ConcatenationType catType, long cas, String key, T value, Transcoder tc) { CachedData co = tc.encode(value); final CountDownLatch 
latch = new CountDownLatch(1); final OperationFuture rv = new OperationFuture(key, latch, operationTimeout); Operation op = opFact.cat(catType, cas, key, co.getData(), new OperationCallback() { public void receivedStatus(OperationStatus val) { rv.set(val.isSuccess(), val); } public void complete() { latch.countDown(); } }); rv.setOperation(op); enqueueOperation(key, op); return rv; } /** * Touch the given key to reset its expiration time with the default * transcoder. * * @param key the key to fetch * @param exp the new expiration to set for the given key * @return a future that will hold the return value of whether or not the * fetch succeeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture touch(final String key, final int exp) { return touch(key, exp, transcoder); } /** * Touch the given key to reset its expiration time. * * @param key the key to fetch * @param exp the new expiration to set for the given key * @param tc the transcoder to serialize and unserialize value * @return a future that will hold the return value of whether or not the * fetch succeeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture touch(final String key, final int exp, final Transcoder tc) { final CountDownLatch latch = new CountDownLatch(1); final OperationFuture rv = new OperationFuture(key, latch, operationTimeout); Operation op = opFact.touch(key, exp, new OperationCallback() { public void receivedStatus(OperationStatus status) { rv.set(status.isSuccess(), status); } public void complete() { latch.countDown(); } }); rv.setOperation(op); enqueueOperation(key, op); return rv; } /** * Append to an existing value in the cache. * *

* Note that the return will be false any time a mutation has not occurred. *

* * @param cas cas identifier (ignored in the ascii protocol) * @param key the key to whose value will be appended * @param val the value to append * @return a future indicating success, false if there was no change to the * value * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture append(long cas, String key, Object val) { return append(cas, key, val, transcoder); } /** * Append to an existing value in the cache. * *

* Note that the return will be false any time a mutation has not occurred. *

* * @param * @param cas cas identifier (ignored in the ascii protocol) * @param key the key to whose value will be appended * @param val the value to append * @param tc the transcoder to serialize and unserialize the value * @return a future indicating success * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture append(long cas, String key, T val, Transcoder tc) { return asyncCat(ConcatenationType.append, cas, key, val, tc); } /** * Prepend to an existing value in the cache. * *

* Note that the return will be false any time a mutation has not occurred. *

* * @param cas cas identifier (ignored in the ascii protocol) * @param key the key to whose value will be prepended * @param val the value to append * @return a future indicating success * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture prepend(long cas, String key, Object val) { return prepend(cas, key, val, transcoder); } /** * Prepend to an existing value in the cache. * *

* Note that the return will be false any time a mutation has not occurred. *

* * @param * @param cas cas identifier (ignored in the ascii protocol) * @param key the key to whose value will be prepended * @param val the value to append * @param tc the transcoder to serialize and unserialize the value * @return a future indicating success * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture prepend(long cas, String key, T val, Transcoder tc) { return asyncCat(ConcatenationType.prepend, cas, key, val, tc); } /** * Asynchronous CAS operation. * * @param * @param key the key * @param casId the CAS identifier (from a gets operation) * @param value the new value * @param tc the transcoder to serialize and unserialize the value * @return a future that will indicate the status of the CAS * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Future asyncCAS(String key, long casId, T value, Transcoder tc) { return asyncCAS(key, casId, 0, value, tc); } /** * Asynchronous CAS operation. 
* * @param * @param key the key * @param casId the CAS identifier (from a gets operation) * @param exp the expiration of this object * @param value the new value * @param tc the transcoder to serialize and unserialize the value * @return a future that will indicate the status of the CAS * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Future asyncCAS(String key, long casId, int exp, T value, Transcoder tc) { CachedData co = tc.encode(value); final CountDownLatch latch = new CountDownLatch(1); final OperationFuture rv = new OperationFuture(key, latch, operationTimeout); Operation op = opFact.cas(StoreType.set, key, casId, co.getFlags(), exp, co.getData(), new OperationCallback() { public void receivedStatus(OperationStatus val) { if (val instanceof CASOperationStatus) { rv.set(((CASOperationStatus) val).getCASResponse(), val); } else if (val instanceof CancelledOperationStatus) { getLogger().debug("CAS operation cancelled"); } else if (val instanceof TimedOutOperationStatus) { getLogger().debug("CAS operation timed out"); } else { throw new RuntimeException("Unhandled state: " + val); } } public void complete() { latch.countDown(); } }); rv.setOperation(op); enqueueOperation(key, op); return rv; } /** * Asynchronous CAS operation using the default transcoder. * * @param key the key * @param casId the CAS identifier (from a gets operation) * @param value the new value * @return a future that will indicate the status of the CAS * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Future asyncCAS(String key, long casId, Object value) { return asyncCAS(key, casId, value, transcoder); } /** * Perform a synchronous CAS operation. 
* * @param * @param key the key * @param casId the CAS identifier (from a gets operation) * @param value the new value * @param tc the transcoder to serialize and unserialize the value * @return a CASResponse * @throws OperationTimeoutException if global operation timeout is exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public CASResponse cas(String key, long casId, T value, Transcoder tc) { return cas(key, casId, 0, value, tc); } /** * Perform a synchronous CAS operation. * * @param * @param key the key * @param casId the CAS identifier (from a gets operation) * @param exp the expiration of this object * @param value the new value * @param tc the transcoder to serialize and unserialize the value * @return a CASResponse * @throws OperationTimeoutException if global operation timeout is exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public CASResponse cas(String key, long casId, int exp, T value, Transcoder tc) { try { return asyncCAS(key, casId, exp, value, tc).get(operationTimeout, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { throw new RuntimeException("Interrupted waiting for value", e); } catch (ExecutionException e) { throw new RuntimeException("Exception waiting for value", e); } catch (TimeoutException e) { throw new OperationTimeoutException("Timeout waiting for value", e); } } /** * Perform a synchronous CAS operation with the default transcoder. 
* * @param key the key * @param casId the CAS identifier (from a gets operation) * @param value the new value * @return a CASResponse * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public CASResponse cas(String key, long casId, Object value) { return cas(key, casId, value, transcoder); } /** * Add an object to the cache iff it does not exist already. * *

* The exp value is passed along to memcached exactly as given, * and will be processed per the memcached protocol specification: *

* *

* Note that the return will be false any time a mutation has not occurred. *

* *
*

* The actual value sent may either be Unix time (number of seconds since * January 1, 1970, as a 32-bit value), or a number of seconds starting from * current time. In the latter case, this number of seconds may not exceed * 60*60*24*30 (number of seconds in 30 days); if the number sent by a client * is larger than that, the server will consider it to be real Unix time value * rather than an offset from current time. *

*
* * @param * @param key the key under which this object should be added. * @param exp the expiration of this object * @param o the object to store * @param tc the transcoder to serialize and unserialize the value * @return a future representing the processing of this operation * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture add(String key, int exp, T o, Transcoder tc) { return asyncStore(StoreType.add, key, exp, o, tc); } /** * Add an object to the cache (using the default transcoder) iff it does not * exist already. * *

* The exp value is passed along to memcached exactly as given, * and will be processed per the memcached protocol specification: *

* *

* Note that the return will be false any time a mutation has not occurred. *

* *
*

* The actual value sent may either be Unix time (number of seconds since * January 1, 1970, as a 32-bit value), or a number of seconds starting from * current time. In the latter case, this number of seconds may not exceed * 60*60*24*30 (number of seconds in 30 days); if the number sent by a client * is larger than that, the server will consider it to be real Unix time value * rather than an offset from current time. *

*
* * @param key the key under which this object should be added. * @param exp the expiration of this object * @param o the object to store * @return a future representing the processing of this operation * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture add(String key, int exp, Object o) { return asyncStore(StoreType.add, key, exp, o, transcoder); } /** * Set an object in the cache regardless of any existing value. * *

* The exp value is passed along to memcached exactly as given, * and will be processed per the memcached protocol specification: *

* *

* Note that the return will be false any time a mutation has not occurred. *

* *
*

* The actual value sent may either be Unix time (number of seconds since * January 1, 1970, as a 32-bit value), or a number of seconds starting from * current time. In the latter case, this number of seconds may not exceed * 60*60*24*30 (number of seconds in 30 days); if the number sent by a client * is larger than that, the server will consider it to be real Unix time value * rather than an offset from current time. *

*
* * @param * @param key the key under which this object should be added. * @param exp the expiration of this object * @param o the object to store * @param tc the transcoder to serialize and unserialize the value * @return a future representing the processing of this operation * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture set(String key, int exp, T o, Transcoder tc) { return asyncStore(StoreType.set, key, exp, o, tc); } /** * Set an object in the cache (using the default transcoder) regardless of any * existing value. * *

* The exp value is passed along to memcached exactly as given, * and will be processed per the memcached protocol specification: *

* *

* Note that the return will be false any time a mutation has not occurred. *

* *
*

* The actual value sent may either be Unix time (number of seconds since * January 1, 1970, as a 32-bit value), or a number of seconds starting from * current time. In the latter case, this number of seconds may not exceed * 60*60*24*30 (number of seconds in 30 days); if the number sent by a client * is larger than that, the server will consider it to be real Unix time value * rather than an offset from current time. *

*
* * @param key the key under which this object should be added. * @param exp the expiration of this object * @param o the object to store * @return a future representing the processing of this operation * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture set(String key, int exp, Object o) { return asyncStore(StoreType.set, key, exp, o, transcoder); } /** * Replace an object with the given value iff there is already a value for the * given key. * *

* The exp value is passed along to memcached exactly as given, * and will be processed per the memcached protocol specification: *

* *

* Note that the return will be false any time a mutation has not occurred. *

* *
*

* The actual value sent may either be Unix time (number of seconds since * January 1, 1970, as a 32-bit value), or a number of seconds starting from * current time. In the latter case, this number of seconds may not exceed * 60*60*24*30 (number of seconds in 30 days); if the number sent by a client * is larger than that, the server will consider it to be real Unix time value * rather than an offset from current time. *

*
* * @param * @param key the key under which this object should be added. * @param exp the expiration of this object * @param o the object to store * @param tc the transcoder to serialize and unserialize the value * @return a future representing the processing of this operation * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture replace(String key, int exp, T o, Transcoder tc) { return asyncStore(StoreType.replace, key, exp, o, tc); } /** * Replace an object with the given value (transcoded with the default * transcoder) iff there is already a value for the given key. * *

* The exp value is passed along to memcached exactly as given, * and will be processed per the memcached protocol specification: *

* *

* Note that the return will be false any time a mutation has not occurred. *

* *
*

* The actual value sent may either be Unix time (number of seconds since * January 1, 1970, as a 32-bit value), or a number of seconds starting from * current time. In the latter case, this number of seconds may not exceed * 60*60*24*30 (number of seconds in 30 days); if the number sent by a client * is larger than that, the server will consider it to be real Unix time value * rather than an offset from current time. *

*
* * @param key the key under which this object should be added. * @param exp the expiration of this object * @param o the object to store * @return a future representing the processing of this operation * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture replace(String key, int exp, Object o) { return asyncStore(StoreType.replace, key, exp, o, transcoder); } /** * Get the given key asynchronously. * * @param * @param key the key to fetch * @param tc the transcoder to serialize and unserialize value * @return a future that will hold the return value of the fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public GetFuture asyncGet(final String key, final Transcoder tc) { final CountDownLatch latch = new CountDownLatch(1); final GetFuture rv = new GetFuture(latch, operationTimeout, key); Operation op = opFact.get(key, new GetOperation.Callback() { private Future val = null; public void receivedStatus(OperationStatus status) { rv.set(val, status); } public void gotData(String k, int flags, byte[] data) { assert key.equals(k) : "Wrong key returned"; val = tcService.decode(tc, new CachedData(flags, data, tc.getMaxSize())); } public void complete() { latch.countDown(); } }); rv.setOperation(op); enqueueOperation(key, op); return rv; } /** * Get with a single key from the specified node. 
* * @param * @param key the key to get * @param tc the transcoder to serialize and unserialize value * @return the result from the cache (null if there is none) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ T get(InetSocketAddress sa, final String key, final Transcoder tc) { try{ return asyncGet(sa, key, tc).get(operationTimeout, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { throw new RuntimeException("Interrupted waiting for value", e); } catch (ExecutionException e) { throw new RuntimeException("Exception waiting for value", e); } catch (TimeoutException e) { throw new OperationTimeoutException("Timeout waiting for value", e); } } /** * Get the given key from the specified node. * @param * @param sa - The InetSocketAddress of the node from which to fetch the key * @param key the key to fetch * @param transcoder the transcoder to serialize and unserialize value * @return a future that will hold the return value of the fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ GetFuture asyncGet(InetSocketAddress sa, final String key, final Transcoder tc) { final CountDownLatch latch = new CountDownLatch(1); final GetFuture rv = new GetFuture(latch, operationTimeout, key); Operation op = opFact.get(key, new GetOperation.Callback() { private Future val = null; public void receivedStatus(OperationStatus status) { rv.set(val, status); } public void gotData(String k, int flags, byte[] data) { assert key.equals(k) : "Wrong key returned"; val = tcService.decode(tc, new CachedData(flags, data, transcoder.getMaxSize())); } public void complete() { latch.countDown(); } }); rv.setOperation(op); mconn.enqueueOperation(sa, op); return rv; } /** * Get the given key asynchronously and decode with the default transcoder. 
* * @param key the key to fetch * @return a future that will hold the return value of the fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public GetFuture asyncGet(final String key) { return asyncGet(key, transcoder); } /** * Gets (with CAS support) the given key asynchronously. * * @param * @param key the key to fetch * @param tc the transcoder to serialize and unserialize value * @return a future that will hold the return value of the fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture> asyncGets(final String key, final Transcoder tc) { final CountDownLatch latch = new CountDownLatch(1); final OperationFuture> rv = new OperationFuture>(key, latch, operationTimeout); Operation op = opFact.gets(key, new GetsOperation.Callback() { private CASValue val = null; public void receivedStatus(OperationStatus status) { rv.set(val, status); } public void gotData(String k, int flags, long cas, byte[] data) { assert key.equals(k) : "Wrong key returned"; assert cas > 0 : "CAS was less than zero: " + cas; val = new CASValue(cas, tc.decode(new CachedData(flags, data, tc.getMaxSize()))); } public void complete() { latch.countDown(); } }); rv.setOperation(op); enqueueOperation(key, op); return rv; } /** * Gets (with CAS support) the given key asynchronously and decode using the * default transcoder. * * @param key the key to fetch * @return a future that will hold the return value of the fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture> asyncGets(final String key) { return asyncGets(key, transcoder); } /** * Gets (with CAS support) with a single key. 
* * @param * @param key the key to get * @param tc the transcoder to serialize and unserialize value * @return the result from the cache and CAS id (null if there is none) * @throws OperationTimeoutException if global operation timeout is exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public CASValue gets(String key, Transcoder tc) { try { return asyncGets(key, tc).get(operationTimeout, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { throw new RuntimeException("Interrupted waiting for value", e); } catch (ExecutionException e) { throw new RuntimeException("Exception waiting for value", e); } catch (TimeoutException e) { throw new OperationTimeoutException("Timeout waiting for value", e); } } /** * Get with a single key and reset its expiration. * * @param * @param key the key to get * @param exp the new expiration for the key * @param tc the transcoder to serialize and unserialize value * @return the result from the cache (null if there is none) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public CASValue getAndTouch(String key, int exp, Transcoder tc) { try { return asyncGetAndTouch(key, exp, tc).get(operationTimeout, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { throw new RuntimeException("Interrupted waiting for value", e); } catch (ExecutionException e) { throw new RuntimeException("Exception waiting for value", e); } catch (TimeoutException e) { throw new OperationTimeoutException("Timeout waiting for value", e); } } /** * Get a single key and reset its expiration using the default transcoder. 
* * @param key the key to get * @param exp the new expiration for the key * @return the result from the cache and CAS id (null if there is none) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public CASValue getAndTouch(String key, int exp) { return getAndTouch(key, exp, transcoder); } /** * Gets (with CAS support) with a single key using the default transcoder. * * @param key the key to get * @return the result from the cache and CAS id (null if there is none) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public CASValue gets(String key) { return gets(key, transcoder); } /** * Get with a single key. * * @param * @param key the key to get * @param tc the transcoder to serialize and unserialize value * @return the result from the cache (null if there is none) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public T get(String key, Transcoder tc) { try { return asyncGet(key, tc).get(operationTimeout, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { throw new RuntimeException("Interrupted waiting for value", e); } catch (ExecutionException e) { throw new RuntimeException("Exception waiting for value", e); } catch (TimeoutException e) { throw new OperationTimeoutException("Timeout waiting for value", e); } } /** * Get with a single key and decode using the default transcoder. 
* * @param key the key to get * @return the result from the cache (null if there is none) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Object get(String key) { return get(key, transcoder); } /** * Asynchronously get a bunch of objects from the cache. * * @param * @param keyIter Iterator that produces keys. * @param tcIter an iterator of transcoders to serialize and unserialize * values; the transcoders are matched with the keys in the same * order. The minimum of the key collection length and number of * transcoders is used and no exception is thrown if they do not * match * @return a Future result of that fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public BulkFuture> asyncGetBulk(Iterator keyIter, Iterator> tcIter) { final Map> m = new ConcurrentHashMap>(); // This map does not need to be a ConcurrentHashMap // because it is fully populated when it is used and // used only to read the transcoder for a key. 
final Map> tcMap = new HashMap>(); // Break the gets down into groups by key final Map> chunks = new HashMap>(); final NodeLocator locator = mconn.getLocator(); while (keyIter.hasNext() && tcIter.hasNext()) { String key = keyIter.next(); tcMap.put(key, tcIter.next()); StringUtils.validateKey(key); final MemcachedNode primaryNode = locator.getPrimary(key); MemcachedNode node = null; if (primaryNode.isActive()) { node = primaryNode; } else { for (Iterator i = locator.getSequence(key); node == null && i.hasNext();) { MemcachedNode n = i.next(); if (n.isActive()) { node = n; } } if (node == null) { node = primaryNode; } } assert node != null : "Didn't find a node for " + key; Collection ks = chunks.get(node); if (ks == null) { ks = new ArrayList(); chunks.put(node, ks); } ks.add(key); } final CountDownLatch latch = new CountDownLatch(chunks.size()); final Collection ops = new ArrayList(chunks.size()); final BulkGetFuture rv = new BulkGetFuture(m, ops, latch); GetOperation.Callback cb = new GetOperation.Callback() { @SuppressWarnings("synthetic-access") public void receivedStatus(OperationStatus status) { rv.setStatus(status); } public void gotData(String k, int flags, byte[] data) { Transcoder tc = tcMap.get(k); m.put(k, tcService.decode(tc, new CachedData(flags, data, tc.getMaxSize()))); } public void complete() { latch.countDown(); } }; // Now that we know how many servers it breaks down into, and the latch // is all set up, convert all of these strings collections to operations final Map mops = new HashMap(); for (Map.Entry> me : chunks.entrySet()) { Operation op = opFact.get(me.getValue(), cb); mops.put(me.getKey(), op); ops.add(op); } assert mops.size() == chunks.size(); mconn.checkState(); mconn.addOperations(mops); return rv; } /** * Asynchronously get a bunch of objects from the cache. 
* * @param * @param keys the keys to request * @param tcIter an iterator of transcoders to serialize and unserialize * values; the transcoders are matched with the keys in the same * order. The minimum of the key collection length and number of * transcoders is used and no exception is thrown if they do not * match * @return a Future result of that fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public BulkFuture> asyncGetBulk(Collection keys, Iterator> tcIter) { return asyncGetBulk(keys.iterator(), tcIter); } /** * Asynchronously get a bunch of objects from the cache. * * @param * @param keyIter Iterator for the keys to request * @param tc the transcoder to serialize and unserialize values * @return a Future result of that fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public BulkFuture> asyncGetBulk(Iterator keyIter, Transcoder tc) { return asyncGetBulk(keyIter, new SingleElementInfiniteIterator>(tc)); } /** * Asynchronously get a bunch of objects from the cache. * * @param * @param keys the keys to request * @param tc the transcoder to serialize and unserialize values * @return a Future result of that fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public BulkFuture> asyncGetBulk(Collection keys, Transcoder tc) { return asyncGetBulk(keys, new SingleElementInfiniteIterator>( tc)); } /** * Asynchronously get a bunch of objects from the cache and decode them with * the given transcoder. 
* * @param keyIter Iterator that produces the keys to request * @return a Future result of that fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public BulkFuture> asyncGetBulk( Iterator keyIter) { return asyncGetBulk(keyIter, transcoder); } /** * Asynchronously get a bunch of objects from the cache and decode them with * the given transcoder. * * @param keys the keys to request * @return a Future result of that fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public BulkFuture> asyncGetBulk(Collection keys) { return asyncGetBulk(keys, transcoder); } /** * Varargs wrapper for asynchronous bulk gets. * * @param * @param tc the transcoder to serialize and unserialize value * @param keys one more more keys to get * @return the future values of those keys * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public BulkFuture> asyncGetBulk(Transcoder tc, String... keys) { return asyncGetBulk(Arrays.asList(keys), tc); } /** * Varargs wrapper for asynchronous bulk gets with the default transcoder. * * @param keys one more more keys to get * @return the future values of those keys * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public BulkFuture> asyncGetBulk(String... keys) { return asyncGetBulk(Arrays.asList(keys), transcoder); } /** * Get the given key to reset its expiration time. 
* * @param key the key to fetch * @param exp the new expiration to set for the given key * @return a future that will hold the return value of the fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture> asyncGetAndTouch(final String key, final int exp) { return asyncGetAndTouch(key, exp, transcoder); } /** * Get the given key to reset its expiration time. * * @param key the key to fetch * @param exp the new expiration to set for the given key * @param tc the transcoder to serialize and unserialize value * @return a future that will hold the return value of the fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture> asyncGetAndTouch(final String key, final int exp, final Transcoder tc) { final CountDownLatch latch = new CountDownLatch(1); final OperationFuture> rv = new OperationFuture>( key, latch, operationTimeout); Operation op = opFact.getAndTouch(key, exp, new GetAndTouchOperation.Callback() { private CASValue val = null; public void receivedStatus(OperationStatus status) { rv.set(val, status); } public void complete() { latch.countDown(); } public void gotData(String k, int flags, long cas, byte[] data) { assert k.equals(key) : "Wrong key returned"; assert cas > 0 : "CAS was less than zero: " + cas; val = new CASValue(cas, tc.decode(new CachedData(flags, data, tc.getMaxSize()))); } }); rv.setOperation(op); enqueueOperation(key, op); return rv; } /** * Get the values for multiple keys from the cache. 
* * @param * @param keyIter Iterator that produces the keys * @param tc the transcoder to serialize and unserialize value * @return a map of the values (for each value that exists) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Map getBulk(Iterator keyIter, Transcoder tc) { try { return asyncGetBulk(keyIter, tc).get(operationTimeout, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { throw new RuntimeException("Interrupted getting bulk values", e); } catch (ExecutionException e) { throw new RuntimeException("Failed getting bulk values", e); } catch (TimeoutException e) { throw new OperationTimeoutException("Timeout waiting for bulkvalues", e); } } /** * Get the values for multiple keys from the cache. * * @param keyIter Iterator that produces the keys * @return a map of the values (for each value that exists) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Map getBulk(Iterator keyIter) { return getBulk(keyIter, transcoder); } /** * Get the values for multiple keys from the cache. * * @param * @param keys the keys * @param tc the transcoder to serialize and unserialize value * @return a map of the values (for each value that exists) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Map getBulk(Collection keys, Transcoder tc) { return getBulk(keys.iterator(), tc); } /** * Get the values for multiple keys from the cache. 
* * @param keys the keys * @return a map of the values (for each value that exists) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Map getBulk(Collection keys) { return getBulk(keys, transcoder); } /** * Get the values for multiple keys from the cache. * * @param * @param tc the transcoder to serialize and unserialize value * @param keys the keys * @return a map of the values (for each value that exists) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Map getBulk(Transcoder tc, String... keys) { return getBulk(Arrays.asList(keys), tc); } /** * Get the values for multiple keys from the cache. * * @param keys the keys * @return a map of the values (for each value that exists) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Map getBulk(String... keys) { return getBulk(Arrays.asList(keys), transcoder); } private void enqueueOperation(String key, Operation op){ checkState(); mconn.enqueueOperation(key, op); } private void checkState() { if (clientMode == ClientMode.Dynamic && !isConfigurationInitialized) { throw new IllegalStateException("Client is not initialized"); } } /** * Get the config * * @param addr - The node from which to retrieve the configuration * @param type - config to get * @return the result from the server. 
* @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Object getConfig(InetSocketAddress addr, ConfigurationType type) { return getConfig(addr, type, transcoder); } /** * Get the config using the config protocol. * The command format is "config get " * @param addr - The node from which to retrieve the configuration * @param config to get * @param tc the transcoder to serialize and unserialize value * @return the result from the server (null if there is none) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public T getConfig(InetSocketAddress addr, ConfigurationType type, Transcoder tc) { try { return asyncGetConfig(addr, type, tc).get(operationTimeout, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { throw new RuntimeException("Interrupted waiting for config", e); } catch(OperationNotSupportedException e){ throw e; } catch (ExecutionException e) { if(e.getCause() instanceof OperationException){ OperationException exp = (OperationException)e.getCause(); if(OperationErrorType.GENERAL.equals(exp.getType())){ throw new OperationNotSupportedException("This version of getConfig command is not supported."); } } throw new RuntimeException("Exception waiting for config", e); } catch (TimeoutException e) { throw new OperationTimeoutException("Timeout waiting for config", e); } } /** * Get the given configurationType asynchronously. 
* * @param addr - The node from which to retrieve the configuration * @param configurationType the configurationType to fetch * @param tc the transcoder to serialize and unserialize value * @return a future that will hold the return value of the fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public GetConfigFuture asyncGetConfig(InetSocketAddress addr, final ConfigurationType type, final Transcoder tc) { final CountDownLatch latch = new CountDownLatch(1); final GetConfigFuture rv = new GetConfigFuture(latch, operationTimeout, type); Operation op = opFact.getConfig(type, new GetConfigOperation.Callback() { private Future val = null; public void receivedStatus(OperationStatus status) { rv.set(val, status); } public void gotData(ConfigurationType configurationType, int flags, byte[] data) { assert type.equals(configurationType) : "Wrong type returned"; val = tcService.decode(tc, new CachedData(flags, data, tc.getMaxSize())); } public void complete() { latch.countDown(); } }); rv.setOperation(op); mconn.enqueueOperation(addr, op); return rv; } /** * Sets the configuration in the cache node for the specified configurationType. * * @param addr - The node where the configuration is set. * @param type the type under which this configuration should be added. * @param o the configuration to store * @return a future representing the processing of this operation * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture setConfig(InetSocketAddress addr, ConfigurationType configurationType, Object o) { return asyncSetConfig(addr, configurationType, o, transcoder); } /** * Sets the configuration in the cache node for the specified configurationType. * * @param addr - The node where the configuration is set. * @param type the type under which this configuration should be added. 
* @param o the configuration to store * @param tc the transcoder to serialize and unserialize the configuration * @return a future representing the processing of this operation * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture setConfig(InetSocketAddress addr, ConfigurationType configurationType, Object o, Transcoder tc) { return asyncSetConfig(addr, configurationType, o, transcoder); } private OperationFuture asyncSetConfig(InetSocketAddress addr, ConfigurationType configurationType, T value, Transcoder tc) { CachedData co = tc.encode(value); final CountDownLatch latch = new CountDownLatch(1); final OperationFuture rv = new OperationFuture(configurationType.getValue(), latch, operationTimeout); Operation op = opFact.setConfig(configurationType, co.getFlags(), co.getData(), new OperationCallback() { public void receivedStatus(OperationStatus val) { rv.set(val.isSuccess(), val); } public void complete() { latch.countDown(); } }); rv.setOperation(op); mconn.enqueueOperation(addr, op); return rv; } /** * Delete the given configurationType from the cache server. * * @param addr - The node in which the configuration is deleted. 
* @param configurationType the configurationType to delete * @return whether or not the operation was performed * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture deleteConfig(InetSocketAddress addr, ConfigurationType configurationType) { final CountDownLatch latch = new CountDownLatch(1); final OperationFuture rv = new OperationFuture(configurationType.getValue(), latch, operationTimeout); DeleteConfigOperation op = opFact.deleteConfig(configurationType, new OperationCallback() { public void receivedStatus(OperationStatus s) { rv.set(s.isSuccess(), s); } public void complete() { latch.countDown(); } }); rv.setOperation(op); mconn.enqueueOperation(addr, op); return rv; } /** * Get the versions of all of the connected memcacheds. * * @return a Map of SocketAddress to String for connected servers * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Map getVersions() { final Map rv = new ConcurrentHashMap(); CountDownLatch blatch = broadcastOp(new BroadcastOpFactory() { public Operation newOp(final MemcachedNode n, final CountDownLatch latch) { final SocketAddress sa = n.getSocketAddress(); return opFact.version(new OperationCallback() { public void receivedStatus(OperationStatus s) { rv.put(sa, s.getMessage()); } public void complete() { latch.countDown(); } }); } }); try { blatch.await(operationTimeout, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { throw new RuntimeException("Interrupted waiting for versions", e); } return rv; } /** * Get all of the stats from all of the connections. * * @return a Map of a Map of stats replies by SocketAddress * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Map> getStats() { return getStats(null); } /** * Get a set of stats from all connections. 
* * @param arg which stats to get * @return a Map of the server SocketAddress to a map of String stat keys to * String stat values. * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Map> getStats(final String arg) { final Map> rv = new HashMap>(); CountDownLatch blatch = broadcastOp(new BroadcastOpFactory() { public Operation newOp(final MemcachedNode n, final CountDownLatch latch) { final SocketAddress sa = n.getSocketAddress(); rv.put(sa, new HashMap()); return opFact.stats(arg, new StatsOperation.Callback() { public void gotStat(String name, String val) { rv.get(sa).put(name, val); } @SuppressWarnings("synthetic-access") public void receivedStatus(OperationStatus status) { if (!status.isSuccess()) { getLogger().warn("Unsuccessful stat fetch: %s", status); } } public void complete() { latch.countDown(); } }); } }); try { blatch.await(operationTimeout, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { throw new RuntimeException("Interrupted waiting for stats", e); } return rv; } private long mutate(Mutator m, String key, long by, long def, int exp) { final AtomicLong rv = new AtomicLong(); final CountDownLatch latch = new CountDownLatch(1); enqueueOperation(key, opFact.mutate(m, key, by, def, exp, new OperationCallback() { public void receivedStatus(OperationStatus s) { // XXX: Potential abstraction leak. // The handling of incr/decr in the binary protocol // Allows us to avoid string processing. rv.set(new Long(s.isSuccess() ? s.getMessage() : "-1")); } public void complete() { latch.countDown(); } })); try { if (!latch.await(operationTimeout, TimeUnit.MILLISECONDS)) { throw new OperationTimeoutException("Mutate operation timed out," + "unable to modify counter [" + key + "]"); } } catch (InterruptedException e) { throw new RuntimeException("Interrupted", e); } getLogger().debug("Mutation returned %s", rv); return rv.get(); } /** * Increment the given key by the given amount. 
* * Due to the way the memcached server operates on items, incremented and * decremented items will be returned as Strings with any operations that * return a value. * * @param key the key * @param by the amount to increment * @return the new value (-1 if the key doesn't exist) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public long incr(String key, long by) { return mutate(Mutator.incr, key, by, 0, -1); } /** * Increment the given key by the given amount. * * Due to the way the memcached server operates on items, incremented and * decremented items will be returned as Strings with any operations that * return a value. * * @param key the key * @param by the amount to increment * @return the new value (-1 if the key doesn't exist) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public long incr(String key, int by) { return mutate(Mutator.incr, key, (long)by, 0, -1); } /** * Decrement the given key by the given value. * * Due to the way the memcached server operates on items, incremented and * decremented items will be returned as Strings with any operations that * return a value. * * @param key the key * @param by the value * @return the new value (-1 if the key doesn't exist) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public long decr(String key, long by) { return mutate(Mutator.decr, key, by, 0, -1); } /** * Decrement the given key by the given value. * * Due to the way the memcached server operates on items, incremented and * decremented items will be returned as Strings with any operations that * return a value. 
* * @param key the key * @param by the value * @return the new value (-1 if the key doesn't exist) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public long decr(String key, int by) { return mutate(Mutator.decr, key, (long)by, 0, -1); } /** * Increment the given counter, returning the new value. * * Due to the way the memcached server operates on items, incremented and * decremented items will be returned as Strings with any operations that * return a value. * * @param key the key * @param by the amount to increment * @param def the default value (if the counter does not exist) * @param exp the expiration of this object * @return the new value, or -1 if we were unable to increment or add * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public long incr(String key, long by, long def, int exp) { return mutateWithDefault(Mutator.incr, key, by, def, exp); } /** * Increment the given counter, returning the new value. * * Due to the way the memcached server operates on items, incremented and * decremented items will be returned as Strings with any operations that * return a value. 
* * @param key the key * @param by the amount to increment * @param def the default value (if the counter does not exist) * @param exp the expiration of this object * @return the new value, or -1 if we were unable to increment or add * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public long incr(String key, int by, long def, int exp) { return mutateWithDefault(Mutator.incr, key, (long)by, def, exp); } /** * Decrement the given counter, returning the new value. * * Due to the way the memcached server operates on items, incremented and * decremented items will be returned as Strings with any operations that * return a value. * * @param key the key * @param by the amount to decrement * @param def the default value (if the counter does not exist) * @param exp the expiration of this object * @return the new value, or -1 if we were unable to decrement or add * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public long decr(String key, long by, long def, int exp) { return mutateWithDefault(Mutator.decr, key, by, def, exp); } /** * Decrement the given counter, returning the new value. * * Due to the way the memcached server operates on items, incremented and * decremented items will be returned as Strings with any operations that * return a value. 
* * @param key the key * @param by the amount to decrement * @param def the default value (if the counter does not exist) * @param exp the expiration of this object * @return the new value, or -1 if we were unable to decrement or add * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public long decr(String key, int by, long def, int exp) { return mutateWithDefault(Mutator.decr, key, (long)by, def, exp); } private long mutateWithDefault(Mutator t, String key, long by, long def, int exp) { long rv = mutate(t, key, by, def, exp); // The ascii protocol doesn't support defaults, so I added them // manually here. if (rv == -1) { Future f = asyncStore(StoreType.add, key, exp, String.valueOf(def)); try { if (f.get(operationTimeout, TimeUnit.MILLISECONDS)) { rv = def; } else { rv = mutate(t, key, by, 0, exp); assert rv != -1 : "Failed to mutate or init value"; } } catch (InterruptedException e) { throw new RuntimeException("Interrupted waiting for store", e); } catch (ExecutionException e) { throw new RuntimeException("Failed waiting for store", e); } catch (TimeoutException e) { throw new OperationTimeoutException("Timeout waiting to mutate or init" + " value", e); } } return rv; } private OperationFuture asyncMutate(Mutator m, String key, long by, long def, int exp) { final CountDownLatch latch = new CountDownLatch(1); final OperationFuture rv = new OperationFuture(key, latch, operationTimeout); Operation op = opFact.mutate(m, key, by, def, exp, new OperationCallback() { public void receivedStatus(OperationStatus s) { rv.set(new Long(s.isSuccess() ? s.getMessage() : "-1"), s); } public void complete() { latch.countDown(); } }); enqueueOperation(key, op); rv.setOperation(op); return rv; } /** * Asychronous increment. 
* * @param key key to increment * @param by the amount to increment the value by * @return a future with the incremented value, or -1 if the increment failed. * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture asyncIncr(String key, long by) { return asyncMutate(Mutator.incr, key, by, 0, -1); } /** * Asychronous increment. * * @param key key to increment * @param by the amount to increment the value by * @return a future with the incremented value, or -1 if the increment failed. * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture asyncIncr(String key, int by) { return asyncMutate(Mutator.incr, key, (long)by, 0, -1); } /** * Asynchronous decrement. * * @param key key to increment * @param by the amount to increment the value by * @return a future with the decremented value, or -1 if the increment failed. * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture asyncDecr(String key, long by) { return asyncMutate(Mutator.decr, key, by, 0, -1); } /** * Asynchronous decrement. * * @param key key to increment * @param by the amount to increment the value by * @return a future with the decremented value, or -1 if the increment failed. * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture asyncDecr(String key, int by) { return asyncMutate(Mutator.decr, key, (long)by, 0, -1); } /** * Increment the given counter, returning the new value. 
* * @param key the key * @param by the amount to increment * @param def the default value (if the counter does not exist) * @return the new value, or -1 if we were unable to increment or add * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public long incr(String key, long by, long def) { return mutateWithDefault(Mutator.incr, key, by, def, 0); } /** * Increment the given counter, returning the new value. * * @param key the key * @param by the amount to increment * @param def the default value (if the counter does not exist) * @return the new value, or -1 if we were unable to increment or add * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public long incr(String key, int by, long def) { return mutateWithDefault(Mutator.incr, key, (long)by, def, 0); } /** * Decrement the given counter, returning the new value. * * @param key the key * @param by the amount to decrement * @param def the default value (if the counter does not exist) * @return the new value, or -1 if we were unable to decrement or add * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public long decr(String key, long by, long def) { return mutateWithDefault(Mutator.decr, key, by, def, 0); } /** * Decrement the given counter, returning the new value. 
* * @param key the key * @param by the amount to decrement * @param def the default value (if the counter does not exist) * @return the new value, or -1 if we were unable to decrement or add * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public long decr(String key, int by, long def) { return mutateWithDefault(Mutator.decr, key, (long)by, def, 0); } /** * Delete the given key from the cache. * *

* The hold argument specifies the amount of time in seconds (or Unix time * until which) the client wishes the server to refuse "add" and "replace" * commands with this key. For this amount of item, the item is put into a * delete queue, which means that it won't possible to retrieve it by the * "get" command, but "add" and "replace" command with this key will also fail * (the "set" command will succeed, however). After the time passes, the item * is finally deleted from server memory. *

* * @param key the key to delete * @param hold how long the key should be unavailable to add commands * * @return whether or not the operation was performed * @deprecated Hold values are no longer honored. */ @Deprecated public OperationFuture delete(String key, int hold) { return delete(key); } /** * Delete the given key from the cache. * * @param key the key to delete * @return whether or not the operation was performed * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture delete(String key) { final CountDownLatch latch = new CountDownLatch(1); final OperationFuture rv = new OperationFuture(key, latch, operationTimeout); DeleteOperation op = opFact.delete(key, new OperationCallback() { public void receivedStatus(OperationStatus s) { rv.set(s.isSuccess(), s); } public void complete() { latch.countDown(); } }); rv.setOperation(op); enqueueOperation(key, op); return rv; } /** * Flush all caches from all servers with a delay of application. 
* * @param delay the period of time to delay, in seconds * @return whether or not the operation was accepted * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture flush(final int delay) { final AtomicReference flushResult = new AtomicReference(null); final ConcurrentLinkedQueue ops = new ConcurrentLinkedQueue(); CountDownLatch blatch = broadcastOp(new BroadcastOpFactory() { public Operation newOp(final MemcachedNode n, final CountDownLatch latch) { Operation op = opFact.flush(delay, new OperationCallback() { public void receivedStatus(OperationStatus s) { flushResult.set(s.isSuccess()); } public void complete() { latch.countDown(); } }); ops.add(op); return op; } }); return new OperationFuture(null, blatch, flushResult, operationTimeout) { @Override public boolean cancel(boolean ign) { boolean rv = false; for (Operation op : ops) { op.cancel(); rv |= op.getState() == OperationState.WRITE_QUEUED; } return rv; } @Override public Boolean get(long duration, TimeUnit units) throws InterruptedException, TimeoutException, ExecutionException { status = new OperationStatus(true, "OK"); return super.get(duration, units); } @Override public boolean isCancelled() { boolean rv = false; for (Operation op : ops) { rv |= op.isCancelled(); } return rv; } @Override public boolean isDone() { boolean rv = true; for (Operation op : ops) { rv &= op.getState() == OperationState.COMPLETE; } return rv || isCancelled(); } }; } /** * Flush all caches from all servers immediately. 
* * @return whether or not the operation was performed * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture flush() { return flush(-1); } public Set listSaslMechanisms() { final ConcurrentMap rv = new ConcurrentHashMap(); CountDownLatch blatch = broadcastOp(new BroadcastOpFactory() { public Operation newOp(MemcachedNode n, final CountDownLatch latch) { return opFact.saslMechs(new OperationCallback() { public void receivedStatus(OperationStatus status) { for (String s : status.getMessage().split(" ")) { rv.put(s, s); } } public void complete() { latch.countDown(); } }); } }); try { blatch.await(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } return rv.keySet(); } /** * Shut down immediately. */ public void shutdown() { shutdown(-1, TimeUnit.MILLISECONDS); } /** * Shut down this client gracefully. * * @param timeout the amount of time time for shutdown * @param unit the TimeUnit for the timeout * @return result of the shutdown request */ public boolean shutdown(long timeout, TimeUnit unit) { // Guard against double shutdowns (bug 8). if (shuttingDown) { getLogger().info("Suppressing duplicate attempt to shut down"); return false; } shuttingDown = true; String baseName = mconn.getName(); mconn.setName(baseName + " - SHUTTING DOWN"); boolean rv = true; try { // Conditionally wait if (timeout > 0) { mconn.setName(baseName + " - SHUTTING DOWN (waiting)"); rv = waitForQueues(timeout, unit); } } finally { // But always begin the shutdown sequence try { if(clientMode == ClientMode.Dynamic){ configPoller.shutdown(); } mconn.setName(baseName + " - SHUTTING DOWN (telling client)"); mconn.shutdown(); mconn.setName(baseName + " - SHUTTING DOWN (informed client)"); tcService.shutdown(); } catch (IOException e) { getLogger().warn("exception while shutting down", e); } } return rv; } /** * Wait for the queues to die down. 
* * @param timeout the amount of time time for shutdown * @param unit the TimeUnit for the timeout * @return result of the request for the wait * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public boolean waitForQueues(long timeout, TimeUnit unit) { CountDownLatch blatch = broadcastOp(new BroadcastOpFactory() { public Operation newOp(final MemcachedNode n, final CountDownLatch latch) { return opFact.noop(new OperationCallback() { public void complete() { latch.countDown(); } public void receivedStatus(OperationStatus s) { // Nothing special when receiving status, only // necessary to complete the interface } }); } }, mconn.getLocator().getAll(), false); try { // XXX: Perhaps IllegalStateException should be caught here // and the check retried. return blatch.await(timeout, unit); } catch (InterruptedException e) { throw new RuntimeException("Interrupted waiting for queues", e); } } /** * Add a connection observer. * * If connections are already established, your observer will be called with * the address and -1. * * @param obs the ConnectionObserver you wish to add * @return true if the observer was added. */ public boolean addObserver(ConnectionObserver obs) { boolean rv = mconn.addObserver(obs); if (rv) { for (MemcachedNode node : mconn.getLocator().getAll()) { if (node.isActive()) { obs.connectionEstablished(node.getSocketAddress(), -1); } } } return rv; } /** * Remove a connection observer. 
* * @param obs the ConnectionObserver you wish to add * @return true if the observer existed, but no longer does */ public boolean removeObserver(ConnectionObserver obs) { return mconn.removeObserver(obs); } public void connectionEstablished(SocketAddress sa, int reconnectCount) { if (authDescriptor != null) { if (authDescriptor.authThresholdReached()) { this.shutdown(); } authMonitor.authConnection(mconn, opFact, authDescriptor, findNode(sa)); } } private MemcachedNode findNode(SocketAddress sa) { MemcachedNode node = null; for (MemcachedNode n : mconn.getLocator().getAll()) { if (n.getSocketAddress().equals(sa)) { node = n; } } assert node != null : "Couldn't find node connected to " + sa; return node; } public void connectionLost(SocketAddress sa) { // Don't care. } public boolean isConfigurationProtocolSupported(){ return isConfigurationProtocolSupported; } void setIsConfigurationProtocolSupported(boolean isConfigurationProtocolSupported){ this.isConfigurationProtocolSupported = isConfigurationProtocolSupported; } public boolean isConfigurationInitialized(){ return isConfigurationInitialized; } void setIsConfigurtionInitialized(boolean isConfigurationInitialized){ this.isConfigurationInitialized = isConfigurationInitialized; } @Override public String toString() { return connFactory.toString(); } }