/**
 * Copyright (C) 2006-2009 Dustin Sallings
 * Copyright (C) 2009-2011 Couchbase, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALING
 * IN THE SOFTWARE.
 */

package net.spy.memcached;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;

import net.spy.memcached.auth.AuthDescriptor;
import net.spy.memcached.auth.AuthThreadMonitor;
import net.spy.memcached.compat.SpyObject;
import net.spy.memcached.internal.BulkFuture;
import net.spy.memcached.internal.BulkGetFuture;
import net.spy.memcached.internal.GetFuture;
import net.spy.memcached.internal.OperationFuture;
import net.spy.memcached.internal.SingleElementInfiniteIterator;
import net.spy.memcached.ops.CASOperationStatus;
import net.spy.memcached.ops.CancelledOperationStatus;
import net.spy.memcached.ops.ConcatenationType;
import net.spy.memcached.ops.DeleteOperation;
import net.spy.memcached.ops.GetAndTouchOperation;
import net.spy.memcached.ops.GetOperation;
import net.spy.memcached.ops.GetsOperation;
import net.spy.memcached.ops.Mutator;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.ops.OperationCallback;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.OperationStatus;
import net.spy.memcached.ops.StatsOperation;
import net.spy.memcached.ops.StoreOperation;
import net.spy.memcached.ops.StoreType;
import net.spy.memcached.ops.TimedOutOperationStatus;
import net.spy.memcached.protocol.binary.BinaryOperationFactory;
import net.spy.memcached.transcoders.TranscodeService;
import net.spy.memcached.transcoders.Transcoder;
import net.spy.memcached.util.StringUtils;

/**
 * Client to a memcached server.
 *
 * <h2>Basic usage</h2>
 *
 * <pre>
 * MemcachedClient c = new MemcachedClient(
 *    new InetSocketAddress("hostname", portNum));
 *
 * // Store a value (async) for one hour
 * c.set("someKey", 3600, someObject);
 * // Retrieve a value.
 * Object myObject = c.get("someKey");
 * </pre>
 *
 * <h2>Advanced Usage</h2>
 *
 * <p>
 * MemcachedClient may be processing a great deal of asynchronous messages or
 * possibly dealing with an unreachable memcached, which may delay processing.
 * If a memcached is disabled, for example, MemcachedConnection will continue
 * to attempt to reconnect and replay pending operations until it comes back
 * up. To prevent this from causing your application to hang, you can use one
 * of the asynchronous mechanisms to time out a request and cancel the
 * operation to the server.
 * </p>
 *
 * <pre>
 *      // Get a memcached client connected to several servers
 *      // over the binary protocol
 *      MemcachedClient c = new MemcachedClient(new BinaryConnectionFactory(),
 *              AddrUtil.getAddresses("server1:11211 server2:11211"));
 *
 *      // Try to get a value, for up to 5 seconds, and cancel if it
 *      // doesn't return
 *      Object myObj = null;
 *      Future&lt;Object&gt; f = c.asyncGet("someKey");
 *      try {
 *          myObj = f.get(5, TimeUnit.SECONDS);
 *      // throws expecting InterruptedException, ExecutionException
 *      // or TimeoutException
 *      } catch (Exception e) {
 *          // Since we don't need this, go ahead and cancel the operation.
 *          // This is not strictly necessary, but it'll save some work on
 *          // the server.  It is okay to cancel it if running.
 *          f.cancel(true);
 *          // Do other timeout related stuff
 *      }
 * </pre>
 *
 * <p>
 * Optionally, it is possible to activate a check that makes sure that the
 * node is alive and responding before running actual operations (even before
 * authentication). Only enable this if you are sure that you do not run into
 * issues during connection (some memcached services have problems with it).
 * You can enable it by setting the <code>net.spy.verifyAliveOnConnect</code>
 * System Property to "true".
 * </p>
 */
public class MemcachedClient extends SpyObject implements MemcachedClientIF,
    ConnectionObserver {

  protected volatile boolean shuttingDown = false;

  protected final long operationTimeout;

  protected final MemcachedConnection mconn;

  protected final OperationFactory opFact;

  protected final Transcoder<Object> transcoder;

  protected final TranscodeService tcService;

  protected final AuthDescriptor authDescriptor;

  protected final ConnectionFactory connFactory;

  protected final AuthThreadMonitor authMonitor = new AuthThreadMonitor();

  /**
   * Get a memcache client operating on the specified memcached locations.
   *
   * @param ia the memcached locations
   * @throws IOException if connections cannot be established
   */
  public MemcachedClient(InetSocketAddress... ia) throws IOException {
    this(new DefaultConnectionFactory(), Arrays.asList(ia));
  }

  /**
   * Get a memcache client over the specified memcached locations.
   *
   * @param addrs the socket addrs
   * @throws IOException if connections cannot be established
   */
  public MemcachedClient(List<InetSocketAddress> addrs) throws IOException {
    this(new DefaultConnectionFactory(), addrs);
  }

  /**
   * Get a memcache client over the specified memcached locations.
   *
   * @param cf the connection factory to configure connections for this client
   * @param addrs the socket addresses
   * @throws IOException if connections cannot be established
   */
  public MemcachedClient(ConnectionFactory cf, List<InetSocketAddress> addrs)
      throws IOException {
    if (cf == null) {
      throw new NullPointerException("Connection factory required");
    }
    if (addrs == null) {
      throw new NullPointerException("Server list required");
    }
    if (addrs.isEmpty()) {
      throw new IllegalArgumentException("You must have at least one server to"
          + " connect to");
    }
    if (cf.getOperationTimeout() <= 0) {
      throw new IllegalArgumentException("Operation timeout must be positive.");
    }
    connFactory = cf;
    tcService = new TranscodeService(cf.isDaemon());
    transcoder = cf.getDefaultTranscoder();
    opFact = cf.getOperationFactory();
    assert opFact != null : "Connection factory failed to make op factory";
    mconn = cf.createConnection(addrs);
    assert mconn != null : "Connection factory failed to make a connection";
    operationTimeout = cf.getOperationTimeout();
    authDescriptor = cf.getAuthDescriptor();
    if (authDescriptor != null) {
      addObserver(this);
    }
  }
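  // Editor's note: the sketch below is illustrative and not part of the
  // original source. It shows one way to build a client with the
  // constructors above, reusing the placeholder hosts from the class Javadoc
  // and the optional connect-time aliveness check mentioned there.
  //
  //   System.setProperty("net.spy.verifyAliveOnConnect", "true"); // optional
  //   MemcachedClient client = new MemcachedClient(
  //       new DefaultConnectionFactory(),
  //       AddrUtil.getAddresses("server1:11211 server2:11211"));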

  /**
   * Get the addresses of available servers.
   *
   * <p>
   * This is based on a snapshot in time so shouldn't be considered completely
   * accurate, but is useful for getting a feel for what's working and what's
   * not working.
   * </p>
   *
   * @return point-in-time view of currently available servers
   */
  public Collection<SocketAddress> getAvailableServers() {
    ArrayList<SocketAddress> rv = new ArrayList<SocketAddress>();
    for (MemcachedNode node : mconn.getLocator().getAll()) {
      if (node.isActive()) {
        rv.add(node.getSocketAddress());
      }
    }
    return rv;
  }
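  // Editor's note: illustrative sketch, not part of the original source. A
  // point-in-time health report built from getAvailableServers() above and
  // its getUnavailableServers() counterpart defined just below; "client" is
  // assumed to be an already-connected MemcachedClient.
  //
  //   for (SocketAddress sa : client.getAvailableServers()) {
  //     System.out.println("up:   " + sa);
  //   }
  //   for (SocketAddress sa : client.getUnavailableServers()) {
  //     System.out.println("down: " + sa);
  //   }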

  /**
   * Get the addresses of unavailable servers.
   *
   * <p>
   * This is based on a snapshot in time so shouldn't be considered completely
   * accurate, but is useful for getting a feel for what's working and what's
   * not working.
   * </p>
   *
   * @return point-in-time view of currently unavailable servers
   */
  public Collection<SocketAddress> getUnavailableServers() {
    ArrayList<SocketAddress> rv = new ArrayList<SocketAddress>();
    for (MemcachedNode node : mconn.getLocator().getAll()) {
      if (!node.isActive()) {
        rv.add(node.getSocketAddress());
      }
    }
    return rv;
  }

  /**
   * Get a read-only wrapper around the node locator wrapping this instance.
   *
   * @return this instance's NodeLocator
   */
  public NodeLocator getNodeLocator() {
    return mconn.getLocator().getReadonlyCopy();
  }

  /**
   * Get the default transcoder that's in use.
   *
   * @return this instance's Transcoder
   */
  public Transcoder<Object> getTranscoder() {
    return transcoder;
  }

  public CountDownLatch broadcastOp(final BroadcastOpFactory of) {
    return broadcastOp(of, mconn.getLocator().getAll(), true);
  }

  public CountDownLatch broadcastOp(final BroadcastOpFactory of,
      Collection<MemcachedNode> nodes) {
    return broadcastOp(of, nodes, true);
  }

  private CountDownLatch broadcastOp(BroadcastOpFactory of,
      Collection<MemcachedNode> nodes, boolean checkShuttingDown) {
    if (checkShuttingDown && shuttingDown) {
      throw new IllegalStateException("Shutting down");
    }
    return mconn.broadcastOperation(of, nodes);
  }

  private <T> OperationFuture<Boolean> asyncStore(StoreType storeType,
      String key, int exp, T value, Transcoder<T> tc) {
    CachedData co = tc.encode(value);
    final CountDownLatch latch = new CountDownLatch(1);
    final OperationFuture<Boolean> rv =
        new OperationFuture<Boolean>(key, latch, operationTimeout);
    Operation op = opFact.store(storeType, key, co.getFlags(), exp,
        co.getData(), new StoreOperation.Callback() {
          public void receivedStatus(OperationStatus val) {
            rv.set(val.isSuccess(), val);
          }

          public void gotData(String key, long cas) {
            rv.setCas(cas);
          }

          public void complete() {
            latch.countDown();
          }
        });
    rv.setOperation(op);
    mconn.enqueueOperation(key, op);
    return rv;
  }

  private OperationFuture<Boolean> asyncStore(StoreType storeType, String key,
      int exp, Object value) {
    return asyncStore(storeType, key, exp, value, transcoder);
  }

  private <T> OperationFuture<Boolean> asyncCat(ConcatenationType catType,
      long cas, String key, T value, Transcoder<T> tc) {
    CachedData co = tc.encode(value);
    final CountDownLatch latch = new CountDownLatch(1);
    final OperationFuture<Boolean> rv =
        new OperationFuture<Boolean>(key, latch, operationTimeout);
    Operation op = opFact.cat(catType, cas, key, co.getData(),
        new OperationCallback() {
          public void receivedStatus(OperationStatus val) {
            rv.set(val.isSuccess(), val);
          }

          public void complete() {
            latch.countDown();
          }
        });
    rv.setOperation(op);
    mconn.enqueueOperation(key, op);
    return rv;
  }

  /**
   * Touch the given key to reset its expiration time with the default
   * transcoder.
   *
   * @param key the key to fetch
   * @param exp the new expiration to set for the given key
   * @return a future that will hold the return value of whether or not the
   *         fetch succeeded
   * @throws IllegalStateException in the rare circumstance where queue is too
   *         full to accept any more requests
   */
  public OperationFuture<Boolean> touch(final String key, final int exp) {
    return touch(key, exp, transcoder);
  }
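  // Editor's note: illustrative sketch, not part of the original source. It
  // slides a key's expiration with touch() above. Per the expiration rules
  // documented for the storage methods further below, a value such as 3600
  // (well under 60*60*24*30) is treated as "seconds from now" rather than an
  // absolute Unix timestamp. "client" is an already-connected MemcachedClient;
  // exception handling is omitted (see the Advanced Usage example above).
  //
  //   OperationFuture<Boolean> touched = client.touch("someKey", 3600);
  //   if (!touched.get(5, TimeUnit.SECONDS)) {
  //     // The key was missing (or the touch failed); re-store it.
  //     // recomputeValue() is a hypothetical application helper.
  //     client.set("someKey", 3600, recomputeValue());
  //   }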
  /**
   * Touch the given key to reset its expiration time.
   *
   * @param key the key to fetch
   * @param exp the new expiration to set for the given key
   * @param tc the transcoder to serialize and unserialize value
   * @return a future that will hold the return value of whether or not the
   *         fetch succeeded
   * @throws IllegalStateException in the rare circumstance where queue is too
   *         full to accept any more requests
   */
  public <T> OperationFuture<Boolean> touch(final String key, final int exp,
      final Transcoder<T> tc) {
    final CountDownLatch latch = new CountDownLatch(1);
    final OperationFuture<Boolean> rv =
        new OperationFuture<Boolean>(key, latch, operationTimeout);
    Operation op = opFact.touch(key, exp, new OperationCallback() {
      public void receivedStatus(OperationStatus status) {
        rv.set(status.isSuccess(), status);
      }

      public void complete() {
        latch.countDown();
      }
    });
    rv.setOperation(op);
    mconn.enqueueOperation(key, op);
    return rv;
  }
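  // Editor's note: illustrative sketch, not part of the original source. It
  // applies the bounded-wait-and-cancel pattern from the class Javadoc's
  // "Advanced Usage" section to the touch() variant above, reusing the
  // client's default transcoder via getTranscoder(). "client" is an
  // already-connected MemcachedClient.
  //
  //   OperationFuture<Boolean> f =
  //       client.touch("someKey", 600, client.getTranscoder());
  //   try {
  //     f.get(5, TimeUnit.SECONDS);
  //   } catch (Exception e) {
  //     f.cancel(true); // not strictly necessary, but saves work on the server
  //   }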

  /**
   * Append to an existing value in the cache.
   *
   * If 0 is passed in as the CAS identifier, it will override the value
   * on the server without performing the CAS check.
   *
   * <p>
   * Note that the return will be false any time a mutation has not occurred.
   * </p>

* * @param cas cas identifier (ignored in the ascii protocol) * @param key the key to whose value will be appended * @param val the value to append * @return a future indicating success, false if there was no change to the * value * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture append(long cas, String key, Object val) { return append(cas, key, val, transcoder); } /** * Append to an existing value in the cache. * *

   * <p>
   * Note that the return will be false any time a mutation has not occurred.
   * </p>

* * @param key the key to whose value will be appended * @param val the value to append * @return a future indicating success, false if there was no change to the * value * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture append(String key, Object val) { return append(0, key, val, transcoder); } /** * Append to an existing value in the cache. * * If 0 is passed in as the CAS identifier, it will override the value * on the server without performing the CAS check. * *

   * <p>
   * Note that the return will be false any time a mutation has not occurred.
   * </p>

* * @param * @param cas cas identifier (ignored in the ascii protocol) * @param key the key to whose value will be appended * @param val the value to append * @param tc the transcoder to serialize and unserialize the value * @return a future indicating success * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture append(long cas, String key, T val, Transcoder tc) { return asyncCat(ConcatenationType.append, cas, key, val, tc); } /** * Append to an existing value in the cache. * * If 0 is passed in as the CAS identifier, it will override the value * on the server without performing the CAS check. * *

   * <p>
   * Note that the return will be false any time a mutation has not occurred.
   * </p>

* * @param * @param key the key to whose value will be appended * @param val the value to append * @param tc the transcoder to serialize and unserialize the value * @return a future indicating success * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture append(String key, T val, Transcoder tc) { return asyncCat(ConcatenationType.append, 0, key, val, tc); } /** * Prepend to an existing value in the cache. * * If 0 is passed in as the CAS identifier, it will override the value * on the server without performing the CAS check. * *

   * <p>
   * Note that the return will be false any time a mutation has not occurred.
   * </p>

* * @param cas cas identifier (ignored in the ascii protocol) * @param key the key to whose value will be prepended * @param val the value to append * @return a future indicating success * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture prepend(long cas, String key, Object val) { return prepend(cas, key, val, transcoder); } /** * Prepend to an existing value in the cache. * *

   * <p>
   * Note that the return will be false any time a mutation has not occurred.
   * </p>

* * @param key the key to whose value will be prepended * @param val the value to append * @return a future indicating success * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture prepend(String key, Object val) { return prepend(0, key, val, transcoder); } /** * Prepend to an existing value in the cache. * * If 0 is passed in as the CAS identifier, it will override the value * on the server without performing the CAS check. * *

   * <p>
   * Note that the return will be false any time a mutation has not occurred.
   * </p>

* * @param * @param cas cas identifier (ignored in the ascii protocol) * @param key the key to whose value will be prepended * @param val the value to append * @param tc the transcoder to serialize and unserialize the value * @return a future indicating success * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture prepend(long cas, String key, T val, Transcoder tc) { return asyncCat(ConcatenationType.prepend, cas, key, val, tc); } /** * Prepend to an existing value in the cache. * *

   * <p>
   * Note that the return will be false any time a mutation has not occurred.
   * </p>

* * @param * @param key the key to whose value will be prepended * @param val the value to append * @param tc the transcoder to serialize and unserialize the value * @return a future indicating success * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture prepend(String key, T val, Transcoder tc) { return asyncCat(ConcatenationType.prepend, 0, key, val, tc); } /** * Asynchronous CAS operation. * * @param * @param key the key * @param casId the CAS identifier (from a gets operation) * @param value the new value * @param tc the transcoder to serialize and unserialize the value * @return a future that will indicate the status of the CAS * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture asyncCAS(String key, long casId, T value, Transcoder tc) { return asyncCAS(key, casId, 0, value, tc); } /** * Asynchronous CAS operation. * * @param * @param key the key * @param casId the CAS identifier (from a gets operation) * @param exp the expiration of this object * @param value the new value * @param tc the transcoder to serialize and unserialize the value * @return a future that will indicate the status of the CAS * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture asyncCAS(String key, long casId, int exp, T value, Transcoder tc) { CachedData co = tc.encode(value); final CountDownLatch latch = new CountDownLatch(1); final OperationFuture rv = new OperationFuture(key, latch, operationTimeout); Operation op = opFact.cas(StoreType.set, key, casId, co.getFlags(), exp, co.getData(), new StoreOperation.Callback() { public void receivedStatus(OperationStatus val) { if (val instanceof CASOperationStatus) { rv.set(((CASOperationStatus) val).getCASResponse(), val); } else if (val instanceof CancelledOperationStatus) { getLogger().debug("CAS operation cancelled"); } else if (val instanceof TimedOutOperationStatus) { getLogger().debug("CAS operation timed out"); } else { throw new RuntimeException("Unhandled state: " + val); } } public void gotData(String key, long cas) { rv.setCas(cas); } public void complete() { latch.countDown(); } }); rv.setOperation(op); mconn.enqueueOperation(key, op); return rv; } /** * Asynchronous CAS operation using the default transcoder. * * @param key the key * @param casId the CAS identifier (from a gets operation) * @param value the new value * @return a future that will indicate the status of the CAS * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture asyncCAS(String key, long casId, Object value) { return asyncCAS(key, casId, value, transcoder); } /** * Perform a synchronous CAS operation. * * @param * @param key the key * @param casId the CAS identifier (from a gets operation) * @param value the new value * @param tc the transcoder to serialize and unserialize the value * @return a CASResponse * @throws OperationTimeoutException if global operation timeout is exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public CASResponse cas(String key, long casId, T value, Transcoder tc) { return cas(key, casId, 0, value, tc); } /** * Perform a synchronous CAS operation. 
* * @param * @param key the key * @param casId the CAS identifier (from a gets operation) * @param exp the expiration of this object * @param value the new value * @param tc the transcoder to serialize and unserialize the value * @return a CASResponse * @throws OperationTimeoutException if global operation timeout is exceeded * @throws CancellationException if operation was canceled * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public CASResponse cas(String key, long casId, int exp, T value, Transcoder tc) { CASResponse casr = null; try { OperationFuture casOp = asyncCAS(key, casId, exp, value, tc); casr = casOp.get(operationTimeout, TimeUnit.MILLISECONDS); return casr; } catch (InterruptedException e) { throw new RuntimeException("Interrupted waiting for value", e); } catch (ExecutionException e) { if(e.getCause() instanceof CancellationException) { throw (CancellationException) e.getCause(); } else { throw new RuntimeException("Exception waiting for value", e); } } catch (TimeoutException e) { throw new OperationTimeoutException("Timeout waiting for value: " + buildTimeoutMessage(operationTimeout, TimeUnit.MILLISECONDS), e); } } /** * Perform a synchronous CAS operation with the default transcoder. * * @param key the key * @param casId the CAS identifier (from a gets operation) * @param value the new value * @return a CASResponse * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public CASResponse cas(String key, long casId, Object value) { return cas(key, casId, value, transcoder); } /** * Add an object to the cache iff it does not exist already. * *

   * <p>
   * The <code>exp</code> value is passed along to memcached exactly as given,
   * and will be processed per the memcached protocol specification:
   * </p>
   *
   * <p>
   * Note that the return will be false any time a mutation has not occurred.
   * </p>
   *
   * <blockquote>
   * <p>
   * The actual value sent may either be Unix time (number of seconds since
   * January 1, 1970, as a 32-bit value), or a number of seconds starting from
   * current time. In the latter case, this number of seconds may not exceed
   * 60*60*24*30 (number of seconds in 30 days); if the number sent by a client
   * is larger than that, the server will consider it to be real Unix time
   * value rather than an offset from current time.
   * </p>
   * </blockquote>
* * @param * @param key the key under which this object should be added. * @param exp the expiration of this object * @param o the object to store * @param tc the transcoder to serialize and unserialize the value * @return a future representing the processing of this operation * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture add(String key, int exp, T o, Transcoder tc) { return asyncStore(StoreType.add, key, exp, o, tc); } /** * Add an object to the cache (using the default transcoder) iff it does not * exist already. * *

   * <p>
   * The <code>exp</code> value is passed along to memcached exactly as given,
   * and will be processed per the memcached protocol specification:
   * </p>
   *
   * <p>
   * Note that the return will be false any time a mutation has not occurred.
   * </p>
   *
   * <blockquote>
   * <p>
   * The actual value sent may either be Unix time (number of seconds since
   * January 1, 1970, as a 32-bit value), or a number of seconds starting from
   * current time. In the latter case, this number of seconds may not exceed
   * 60*60*24*30 (number of seconds in 30 days); if the number sent by a client
   * is larger than that, the server will consider it to be real Unix time
   * value rather than an offset from current time.
   * </p>
   * </blockquote>
* * @param key the key under which this object should be added. * @param exp the expiration of this object * @param o the object to store * @return a future representing the processing of this operation * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture add(String key, int exp, Object o) { return asyncStore(StoreType.add, key, exp, o, transcoder); } /** * Set an object in the cache regardless of any existing value. * *

   * <p>
   * The <code>exp</code> value is passed along to memcached exactly as given,
   * and will be processed per the memcached protocol specification:
   * </p>
   *
   * <p>
   * Note that the return will be false any time a mutation has not occurred.
   * </p>
   *
   * <blockquote>
   * <p>
   * The actual value sent may either be Unix time (number of seconds since
   * January 1, 1970, as a 32-bit value), or a number of seconds starting from
   * current time. In the latter case, this number of seconds may not exceed
   * 60*60*24*30 (number of seconds in 30 days); if the number sent by a client
   * is larger than that, the server will consider it to be real Unix time
   * value rather than an offset from current time.
   * </p>
   * </blockquote>
* * @param * @param key the key under which this object should be added. * @param exp the expiration of this object * @param o the object to store * @param tc the transcoder to serialize and unserialize the value * @return a future representing the processing of this operation * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture set(String key, int exp, T o, Transcoder tc) { return asyncStore(StoreType.set, key, exp, o, tc); } /** * Set an object in the cache (using the default transcoder) regardless of any * existing value. * *

   * <p>
   * The <code>exp</code> value is passed along to memcached exactly as given,
   * and will be processed per the memcached protocol specification:
   * </p>
   *
   * <p>
   * Note that the return will be false any time a mutation has not occurred.
   * </p>
   *
   * <blockquote>
   * <p>
   * The actual value sent may either be Unix time (number of seconds since
   * January 1, 1970, as a 32-bit value), or a number of seconds starting from
   * current time. In the latter case, this number of seconds may not exceed
   * 60*60*24*30 (number of seconds in 30 days); if the number sent by a client
   * is larger than that, the server will consider it to be real Unix time
   * value rather than an offset from current time.
   * </p>
   * </blockquote>
* * @param key the key under which this object should be added. * @param exp the expiration of this object * @param o the object to store * @return a future representing the processing of this operation * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture set(String key, int exp, Object o) { return asyncStore(StoreType.set, key, exp, o, transcoder); } /** * Replace an object with the given value iff there is already a value for the * given key. * *

   * <p>
   * The <code>exp</code> value is passed along to memcached exactly as given,
   * and will be processed per the memcached protocol specification:
   * </p>
   *
   * <p>
   * Note that the return will be false any time a mutation has not occurred.
   * </p>
   *
   * <blockquote>
   * <p>
   * The actual value sent may either be Unix time (number of seconds since
   * January 1, 1970, as a 32-bit value), or a number of seconds starting from
   * current time. In the latter case, this number of seconds may not exceed
   * 60*60*24*30 (number of seconds in 30 days); if the number sent by a client
   * is larger than that, the server will consider it to be real Unix time
   * value rather than an offset from current time.
   * </p>
   * </blockquote>
* * @param * @param key the key under which this object should be added. * @param exp the expiration of this object * @param o the object to store * @param tc the transcoder to serialize and unserialize the value * @return a future representing the processing of this operation * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture replace(String key, int exp, T o, Transcoder tc) { return asyncStore(StoreType.replace, key, exp, o, tc); } /** * Replace an object with the given value (transcoded with the default * transcoder) iff there is already a value for the given key. * *

   * <p>
   * The <code>exp</code> value is passed along to memcached exactly as given,
   * and will be processed per the memcached protocol specification:
   * </p>
   *
   * <p>
   * Note that the return will be false any time a mutation has not occurred.
   * </p>
   *
   * <blockquote>
   * <p>
   * The actual value sent may either be Unix time (number of seconds since
   * January 1, 1970, as a 32-bit value), or a number of seconds starting from
   * current time. In the latter case, this number of seconds may not exceed
   * 60*60*24*30 (number of seconds in 30 days); if the number sent by a client
   * is larger than that, the server will consider it to be real Unix time
   * value rather than an offset from current time.
   * </p>
   * </blockquote>
* * @param key the key under which this object should be added. * @param exp the expiration of this object * @param o the object to store * @return a future representing the processing of this operation * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture replace(String key, int exp, Object o) { return asyncStore(StoreType.replace, key, exp, o, transcoder); } /** * Get the given key asynchronously. * * @param * @param key the key to fetch * @param tc the transcoder to serialize and unserialize value * @return a future that will hold the return value of the fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public GetFuture asyncGet(final String key, final Transcoder tc) { final CountDownLatch latch = new CountDownLatch(1); final GetFuture rv = new GetFuture(latch, operationTimeout, key); Operation op = opFact.get(key, new GetOperation.Callback() { private Future val = null; public void receivedStatus(OperationStatus status) { rv.set(val, status); } public void gotData(String k, int flags, byte[] data) { assert key.equals(k) : "Wrong key returned"; val = tcService.decode(tc, new CachedData(flags, data, tc.getMaxSize())); } public void complete() { latch.countDown(); } }); rv.setOperation(op); mconn.enqueueOperation(key, op); return rv; } /** * Get the given key asynchronously and decode with the default transcoder. * * @param key the key to fetch * @return a future that will hold the return value of the fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public GetFuture asyncGet(final String key) { return asyncGet(key, transcoder); } /** * Gets (with CAS support) the given key asynchronously. * * @param * @param key the key to fetch * @param tc the transcoder to serialize and unserialize value * @return a future that will hold the return value of the fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture> asyncGets(final String key, final Transcoder tc) { final CountDownLatch latch = new CountDownLatch(1); final OperationFuture> rv = new OperationFuture>(key, latch, operationTimeout); Operation op = opFact.gets(key, new GetsOperation.Callback() { private CASValue val = null; public void receivedStatus(OperationStatus status) { rv.set(val, status); } public void gotData(String k, int flags, long cas, byte[] data) { assert key.equals(k) : "Wrong key returned"; assert cas > 0 : "CAS was less than zero: " + cas; val = new CASValue(cas, tc.decode(new CachedData(flags, data, tc.getMaxSize()))); } public void complete() { latch.countDown(); } }); rv.setOperation(op); mconn.enqueueOperation(key, op); return rv; } /** * Gets (with CAS support) the given key asynchronously and decode using the * default transcoder. * * @param key the key to fetch * @return a future that will hold the return value of the fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture> asyncGets(final String key) { return asyncGets(key, transcoder); } /** * Gets (with CAS support) with a single key. 
* * @param * @param key the key to get * @param tc the transcoder to serialize and unserialize value * @return the result from the cache and CAS id (null if there is none) * @throws OperationTimeoutException if global operation timeout is exceeded * @throws CancellationException if operation was canceled * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public CASValue gets(String key, Transcoder tc) { try { return asyncGets(key, tc).get(operationTimeout, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { throw new RuntimeException("Interrupted waiting for value", e); } catch (ExecutionException e) { if(e.getCause() instanceof CancellationException) { throw (CancellationException) e.getCause(); } else { throw new RuntimeException("Exception waiting for value", e); } } catch (TimeoutException e) { throw new OperationTimeoutException("Timeout waiting for value", e); } } /** * Get with a single key and reset its expiration. * * @param * @param key the key to get * @param exp the new expiration for the key * @param tc the transcoder to serialize and unserialize value * @return the result from the cache (null if there is none) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws CancellationException if operation was canceled * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public CASValue getAndTouch(String key, int exp, Transcoder tc) { try { return asyncGetAndTouch(key, exp, tc).get(operationTimeout, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { throw new RuntimeException("Interrupted waiting for value", e); } catch (ExecutionException e) { if(e.getCause() instanceof CancellationException) { throw (CancellationException) e.getCause(); } else { throw new RuntimeException("Exception waiting for value", e); } } catch (TimeoutException e) { throw new OperationTimeoutException("Timeout waiting for value", e); } } /** * Get a single key and reset its expiration using the default transcoder. * * @param key the key to get * @param exp the new expiration for the key * @return the result from the cache and CAS id (null if there is none) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public CASValue getAndTouch(String key, int exp) { return getAndTouch(key, exp, transcoder); } /** * Gets (with CAS support) with a single key using the default transcoder. * * @param key the key to get * @return the result from the cache and CAS id (null if there is none) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public CASValue gets(String key) { return gets(key, transcoder); } /** * Get with a single key. 
* * @param * @param key the key to get * @param tc the transcoder to serialize and unserialize value * @return the result from the cache (null if there is none) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws CancellationException if operation was canceled * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public T get(String key, Transcoder tc) { try { return asyncGet(key, tc).get(operationTimeout, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { throw new RuntimeException("Interrupted waiting for value", e); } catch (ExecutionException e) { if(e.getCause() instanceof CancellationException) { throw (CancellationException) e.getCause(); } else { throw new RuntimeException("Exception waiting for value", e); } } catch (TimeoutException e) { throw new OperationTimeoutException("Timeout waiting for value: " + buildTimeoutMessage(operationTimeout, TimeUnit.MILLISECONDS), e); } } /** * Get with a single key and decode using the default transcoder. * * @param key the key to get * @return the result from the cache (null if there is none) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Object get(String key) { return get(key, transcoder); } /** * Asynchronously get a bunch of objects from the cache. * * @param * @param keyIter Iterator that produces keys. * @param tcIter an iterator of transcoders to serialize and unserialize * values; the transcoders are matched with the keys in the same * order. The minimum of the key collection length and number of * transcoders is used and no exception is thrown if they do not * match * @return a Future result of that fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public BulkFuture> asyncGetBulk(Iterator keyIter, Iterator> tcIter) { final Map> m = new ConcurrentHashMap>(); // This map does not need to be a ConcurrentHashMap // because it is fully populated when it is used and // used only to read the transcoder for a key. 
final Map> tcMap = new HashMap>(); // Break the gets down into groups by key final Map> chunks = new HashMap>(); final NodeLocator locator = mconn.getLocator(); while (keyIter.hasNext() && tcIter.hasNext()) { String key = keyIter.next(); tcMap.put(key, tcIter.next()); StringUtils.validateKey(key, opFact instanceof BinaryOperationFactory); final MemcachedNode primaryNode = locator.getPrimary(key); MemcachedNode node = null; if (primaryNode.isActive()) { node = primaryNode; } else { for (Iterator i = locator.getSequence(key); node == null && i.hasNext();) { MemcachedNode n = i.next(); if (n.isActive()) { node = n; } } if (node == null) { node = primaryNode; } } assert node != null : "Didn't find a node for " + key; Collection ks = chunks.get(node); if (ks == null) { ks = new ArrayList(); chunks.put(node, ks); } ks.add(key); } final CountDownLatch latch = new CountDownLatch(chunks.size()); final Collection ops = new ArrayList(chunks.size()); final BulkGetFuture rv = new BulkGetFuture(m, ops, latch); GetOperation.Callback cb = new GetOperation.Callback() { @SuppressWarnings("synthetic-access") public void receivedStatus(OperationStatus status) { rv.setStatus(status); } public void gotData(String k, int flags, byte[] data) { Transcoder tc = tcMap.get(k); m.put(k, tcService.decode(tc, new CachedData(flags, data, tc.getMaxSize()))); } public void complete() { latch.countDown(); } }; // Now that we know how many servers it breaks down into, and the latch // is all set up, convert all of these strings collections to operations final Map mops = new HashMap(); for (Map.Entry> me : chunks.entrySet()) { Operation op = opFact.get(me.getValue(), cb); mops.put(me.getKey(), op); ops.add(op); } assert mops.size() == chunks.size(); mconn.checkState(); mconn.addOperations(mops); return rv; } /** * Asynchronously get a bunch of objects from the cache. * * @param * @param keys the keys to request * @param tcIter an iterator of transcoders to serialize and unserialize * values; the transcoders are matched with the keys in the same * order. The minimum of the key collection length and number of * transcoders is used and no exception is thrown if they do not * match * @return a Future result of that fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public BulkFuture> asyncGetBulk(Collection keys, Iterator> tcIter) { return asyncGetBulk(keys.iterator(), tcIter); } /** * Asynchronously get a bunch of objects from the cache. * * @param * @param keyIter Iterator for the keys to request * @param tc the transcoder to serialize and unserialize values * @return a Future result of that fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public BulkFuture> asyncGetBulk(Iterator keyIter, Transcoder tc) { return asyncGetBulk(keyIter, new SingleElementInfiniteIterator>(tc)); } /** * Asynchronously get a bunch of objects from the cache. * * @param * @param keys the keys to request * @param tc the transcoder to serialize and unserialize values * @return a Future result of that fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public BulkFuture> asyncGetBulk(Collection keys, Transcoder tc) { return asyncGetBulk(keys, new SingleElementInfiniteIterator>( tc)); } /** * Asynchronously get a bunch of objects from the cache and decode them with * the given transcoder. 
* * @param keyIter Iterator that produces the keys to request * @return a Future result of that fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public BulkFuture> asyncGetBulk( Iterator keyIter) { return asyncGetBulk(keyIter, transcoder); } /** * Asynchronously get a bunch of objects from the cache and decode them with * the given transcoder. * * @param keys the keys to request * @return a Future result of that fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public BulkFuture> asyncGetBulk(Collection keys) { return asyncGetBulk(keys, transcoder); } /** * Varargs wrapper for asynchronous bulk gets. * * @param * @param tc the transcoder to serialize and unserialize value * @param keys one more more keys to get * @return the future values of those keys * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public BulkFuture> asyncGetBulk(Transcoder tc, String... keys) { return asyncGetBulk(Arrays.asList(keys), tc); } /** * Varargs wrapper for asynchronous bulk gets with the default transcoder. * * @param keys one more more keys to get * @return the future values of those keys * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public BulkFuture> asyncGetBulk(String... keys) { return asyncGetBulk(Arrays.asList(keys), transcoder); } /** * Get the given key to reset its expiration time. * * @param key the key to fetch * @param exp the new expiration to set for the given key * @return a future that will hold the return value of the fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture> asyncGetAndTouch(final String key, final int exp) { return asyncGetAndTouch(key, exp, transcoder); } /** * Get the given key to reset its expiration time. * * @param key the key to fetch * @param exp the new expiration to set for the given key * @param tc the transcoder to serialize and unserialize value * @return a future that will hold the return value of the fetch * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public OperationFuture> asyncGetAndTouch(final String key, final int exp, final Transcoder tc) { final CountDownLatch latch = new CountDownLatch(1); final OperationFuture> rv = new OperationFuture>( key, latch, operationTimeout); Operation op = opFact.getAndTouch(key, exp, new GetAndTouchOperation.Callback() { private CASValue val = null; public void receivedStatus(OperationStatus status) { rv.set(val, status); } public void complete() { latch.countDown(); } public void gotData(String k, int flags, long cas, byte[] data) { assert k.equals(key) : "Wrong key returned"; assert cas > 0 : "CAS was less than zero: " + cas; val = new CASValue(cas, tc.decode(new CachedData(flags, data, tc.getMaxSize()))); } }); rv.setOperation(op); mconn.enqueueOperation(key, op); return rv; } /** * Get the values for multiple keys from the cache. 
* * @param * @param keyIter Iterator that produces the keys * @param tc the transcoder to serialize and unserialize value * @return a map of the values (for each value that exists) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws CancellationException if operation was canceled * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Map getBulk(Iterator keyIter, Transcoder tc) { try { return asyncGetBulk(keyIter, tc).get(operationTimeout, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { throw new RuntimeException("Interrupted getting bulk values", e); } catch (ExecutionException e) { if(e.getCause() instanceof CancellationException) { throw (CancellationException) e.getCause(); } else { throw new RuntimeException("Exception waiting for bulk values", e); } } catch (TimeoutException e) { throw new OperationTimeoutException("Timeout waiting for bulk values: " + buildTimeoutMessage(operationTimeout, TimeUnit.MILLISECONDS), e); } } /** * Get the values for multiple keys from the cache. * * @param keyIter Iterator that produces the keys * @return a map of the values (for each value that exists) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Map getBulk(Iterator keyIter) { return getBulk(keyIter, transcoder); } /** * Get the values for multiple keys from the cache. * * @param * @param keys the keys * @param tc the transcoder to serialize and unserialize value * @return a map of the values (for each value that exists) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Map getBulk(Collection keys, Transcoder tc) { return getBulk(keys.iterator(), tc); } /** * Get the values for multiple keys from the cache. * * @param keys the keys * @return a map of the values (for each value that exists) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Map getBulk(Collection keys) { return getBulk(keys, transcoder); } /** * Get the values for multiple keys from the cache. * * @param * @param tc the transcoder to serialize and unserialize value * @param keys the keys * @return a map of the values (for each value that exists) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Map getBulk(Transcoder tc, String... keys) { return getBulk(Arrays.asList(keys), tc); } /** * Get the values for multiple keys from the cache. * * @param keys the keys * @return a map of the values (for each value that exists) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Map getBulk(String... keys) { return getBulk(Arrays.asList(keys), transcoder); } /** * Get the versions of all of the connected memcacheds. 
* * @return a Map of SocketAddress to String for connected servers * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Map getVersions() { final Map rv = new ConcurrentHashMap(); CountDownLatch blatch = broadcastOp(new BroadcastOpFactory() { public Operation newOp(final MemcachedNode n, final CountDownLatch latch) { final SocketAddress sa = n.getSocketAddress(); return opFact.version(new OperationCallback() { public void receivedStatus(OperationStatus s) { rv.put(sa, s.getMessage()); } public void complete() { latch.countDown(); } }); } }); try { blatch.await(operationTimeout, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { throw new RuntimeException("Interrupted waiting for versions", e); } return rv; } /** * Get all of the stats from all of the connections. * * @return a Map of a Map of stats replies by SocketAddress * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Map> getStats() { return getStats(null); } /** * Get a set of stats from all connections. * * @param arg which stats to get * @return a Map of the server SocketAddress to a map of String stat keys to * String stat values. * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public Map> getStats(final String arg) { final Map> rv = new HashMap>(); CountDownLatch blatch = broadcastOp(new BroadcastOpFactory() { public Operation newOp(final MemcachedNode n, final CountDownLatch latch) { final SocketAddress sa = n.getSocketAddress(); rv.put(sa, new HashMap()); return opFact.stats(arg, new StatsOperation.Callback() { public void gotStat(String name, String val) { rv.get(sa).put(name, val); } @SuppressWarnings("synthetic-access") public void receivedStatus(OperationStatus status) { if (!status.isSuccess()) { getLogger().warn("Unsuccessful stat fetch: %s", status); } } public void complete() { latch.countDown(); } }); } }); try { blatch.await(operationTimeout, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { throw new RuntimeException("Interrupted waiting for stats", e); } return rv; } private long mutate(Mutator m, String key, long by, long def, int exp) { final AtomicLong rv = new AtomicLong(); final CountDownLatch latch = new CountDownLatch(1); mconn.enqueueOperation(key, opFact.mutate(m, key, by, def, exp, new OperationCallback() { public void receivedStatus(OperationStatus s) { // XXX: Potential abstraction leak. // The handling of incr/decr in the binary protocol // Allows us to avoid string processing. rv.set(new Long(s.isSuccess() ? s.getMessage() : "-1")); } public void complete() { latch.countDown(); } })); try { if (!latch.await(operationTimeout, TimeUnit.MILLISECONDS)) { throw new OperationTimeoutException("Mutate operation timed out," + "unable to modify counter [" + key + "]"); } } catch (InterruptedException e) { throw new RuntimeException("Interrupted", e); } getLogger().debug("Mutation returned %s", rv); return rv.get(); } /** * Increment the given key by the given amount. * * Due to the way the memcached server operates on items, incremented and * decremented items will be returned as Strings with any operations that * return a value. 
* * @param key the key * @param by the amount to increment * @return the new value (-1 if the key doesn't exist) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public long incr(String key, long by) { return mutate(Mutator.incr, key, by, 0, -1); } /** * Increment the given key by the given amount. * * Due to the way the memcached server operates on items, incremented and * decremented items will be returned as Strings with any operations that * return a value. * * @param key the key * @param by the amount to increment * @return the new value (-1 if the key doesn't exist) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public long incr(String key, int by) { return mutate(Mutator.incr, key, (long)by, 0, -1); } /** * Decrement the given key by the given value. * * Due to the way the memcached server operates on items, incremented and * decremented items will be returned as Strings with any operations that * return a value. * * @param key the key * @param by the value * @return the new value (-1 if the key doesn't exist) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public long decr(String key, long by) { return mutate(Mutator.decr, key, by, 0, -1); } /** * Decrement the given key by the given value. * * Due to the way the memcached server operates on items, incremented and * decremented items will be returned as Strings with any operations that * return a value. * * @param key the key * @param by the value * @return the new value (-1 if the key doesn't exist) * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public long decr(String key, int by) { return mutate(Mutator.decr, key, (long)by, 0, -1); } /** * Increment the given counter, returning the new value. * * Due to the way the memcached server operates on items, incremented and * decremented items will be returned as Strings with any operations that * return a value. * * @param key the key * @param by the amount to increment * @param def the default value (if the counter does not exist) * @param exp the expiration of this object * @return the new value, or -1 if we were unable to increment or add * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public long incr(String key, long by, long def, int exp) { return mutateWithDefault(Mutator.incr, key, by, def, exp); } /** * Increment the given counter, returning the new value. * * Due to the way the memcached server operates on items, incremented and * decremented items will be returned as Strings with any operations that * return a value. 
* * @param key the key * @param by the amount to increment * @param def the default value (if the counter does not exist) * @param exp the expiration of this object * @return the new value, or -1 if we were unable to increment or add * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public long incr(String key, int by, long def, int exp) { return mutateWithDefault(Mutator.incr, key, (long)by, def, exp); } /** * Decrement the given counter, returning the new value. * * Due to the way the memcached server operates on items, incremented and * decremented items will be returned as Strings with any operations that * return a value. * * @param key the key * @param by the amount to decrement * @param def the default value (if the counter does not exist) * @param exp the expiration of this object * @return the new value, or -1 if we were unable to decrement or add * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public long decr(String key, long by, long def, int exp) { return mutateWithDefault(Mutator.decr, key, by, def, exp); } /** * Decrement the given counter, returning the new value. * * Due to the way the memcached server operates on items, incremented and * decremented items will be returned as Strings with any operations that * return a value. * * @param key the key * @param by the amount to decrement * @param def the default value (if the counter does not exist) * @param exp the expiration of this object * @return the new value, or -1 if we were unable to decrement or add * @throws OperationTimeoutException if the global operation timeout is * exceeded * @throws IllegalStateException in the rare circumstance where queue is too * full to accept any more requests */ public long decr(String key, int by, long def, int exp) { return mutateWithDefault(Mutator.decr, key, (long)by, def, exp); } private long mutateWithDefault(Mutator t, String key, long by, long def, int exp) { long rv = mutate(t, key, by, def, exp); // The ascii protocol doesn't support defaults, so I added them // manually here. if (rv == -1) { Future f = asyncStore(StoreType.add, key, exp, String.valueOf(def)); try { if (f.get(operationTimeout, TimeUnit.MILLISECONDS)) { rv = def; } else { rv = mutate(t, key, by, 0, exp); assert rv != -1 : "Failed to mutate or init value"; } } catch (InterruptedException e) { throw new RuntimeException("Interrupted waiting for store", e); } catch (ExecutionException e) { if(e.getCause() instanceof CancellationException) { throw (CancellationException) e.getCause(); } else { throw new RuntimeException("Failed waiting for store", e); } } catch (TimeoutException e) { throw new OperationTimeoutException("Timeout waiting to mutate or init" + " value" + buildTimeoutMessage(operationTimeout, TimeUnit.MILLISECONDS), e); } } return rv; } private OperationFuture asyncMutate(Mutator m, String key, long by, long def, int exp) { final CountDownLatch latch = new CountDownLatch(1); final OperationFuture rv = new OperationFuture(key, latch, operationTimeout); Operation op = opFact.mutate(m, key, by, def, exp, new OperationCallback() { public void receivedStatus(OperationStatus s) { rv.set(new Long(s.isSuccess() ? 
  private OperationFuture<Long> asyncMutate(Mutator m, String key, long by,
      long def, int exp) {
    final CountDownLatch latch = new CountDownLatch(1);
    final OperationFuture<Long> rv =
        new OperationFuture<Long>(key, latch, operationTimeout);
    Operation op = opFact.mutate(m, key, by, def, exp,
        new OperationCallback() {
          public void receivedStatus(OperationStatus s) {
            rv.set(new Long(s.isSuccess() ? s.getMessage() : "-1"), s);
          }

          public void complete() {
            latch.countDown();
          }
        });
    mconn.enqueueOperation(key, op);
    rv.setOperation(op);
    return rv;
  }

  /**
   * Asynchronous increment.
   *
   * @param key key to increment
   * @param by the amount to increment the value by
   * @return a future with the incremented value, or -1 if the increment
   *         failed.
   * @throws IllegalStateException in the rare circumstance where queue is too
   *           full to accept any more requests
   */
  public OperationFuture<Long> asyncIncr(String key, long by) {
    return asyncMutate(Mutator.incr, key, by, 0, -1);
  }

  /**
   * Asynchronous increment.
   *
   * @param key key to increment
   * @param by the amount to increment the value by
   * @return a future with the incremented value, or -1 if the increment
   *         failed.
   * @throws IllegalStateException in the rare circumstance where queue is too
   *           full to accept any more requests
   */
  public OperationFuture<Long> asyncIncr(String key, int by) {
    return asyncMutate(Mutator.incr, key, (long) by, 0, -1);
  }

  /**
   * Asynchronous decrement.
   *
   * @param key key to decrement
   * @param by the amount to decrement the value by
   * @return a future with the decremented value, or -1 if the decrement
   *         failed.
   * @throws IllegalStateException in the rare circumstance where queue is too
   *           full to accept any more requests
   */
  public OperationFuture<Long> asyncDecr(String key, long by) {
    return asyncMutate(Mutator.decr, key, by, 0, -1);
  }

  /**
   * Asynchronous decrement.
   *
   * @param key key to decrement
   * @param by the amount to decrement the value by
   * @return a future with the decremented value, or -1 if the decrement
   *         failed.
   * @throws IllegalStateException in the rare circumstance where queue is too
   *           full to accept any more requests
   */
  public OperationFuture<Long> asyncDecr(String key, int by) {
    return asyncMutate(Mutator.decr, key, (long) by, 0, -1);
  }

  /**
   * Increment the given counter, returning the new value.
   *
   * @param key the key
   * @param by the amount to increment
   * @param def the default value (if the counter does not exist)
   * @return the new value, or -1 if we were unable to increment or add
   * @throws OperationTimeoutException if the global operation timeout is
   *           exceeded
   * @throws IllegalStateException in the rare circumstance where queue is too
   *           full to accept any more requests
   */
  public long incr(String key, long by, long def) {
    return mutateWithDefault(Mutator.incr, key, by, def, 0);
  }

  /**
   * Increment the given counter, returning the new value.
   *
   * @param key the key
   * @param by the amount to increment
   * @param def the default value (if the counter does not exist)
   * @return the new value, or -1 if we were unable to increment or add
   * @throws OperationTimeoutException if the global operation timeout is
   *           exceeded
   * @throws IllegalStateException in the rare circumstance where queue is too
   *           full to accept any more requests
   */
  public long incr(String key, int by, long def) {
    return mutateWithDefault(Mutator.incr, key, (long) by, def, 0);
  }

  /**
   * Decrement the given counter, returning the new value.
   *
   * @param key the key
   * @param by the amount to decrement
   * @param def the default value (if the counter does not exist)
   * @return the new value, or -1 if we were unable to decrement or add
   * @throws OperationTimeoutException if the global operation timeout is
   *           exceeded
   * @throws IllegalStateException in the rare circumstance where queue is too
   *           full to accept any more requests
   */
  public long decr(String key, long by, long def) {
    return mutateWithDefault(Mutator.decr, key, by, def, 0);
  }
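  /*
   * Illustrative usage sketch: the asynchronous variants above hand back an
   * OperationFuture<Long>, so the caller chooses how long to block. The key
   * name and timeout are arbitrary examples:
   *
   *   OperationFuture<Long> f = client.asyncIncr("hits", 1);
   *   try {
   *     long newValue = f.get(5, TimeUnit.SECONDS); // -1 signals the increment failed
   *   } catch (TimeoutException e) {
   *     f.cancel(false); // stop waiting; the operation may still complete server-side
   *   } catch (InterruptedException | ExecutionException e) {
   *     // handle according to application policy
   *   }
   */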
  /**
   * Decrement the given counter, returning the new value.
   *
   * @param key the key
   * @param by the amount to decrement
   * @param def the default value (if the counter does not exist)
   * @return the new value, or -1 if we were unable to decrement or add
   * @throws OperationTimeoutException if the global operation timeout is
   *           exceeded
   * @throws IllegalStateException in the rare circumstance where queue is too
   *           full to accept any more requests
   */
  public long decr(String key, int by, long def) {
    return mutateWithDefault(Mutator.decr, key, (long) by, def, 0);
  }

  /**
   * Delete the given key from the cache.
   *
   * <p>
   * The hold argument specifies the amount of time in seconds (or Unix time
   * until which) the client wishes the server to refuse "add" and "replace"
   * commands with this key. For this amount of time, the item is put into a
   * delete queue, which means that it won't be possible to retrieve it by the
   * "get" command, but "add" and "replace" commands with this key will also
   * fail (the "set" command will succeed, however). After the time passes,
   * the item is finally deleted from server memory.
   * </p>
   *
   * @param key the key to delete
   * @param hold how long the key should be unavailable to add commands
   *
   * @return whether or not the operation was performed
   * @deprecated Hold values are no longer honored.
   */
  @Deprecated
  public OperationFuture<Boolean> delete(String key, int hold) {
    return delete(key);
  }

  /**
   * Delete the given key from the cache.
   *
   * @param key the key to delete
   * @return whether or not the operation was performed
   * @throws IllegalStateException in the rare circumstance where queue is too
   *           full to accept any more requests
   */
  public OperationFuture<Boolean> delete(String key) {
    return delete(key, (long) 0);
  }

  /**
   * Delete the given key from the cache if the given CAS value applies.
   *
   * @param key the key to delete
   * @param cas the CAS value to apply.
   * @return whether or not the operation was performed
   * @throws IllegalStateException in the rare circumstance where queue is too
   *           full to accept any more requests
   */
  public OperationFuture<Boolean> delete(String key, long cas) {
    final CountDownLatch latch = new CountDownLatch(1);
    final OperationFuture<Boolean> rv =
        new OperationFuture<Boolean>(key, latch, operationTimeout);

    DeleteOperation.Callback callback = new DeleteOperation.Callback() {
      public void receivedStatus(OperationStatus s) {
        rv.set(s.isSuccess(), s);
      }

      public void gotData(long cas) {
        rv.setCas(cas);
      }

      public void complete() {
        latch.countDown();
      }
    };

    DeleteOperation op = null;
    if (cas == 0) {
      op = opFact.delete(key, callback);
    } else {
      op = opFact.delete(key, cas, callback);
    }
    rv.setOperation(op);
    mconn.enqueueOperation(key, op);
    return rv;
  }

  /**
   * Flush all caches from all servers, applying the flush after the given
   * delay.
   *
   * @param delay the period of time to delay, in seconds
   * @return whether or not the operation was accepted
   * @throws IllegalStateException in the rare circumstance where queue is too
   *           full to accept any more requests
   */
  public OperationFuture<Boolean> flush(final int delay) {
    final AtomicReference<Boolean> flushResult =
        new AtomicReference<Boolean>(null);
    final ConcurrentLinkedQueue<Operation> ops =
        new ConcurrentLinkedQueue<Operation>();
    CountDownLatch blatch = broadcastOp(new BroadcastOpFactory() {
      public Operation newOp(final MemcachedNode n,
          final CountDownLatch latch) {
        Operation op = opFact.flush(delay, new OperationCallback() {
          public void receivedStatus(OperationStatus s) {
            flushResult.set(s.isSuccess());
          }

          public void complete() {
            latch.countDown();
          }
        });
        ops.add(op);
        return op;
      }
    });
    return new OperationFuture<Boolean>(null, blatch, flushResult,
        operationTimeout) {
      @Override
      public boolean cancel(boolean ign) {
        boolean rv = false;
        for (Operation op : ops) {
          op.cancel();
          rv |= op.getState() == OperationState.WRITE_QUEUED;
        }
        return rv;
      }

      @Override
      public Boolean get(long duration, TimeUnit units)
        throws InterruptedException, TimeoutException, ExecutionException {
        status = new OperationStatus(true, "OK");
        return super.get(duration, units);
      }

      @Override
      public boolean isCancelled() {
        boolean rv = false;
        for (Operation op : ops) {
          rv |= op.isCancelled();
        }
        return rv;
      }

      @Override
      public boolean isDone() {
        boolean rv = true;
        for (Operation op : ops) {
          rv &= op.getState() == OperationState.COMPLETE;
        }
        return rv || isCancelled();
      }
    };
  }
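  /*
   * Illustrative usage sketch: delete and flush are asynchronous and report
   * success through their futures. Key name and timeout are arbitrary
   * examples, and checked exceptions are omitted for brevity:
   *
   *   OperationFuture<Boolean> deleted = client.delete("stale-key");
   *   if (deleted.get(5, TimeUnit.SECONDS)) {
   *     // the key existed and has been removed
   *   }
   *
   *   client.flush(30); // ask each server to flush its cache 30 seconds from now
   */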
  /**
   * Flush all caches from all servers immediately.
   *
   * @return whether or not the operation was performed
   * @throws IllegalStateException in the rare circumstance where queue is too
   *           full to accept any more requests
   */
  public OperationFuture<Boolean> flush() {
    return flush(-1);
  }

  /**
   * List the SASL mechanisms reported by the connected servers.
   *
   * @return the set of SASL mechanism names supported by the servers
   */
  public Set<String> listSaslMechanisms() {
    final ConcurrentMap<String, String> rv =
        new ConcurrentHashMap<String, String>();

    CountDownLatch blatch = broadcastOp(new BroadcastOpFactory() {
      public Operation newOp(MemcachedNode n, final CountDownLatch latch) {
        return opFact.saslMechs(new OperationCallback() {
          public void receivedStatus(OperationStatus status) {
            for (String s : status.getMessage().split(" ")) {
              rv.put(s, s);
            }
          }

          public void complete() {
            latch.countDown();
          }
        });
      }
    });

    try {
      blatch.await();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }

    return rv.keySet();
  }

  /**
   * Shut down immediately.
   */
  public void shutdown() {
    shutdown(-1, TimeUnit.MILLISECONDS);
  }

  /**
   * Shut down this client gracefully.
   *
   * @param timeout the amount of time to wait for shutdown
   * @param unit the TimeUnit for the timeout
   * @return result of the shutdown request
   */
  public boolean shutdown(long timeout, TimeUnit unit) {
    // Guard against double shutdowns (bug 8).
    if (shuttingDown) {
      getLogger().info("Suppressing duplicate attempt to shut down");
      return false;
    }
    shuttingDown = true;
    String baseName = mconn.getName();
    mconn.setName(baseName + " - SHUTTING DOWN");
    boolean rv = true;
    try {
      // Conditionally wait
      if (timeout > 0) {
        mconn.setName(baseName + " - SHUTTING DOWN (waiting)");
        rv = waitForQueues(timeout, unit);
      }
    } finally {
      // But always begin the shutdown sequence
      try {
        mconn.setName(baseName + " - SHUTTING DOWN (telling client)");
        mconn.shutdown();
        mconn.setName(baseName + " - SHUTTING DOWN (informed client)");
        tcService.shutdown();
      } catch (IOException e) {
        getLogger().warn("exception while shutting down", e);
      }
    }
    return rv;
  }

  /**
   * Wait for the queues to die down.
   *
   * @param timeout the amount of time to wait for the queues to quiesce
   * @param unit the TimeUnit for the timeout
   * @return result of the request for the wait
   * @throws IllegalStateException in the rare circumstance where queue is too
   *           full to accept any more requests
   */
  public boolean waitForQueues(long timeout, TimeUnit unit) {
    CountDownLatch blatch = broadcastOp(new BroadcastOpFactory() {
      public Operation newOp(final MemcachedNode n,
          final CountDownLatch latch) {
        return opFact.noop(new OperationCallback() {
          public void complete() {
            latch.countDown();
          }

          public void receivedStatus(OperationStatus s) {
            // Nothing special when receiving status, only
            // necessary to complete the interface
          }
        });
      }
    }, mconn.getLocator().getAll(), false);
    try {
      // XXX: Perhaps IllegalStateException should be caught here
      // and the check retried.
      return blatch.await(timeout, unit);
    } catch (InterruptedException e) {
      throw new RuntimeException("Interrupted waiting for queues", e);
    }
  }

  /**
   * Add a connection observer.
   *
   * If connections are already established, your observer will be called with
   * the address and -1.
   *
   * @param obs the ConnectionObserver you wish to add
   * @return true if the observer was added.
   */
  public boolean addObserver(ConnectionObserver obs) {
    boolean rv = mconn.addObserver(obs);
    if (rv) {
      for (MemcachedNode node : mconn.getLocator().getAll()) {
        if (node.isActive()) {
          obs.connectionEstablished(node.getSocketAddress(), -1);
        }
      }
    }
    return rv;
  }
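  /*
   * Illustrative usage sketch: a ConnectionObserver can be registered to log
   * connection churn. The anonymous observer below is a hypothetical example,
   * not part of this class:
   *
   *   client.addObserver(new ConnectionObserver() {
   *     public void connectionEstablished(SocketAddress sa, int reconnectCount) {
   *       System.out.println("connected to " + sa + " (reconnect #" + reconnectCount + ")");
   *     }
   *
   *     public void connectionLost(SocketAddress sa) {
   *       System.out.println("lost connection to " + sa);
   *     }
   *   });
   */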
  /**
   * Remove a connection observer.
   *
   * @param obs the ConnectionObserver you wish to remove
   * @return true if the observer existed, but no longer does
   */
  public boolean removeObserver(ConnectionObserver obs) {
    return mconn.removeObserver(obs);
  }

  public void connectionEstablished(SocketAddress sa, int reconnectCount) {
    if (authDescriptor != null) {
      if (authDescriptor.authThresholdReached()) {
        this.shutdown();
      }
      authMonitor.authConnection(mconn, opFact, authDescriptor, findNode(sa));
    }
  }

  private MemcachedNode findNode(SocketAddress sa) {
    MemcachedNode node = null;
    for (MemcachedNode n : mconn.getLocator().getAll()) {
      if (n.getSocketAddress().equals(sa)) {
        node = n;
      }
    }
    assert node != null : "Couldn't find node connected to " + sa;
    return node;
  }

  private String buildTimeoutMessage(long timeWaited, TimeUnit unit) {
    StringBuilder message = new StringBuilder();

    message.append(MessageFormat.format("waited {0} ms.",
        unit.convert(timeWaited, TimeUnit.MILLISECONDS)));
    message.append(" Node status: ").append(mconn.connectionsStatus());
    return message.toString();
  }

  public void connectionLost(SocketAddress sa) {
    // Don't care.
  }

  @Override
  public String toString() {
    return connFactory.toString();
  }
}
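/*
 * Illustrative lifecycle sketch: a client is typically built once, shared
 * across threads, and shut down gracefully when the application exits. The
 * address, key, expiration, and timeout below are arbitrary examples, and
 * checked exceptions are omitted for brevity:
 *
 *   MemcachedClient client =
 *       new MemcachedClient(new InetSocketAddress("localhost", 11211));
 *   client.set("greeting", 3600, "hello");
 *   Object value = client.get("greeting");
 *   client.shutdown(10, TimeUnit.SECONDS); // drain queues for up to 10s, then close
 */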