/*
* arcus-java-client : Arcus Java client
* Copyright 2010-2014 NAVER Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.spy.memcached;
import java.io.IOException;
import java.io.InputStream;
import java.net.InetSocketAddress;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantLock;
import java.util.jar.JarFile;
import java.util.jar.Manifest;
import net.spy.memcached.collection.Attributes;
import net.spy.memcached.collection.BKeyObject;
import net.spy.memcached.collection.BTreeCount;
import net.spy.memcached.collection.BTreeCreate;
import net.spy.memcached.collection.BTreeDelete;
import net.spy.memcached.collection.BTreeElement;
import net.spy.memcached.collection.BTreeFindPosition;
import net.spy.memcached.collection.BTreeFindPositionWithGet;
import net.spy.memcached.collection.BTreeGet;
import net.spy.memcached.collection.BTreeGetBulk;
import net.spy.memcached.collection.BTreeGetBulkWithByteTypeBkey;
import net.spy.memcached.collection.BTreeGetBulkWithLongTypeBkey;
import net.spy.memcached.collection.BTreeGetByPosition;
import net.spy.memcached.collection.BTreeGetResult;
import net.spy.memcached.collection.BTreeMutate;
import net.spy.memcached.collection.BTreeOrder;
import net.spy.memcached.collection.BTreeSMGet;
import net.spy.memcached.collection.BTreeSMGetWithByteTypeBkey;
import net.spy.memcached.collection.BTreeSMGetWithByteTypeBkeyOld;
import net.spy.memcached.collection.BTreeSMGetWithLongTypeBkey;
import net.spy.memcached.collection.BTreeSMGetWithLongTypeBkeyOld;
import net.spy.memcached.collection.BTreeStore;
import net.spy.memcached.collection.BTreeStoreAndGet;
import net.spy.memcached.collection.BTreeUpdate;
import net.spy.memcached.collection.BTreeUpsert;
import net.spy.memcached.collection.ByteArrayBKey;
import net.spy.memcached.collection.ByteArrayTreeMap;
import net.spy.memcached.collection.CollectionAttributes;
import net.spy.memcached.collection.CollectionBulkStore;
import net.spy.memcached.collection.CollectionCount;
import net.spy.memcached.collection.CollectionCreate;
import net.spy.memcached.collection.CollectionDelete;
import net.spy.memcached.collection.CollectionExist;
import net.spy.memcached.collection.CollectionGet;
import net.spy.memcached.collection.CollectionMutate;
import net.spy.memcached.collection.CollectionPipedStore;
import net.spy.memcached.collection.CollectionPipedStore.BTreePipedStore;
import net.spy.memcached.collection.CollectionPipedStore.ByteArraysBTreePipedStore;
import net.spy.memcached.collection.CollectionPipedStore.ListPipedStore;
import net.spy.memcached.collection.CollectionPipedStore.SetPipedStore;
import net.spy.memcached.collection.CollectionPipedUpdate;
import net.spy.memcached.collection.CollectionPipedUpdate.BTreePipedUpdate;
import net.spy.memcached.collection.CollectionResponse;
import net.spy.memcached.collection.CollectionStore;
import net.spy.memcached.collection.CollectionUpdate;
import net.spy.memcached.collection.Element;
import net.spy.memcached.collection.ElementFlagFilter;
import net.spy.memcached.collection.ElementFlagUpdate;
import net.spy.memcached.collection.ElementValueType;
import net.spy.memcached.collection.ExtendedBTreeGet;
import net.spy.memcached.collection.ListCreate;
import net.spy.memcached.collection.ListDelete;
import net.spy.memcached.collection.ListGet;
import net.spy.memcached.collection.ListStore;
import net.spy.memcached.collection.SMGetElement;
import net.spy.memcached.collection.SMGetTrimKey;
import net.spy.memcached.collection.SMGetMode;
import net.spy.memcached.collection.SetCreate;
import net.spy.memcached.collection.SetDelete;
import net.spy.memcached.collection.SetExist;
import net.spy.memcached.collection.SetGet;
import net.spy.memcached.collection.SetPipedExist;
import net.spy.memcached.collection.SetStore;
import net.spy.memcached.compat.log.Logger;
import net.spy.memcached.compat.log.LoggerFactory;
import net.spy.memcached.internal.BTreeStoreAndGetFuture;
import net.spy.memcached.internal.CheckedOperationTimeoutException;
import net.spy.memcached.internal.CollectionFuture;
import net.spy.memcached.internal.CollectionGetBulkFuture;
import net.spy.memcached.internal.OperationFuture;
import net.spy.memcached.internal.SMGetFuture;
import net.spy.memcached.ops.BTreeFindPositionOperation;
import net.spy.memcached.ops.BTreeFindPositionWithGetOperation;
import net.spy.memcached.ops.BTreeGetBulkOperation;
import net.spy.memcached.ops.BTreeGetByPositionOperation;
import net.spy.memcached.ops.BTreeSortMergeGetOperation;
import net.spy.memcached.ops.BTreeSortMergeGetOperationOld;
import net.spy.memcached.ops.BTreeStoreAndGetOperation;
import net.spy.memcached.ops.CollectionBulkStoreOperation;
import net.spy.memcached.ops.CollectionGetOperation;
import net.spy.memcached.ops.CollectionOperationStatus;
import net.spy.memcached.ops.CollectionPipedExistOperation;
import net.spy.memcached.ops.CollectionPipedStoreOperation;
import net.spy.memcached.ops.CollectionPipedUpdateOperation;
import net.spy.memcached.ops.ExtendedBTreeGetOperation;
import net.spy.memcached.ops.GetAttrOperation;
import net.spy.memcached.ops.Mutator;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.ops.OperationCallback;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.OperationStatus;
import net.spy.memcached.ops.StoreType;
import net.spy.memcached.plugin.FrontCacheMemcachedClient;
import net.spy.memcached.transcoders.CollectionTranscoder;
import net.spy.memcached.transcoders.Transcoder;
import net.spy.memcached.util.BTreeUtil;
/**
* Client to an Arcus cluster.
*
* <h2>Basic usage</h2>
*
* <pre>{@code
* final static String arcusAdminAddrs = "127.0.0.1:2181";
* final static String serviceCode = "cafe";
*
* ConnectionFactoryBuilder cfb = new ConnectionFactoryBuilder();
*
* ArcusClient c = ArcusClient.createArcusClient(arcusAdminAddrs, serviceCode, cfb);
*
* // Store a value (async) for one hour
* c.set("someKey", 3600, someObject);
* // Retrieve a value.
* Future<Object> myFuture = c.asyncGet("someKey");
* }</pre>
*
* If a pool of clients is needed, use it as follows:
*
* <pre>{@code
* int poolSize = 4;
* ArcusClientPool pool = ArcusClient.createArcusClientPool(arcusAdminAddrs, serviceCode, cfb, poolSize);
*
* // Store a value
* pool.set("someKey", 3600, someObject);
* // Retrieve a value
* Future<Object> myFuture = pool.asyncGet("someKey");
* }</pre>
*/
public class ArcusClient extends FrontCacheMemcachedClient implements ArcusClientIF {
static String VERSION;
static Logger arcusLogger = LoggerFactory.getLogger("net.spy.memcached");
static final String ARCUS_CLOUD_ADDR = "127.0.0.1:2181";
public boolean dead;
final BulkService bulkService;
final Transcoder<Object> collectionTranscoder;
final int smgetKeyChunkSize;
static final int BOPGET_BULK_CHUNK_SIZE = 200;
static final int NON_PIPED_BULK_INSERT_CHUNK_SIZE = 500;
static final int MAX_GETBULK_KEY_COUNT = 200;
static final int MAX_GETBULK_ELEMENT_COUNT = 50;
static final int MAX_SMGET_COUNT = 1000; // server configuration is 2000.
private CacheManager cacheManager;
public void setCacheManager(CacheManager cacheManager) {
this.cacheManager = cacheManager;
}
/**
* Create a single Arcus client.
*
* @param hostPorts
* arcus admin addresses
* @param serviceCode
* service code
* @param cfb
* ConnectionFactoryBuilder
* @return a single ArcusClient
*/
public static ArcusClient createArcusClient(String hostPorts, String serviceCode,
ConnectionFactoryBuilder cfb) {
return ArcusClient.createArcusClient(hostPorts, serviceCode, cfb, 1, 10000).getClient();
}
/**
* Create a single Arcus client using the default Arcus admin address (ARCUS_CLOUD_ADDR).
*
* @param serviceCode
* service code
* @param cfb
* ConnectionFactoryBuilder
* @return a single ArcusClient
*/
public static ArcusClient createArcusClient(String serviceCode,
ConnectionFactoryBuilder cfb) {
return ArcusClient.createArcusClient(ARCUS_CLOUD_ADDR, serviceCode, cfb, 1, 10000).getClient();
}
/**
* Create a pool of Arcus clients.
*
* @param hostPorts
* arcus admin addresses
* @param serviceCode
* service code
* @param poolSize
* Arcus client pool size
* @param cfb
* ConnectionFactoryBuilder
* @return multiple ArcusClient
*
*/
public static ArcusClientPool createArcusClientPool(String hostPorts, String serviceCode,
ConnectionFactoryBuilder cfb, int poolSize) {
return ArcusClient.createArcusClient(hostPorts, serviceCode, cfb, poolSize, 0);
}
/**
* Create a pool of Arcus clients using the default Arcus admin address (ARCUS_CLOUD_ADDR).
*
* @param serviceCode
* service code
* @param poolSize
* Arcus client pool size
* @param cfb
* ConnectionFactoryBuilder
* @return multiple ArcusClient
*
*/
public static ArcusClientPool createArcusClientPool(String serviceCode,
ConnectionFactoryBuilder cfb, int poolSize) {
return ArcusClient.createArcusClient(ARCUS_CLOUD_ADDR, serviceCode, cfb, poolSize, 0);
}
/**
*
* @param hostPorts
* arcus admin addresses
* @param serviceCode
* service code
* @param cfb
* ConnectionFactoryBuilder
* @param poolSize
* Arcus client pool size
* @param waitTimeForConnect
* waiting time for connection establishment (milliseconds)
*
* @return multiple ArcusClient
*/
private static ArcusClientPool createArcusClient(String hostPorts, String serviceCode,
ConnectionFactoryBuilder cfb, int poolSize, int waitTimeForConnect) {
if (hostPorts == null) {
throw new NullPointerException("Arcus admin address required.");
}
if (serviceCode == null) {
throw new NullPointerException("Service code required.");
}
if (hostPorts.isEmpty()) {
throw new IllegalArgumentException("Arcus admin address is empty.");
}
if (serviceCode.isEmpty()) {
throw new IllegalArgumentException("Service code is empty.");
}
if (VERSION == null) {
VERSION = getVersion();
}
final CountDownLatch latch = new CountDownLatch(1);
CacheManager exe = new CacheManager(
hostPorts, serviceCode, cfb, latch, poolSize,
waitTimeForConnect);
try {
latch.await();
} catch (InterruptedException e) {
arcusLogger.warn("Interrupted while waiting for the Arcus connections to be established.", e);
}
ArcusClient[] client = exe.getAC();
return new ArcusClientPool(poolSize, client);
}
/**
* Create an Arcus client for the given memcached server addresses.
*
* @param cf connection factory to configure connections for this client
* @param addrs socket addresses for the memcached servers
* @return Arcus client
* @throws IOException if connections cannot be established
*/
protected static ArcusClient getInstance(ConnectionFactory cf,
List<InetSocketAddress> addrs) throws IOException {
return new ArcusClient(cf, addrs);
}
/**
* Create an Arcus client for the given memcached server addresses.
*
* @param cf connection factory to configure connections for this client
* @param addrs socket addresses for the memcached servers
* @throws IOException if connections cannot be established
*/
public ArcusClient(ConnectionFactory cf, List<InetSocketAddress> addrs)
throws IOException {
super(cf, addrs);
bulkService = new BulkService(cf.getBulkServiceLoopLimit(),
cf.getBulkServiceThreadCount(), cf.getBulkServiceSingleOpTimeout());
collectionTranscoder = new CollectionTranscoder();
smgetKeyChunkSize = cf.getDefaultMaxSMGetKeyChunkSize();
registerMbean();
}
/**
* Register mbean for Arcus client statistics.
*/
private void registerMbean() {
if ("false".equals(System.getProperty("arcus.mbean", "false")
.toLowerCase())) {
getLogger().info("Arcus client statistics MBean is NOT registered.");
return;
}
try {
StatisticsHandler mbean = new StatisticsHandler(this);
ArcusMBeanServer.getInstance().registMBean(
mbean,
mbean.getClass().getPackage().getName() + ":type="
+ mbean.getClass().getSimpleName() + "-"
+ mbean.hashCode());
getLogger().info("Arcus client statistics MBean is registered.");
} catch (Exception e) {
getLogger().warn("Failed to initialize statistics mbean.", e);
}
}
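/*
 * Note: registration is keyed off the "arcus.mbean" system property, which
 * defaults to "false"; any value other than "false" enables it. For example:
 *
 *   java -Darcus.mbean=true -cp ... YourApplication
 */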
/* (non-Javadoc)
* @see net.spy.memcached.ArcusClient#shutdown()
*/
@Override
public void shutdown() {
super.shutdown();
// Connect to Arcus server directly, cache manager may be null.
if (cacheManager != null) {
cacheManager.shutdown();
}
dead = true;
if (bulkService != null) {
bulkService.shutdown();
}
}
Future<Boolean> asyncStore(StoreType storeType, String key,
int exp, CachedData co) {
final CountDownLatch latch = new CountDownLatch(1);
final OperationFuture<Boolean> rv = new OperationFuture<Boolean>(latch,
operationTimeout);
Operation op = opFact.store(storeType, key, co.getFlags(),
exp, co.getData(), new OperationCallback() {
public void receivedStatus(OperationStatus val) {
rv.set(val.isSuccess());
}
public void complete() {
latch.countDown();
}});
rv.setOperation(op);
addOp(key, op);
return rv;
}
/* (non-Javadoc)
* @see net.spy.memcached.ArcusClient#asyncSetAttr(java.lang.String, net.spy.memcached.collection.CollectionAttributes)
*/
@Override
public CollectionFuture<Boolean> asyncSetAttr(String key,
Attributes attrs) {
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<Boolean> rv = new CollectionFuture<Boolean>(
latch, operationTimeout);
Operation op = opFact.setAttr(key, attrs, new OperationCallback() {
public void receivedStatus(OperationStatus status) {
if (status instanceof CollectionOperationStatus) {
rv.set(status.isSuccess(), (CollectionOperationStatus) status);
} else {
getLogger().warn("Unhandled state: " + status);
rv.set(status.isSuccess(), new CollectionOperationStatus(status));
}
}
public void complete() {
latch.countDown();
}
});
rv.setOperation(op);
addOp(key, op);
return rv;
}
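/*
 * Usage sketch, assuming a connected client "client" and that the
 * Attributes API exposes a setter for the expire time (a hypothetical
 * setter name; check the collection.Attributes class):
 *
 *   CollectionAttributes attrs = new CollectionAttributes();
 *   attrs.setExpireTime(3600); // hypothetical setter
 *   Boolean ok = client.asyncSetAttr("someKey", attrs).get(700L, TimeUnit.MILLISECONDS);
 */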
/* (non-Javadoc)
* @see net.spy.memcached.ArcusClient#asyncGetAttr(java.lang.String)
*/
@Override
public CollectionFuture<CollectionAttributes> asyncGetAttr(final String key) {
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<CollectionAttributes> rv = new CollectionFuture<CollectionAttributes>(
latch, operationTimeout);
Operation op = opFact.getAttr(key, new GetAttrOperation.Callback() {
CollectionAttributes attrs = new CollectionAttributes();
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus stat;
if (status instanceof CollectionOperationStatus) {
stat = (CollectionOperationStatus) status;
} else {
stat = new CollectionOperationStatus(status);
}
rv.set(stat.isSuccess() ? attrs : null, stat);
}
public void complete() {
latch.countDown();
}
public void gotAttribute(String k, String attr) {
assert key.equals(k) : "Wrong key returned";
attrs.setAttribute(attr);
}
});
rv.setOperation(op);
addOp(key, op);
return rv;
}
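/*
 * Usage sketch, assuming a connected client "client"; the future yields
 * null when the operation fails (see the callback above, which sets null
 * on a non-success status):
 *
 *   CollectionFuture<CollectionAttributes> f = client.asyncGetAttr("someKey");
 *   CollectionAttributes attrs = f.get(700L, TimeUnit.MILLISECONDS);
 *   if (attrs != null) {
 *       // inspect attributes, e.g. attrs.getExpireTime()
 *   }
 */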
/**
* Generic get operation for list items. Public methods for list items call this method.
*
* @param k list item's key
* @param collectionGet operation parameters (element key and so on)
* @param tc transcoder to serialize and deserialize values
* @return future holding the fetched value
*/
private <T> CollectionFuture<List<T>> asyncLopGet(final String k,
final CollectionGet collectionGet, final Transcoder<T> tc) {
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<List<T>> rv = new CollectionFuture<List<T>>(
latch, operationTimeout);
Operation op = opFact.collectionGet(k, collectionGet,
new CollectionGetOperation.Callback() {
List<T> list = new ArrayList<T>();
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
if (cstatus.isSuccess()) {
rv.set(list, cstatus);
return;
}
switch (cstatus.getResponse()) {
case NOT_FOUND:
rv.set(null, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Key(%s) not found : %s", k,
cstatus);
}
break;
case NOT_FOUND_ELEMENT:
rv.set(list, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Element(%s) not found : %s",
k, cstatus);
}
break;
case OUT_OF_RANGE:
rv.set(list, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Element(%s) not found in condition : %s",
k, cstatus);
}
break;
case UNREADABLE:
rv.set(null, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Element(%s) is not readable : %s",
k, cstatus);
}
break;
default:
rv.set(null, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Key(%s) unknown status : %s",
k, cstatus);
}
break;
}
}
public void complete() {
latch.countDown();
}
public void gotData(String key, long subkey, int flags,
byte[] data) {
assert key.equals(k) : "Wrong key returned";
list.add(tc.decode(new CachedData(flags, data, tc
.getMaxSize())));
}
});
rv.setOperation(op);
addOp(k, op);
return rv;
}
/* (non-Javadoc)
* @see net.spy.memcached.ArcusClient#asyncSopExist(java.lang.String, T, net.spy.memcached.transcoders.Transcoder)
*/
@Override
public <T> CollectionFuture<Boolean> asyncSopExist(String key, T value,
Transcoder<T> tc) {
SetExist<T> exist = new SetExist<T>();
exist.setValue(value);
return asyncCollectionExist(key, "", exist, tc);
}
/* (non-Javadoc)
* @see net.spy.memcached.ArcusClient#asyncSopExist(java.lang.String, java.lang.Object)
*/
@Override
public CollectionFuture<Boolean> asyncSopExist(String key, Object value) {
SetExist<Object> exist = new SetExist<Object>();
exist.setValue(value);
return asyncCollectionExist(key, "", exist, collectionTranscoder);
}
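/*
 * Usage sketch, assuming a connected client "client" and an existing set
 * item under "set:key":
 *
 *   CollectionFuture<Boolean> f = client.asyncSopExist("set:key", "someValue");
 *   Boolean exists = f.get(700L, TimeUnit.MILLISECONDS);
 */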
/**
* Generic get operation for set items. Public methods for set items call this method.
*
* @param k set item's key
* @param collectionGet operation parameters (element key and so on)
* @param tc transcoder to serialize and deserialize values
* @return future holding the fetched value
*/
private <T> CollectionFuture<Set<T>> asyncSopGet(final String k,
final CollectionGet collectionGet, final Transcoder<T> tc) {
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<Set<T>> rv = new CollectionFuture<Set<T>>(latch,
operationTimeout);
Operation op = opFact.collectionGet(k, collectionGet,
new CollectionGetOperation.Callback() {
Set<T> set = new HashSet<T>();
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
if (cstatus.isSuccess()) {
rv.set(set, cstatus);
return;
}
switch (cstatus.getResponse()) {
case NOT_FOUND:
rv.set(null, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Key(%s) not found : %s", k,
cstatus);
}
break;
case NOT_FOUND_ELEMENT:
rv.set(set, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Element(%s) not found : %s",
k, cstatus);
}
break;
case UNREADABLE:
rv.set(null, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Collection(%s) is not readable : %s",
k, cstatus);
}
break;
default:
rv.set(null, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Key(%s) unknown status : %s",
k, cstatus);
}
break;
}
}
public void complete() {
latch.countDown();
}
public void gotData(String key, long subkey, int flags,
byte[] data) {
assert key.equals(k) : "Wrong key returned";
set.add(tc.decode(new CachedData(flags, data, tc
.getMaxSize())));
}
});
rv.setOperation(op);
addOp(k, op);
return rv;
}
/**
* Generic get operation for b+tree items. Public methods for b+tree items call this method.
*
* @param k b+tree item's key
* @param collectionGet operation parameters (element keys and so on)
* @param reverse false=forward or true=backward
* @param tc transcoder to serialize and deserialize values
* @return future holding the map of fetched elements and their keys
*/
private <T> CollectionFuture<Map<Long, Element<T>>> asyncBopGet(
final String k, final CollectionGet collectionGet,
final boolean reverse, final Transcoder<T> tc) {
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<Map<Long, Element<T>>> rv = new CollectionFuture<Map<Long, Element<T>>>(
latch, operationTimeout);
Operation op = opFact.collectionGet(k, collectionGet,
new CollectionGetOperation.Callback() {
TreeMap<Long, Element<T>> map = new TreeMap<Long, Element<T>>(
(reverse) ? Collections.reverseOrder() : null);
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
if (cstatus.isSuccess()) {
rv.set(map, cstatus);
return;
}
switch (cstatus.getResponse()) {
case NOT_FOUND:
rv.set(null, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Key(%s) not found : %s", k,
cstatus);
}
break;
case NOT_FOUND_ELEMENT:
rv.set(map, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Element(%s) not found : %s",
k, cstatus);
}
break;
case UNREADABLE:
rv.set(null, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Element(%s) is not readable : %s",
k, cstatus);
}
break;
default:
rv.set(null, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Key(%s) Unknown response : %s",
k, cstatus);
}
break;
}
}
public void complete() {
latch.countDown();
}
public void gotData(String key, long subkey, int flags,
byte[] data) {
assert key.equals(k) : "Wrong key returned";
map.put(subkey,
new Element<T>(subkey, tc
.decode(new CachedData(flags, data, tc
.getMaxSize())), collectionGet
.getElementFlag()));
}
});
rv.setOperation(op);
addOp(k, op);
return rv;
}
/**
* Generic store operation for collection items. Public methods for collection items call this method.
*
* @param key collection item's key
* @param subkey element key (list index, b+tree bkey)
* @param collectionStore operation parameters (value, eflags, attributes, and so on)
* @param tc transcoder to serialize and deserialize values
* @return future holding the success/failure of the operation
*/
private <T> CollectionFuture<Boolean> asyncCollectionStore(String key,
String subkey, CollectionStore<T> collectionStore, Transcoder<T> tc) {
CachedData co = tc.encode(collectionStore.getValue());
collectionStore.setFlags(co.getFlags());
return asyncCollectionStore(key, subkey, collectionStore, co);
}
/**
* Generic store operation for collection items. Public methods for collection items call this method.
*
* @param key collection item's key
* @param subkey element key (list index, b+tree bkey)
* @param collectionStore operation parameters (value, eflags, attributes, and so on)
* @param co transcoded value
* @return future holding the success/failure of the operation
*/
<T> CollectionFuture<Boolean> asyncCollectionStore(final String key,
final String subkey, final CollectionStore<T> collectionStore,
final CachedData co) {
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<Boolean> rv = new CollectionFuture<Boolean>(
latch, operationTimeout);
Operation op = opFact.collectionStore(key, subkey, collectionStore,
co.getData(), new OperationCallback() {
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
rv.set(cstatus.isSuccess(), cstatus);
if (!cstatus.isSuccess()
&& getLogger().isDebugEnabled()) {
getLogger().debug(
"Insertion to the collection failed : "
+ cstatus.getMessage()
+ " (type="
+ collectionStore.getClass()
.getName() + ", key=" + key
+ ", subkey=" + subkey + ", value="
+ collectionStore.getValue() + ")");
}
}
public void complete() {
latch.countDown();
}
});
rv.setOperation(op);
addOp(key, op);
return rv;
}
/**
* Generic pipelined store operation for collection items. Public methods for collection items call this method.
*
* @param key collection item's key
* @param store operation parameters (values, attributes, and so on)
* @return future holding the success/failure codes of individual operations and their index
*/
<T> CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncCollectionPipedStore(
final String key, final CollectionPipedStore<T> store) {
if (store.getItemCount() == 0) {
throw new IllegalArgumentException(
"The number of piped operations must be larger than 0.");
}
if (store.getItemCount() > CollectionPipedStore.MAX_PIPED_ITEM_COUNT) {
throw new IllegalArgumentException(
"The number of piped operations must not exceed a maximum of "
+ CollectionPipedStore.MAX_PIPED_ITEM_COUNT + ".");
}
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<Map<Integer, CollectionOperationStatus>> rv =
new CollectionFuture<Map<Integer, CollectionOperationStatus>>(latch, operationTimeout);
Operation op = opFact.collectionPipedStore(key, store,
new CollectionPipedStoreOperation.Callback() {
Map<Integer, CollectionOperationStatus> result =
new TreeMap<Integer, CollectionOperationStatus>();
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
rv.set(result, cstatus);
}
public void complete() {
latch.countDown();
}
public void gotStatus(Integer index, OperationStatus status) {
if (status instanceof CollectionOperationStatus) {
result.put(index, (CollectionOperationStatus) status);
} else {
result.put(index, new CollectionOperationStatus(status));
}
}
});
rv.setOperation(op);
addOp(key, op);
return rv;
}
/**
* Generic pipelined update operation for collection items. Public methods for collection items call this method.
*
* @param key collection item's key
* @param update operation parameters (values and so on)
* @return future holding the success/failure codes of individual operations and their index
*/
<T> CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncCollectionPipedUpdate(
final String key, final CollectionPipedUpdate<T> update) {
if (update.getItemCount() == 0) {
throw new IllegalArgumentException(
"The number of piped operations must be larger than 0.");
}
if (update.getItemCount() > CollectionPipedUpdate.MAX_PIPED_ITEM_COUNT) {
throw new IllegalArgumentException(
"The number of piped operations must not exceed a maximum of "
+ CollectionPipedUpdate.MAX_PIPED_ITEM_COUNT + ".");
}
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<Map<Integer, CollectionOperationStatus>> rv = new CollectionFuture<Map<Integer, CollectionOperationStatus>>(
latch, operationTimeout);
Operation op = opFact.collectionPipedUpdate(key, update,
new CollectionPipedUpdateOperation.Callback() {
Map<Integer, CollectionOperationStatus> result = new TreeMap<Integer, CollectionOperationStatus>();
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
rv.set(result, cstatus);
}
public void complete() {
latch.countDown();
}
public void gotStatus(Integer index, OperationStatus status) {
if (status instanceof CollectionOperationStatus) {
result.put(index,
(CollectionOperationStatus) status);
} else {
result.put(index, new CollectionOperationStatus(
status));
}
}
});
rv.setOperation(op);
addOp(key, op);
return rv;
}
/**
* Generic pipelined update operation for collection items. Public methods for collection items call this method.
*
* @param key collection item's key
* @param updateList list of operation parameters (values and so on)
* @return future holding the success/failure codes of individual operations and their index
*/
<T> CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncCollectionPipedUpdate(
final String key, final List<CollectionPipedUpdate<T>> updateList) {
final ConcurrentLinkedQueue<Operation> ops = new ConcurrentLinkedQueue<Operation>();
final CountDownLatch latch = new CountDownLatch(updateList.size());
final List<OperationStatus> mergedOperationStatus = Collections
.synchronizedList(new ArrayList<OperationStatus>(1));
final Map<Integer, CollectionOperationStatus> mergedResult = new ConcurrentHashMap<Integer, CollectionOperationStatus>();
for (int i = 0; i < updateList.size(); i++) {
final CollectionPipedUpdate<T> update = updateList.get(i);
final int idx = i;
Operation op = opFact.collectionPipedUpdate(key, update,
new CollectionPipedUpdateOperation.Callback() {
// each result status
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
mergedOperationStatus.add(cstatus);
}
// complete
public void complete() {
latch.countDown();
}
// got status
public void gotStatus(Integer index,
OperationStatus status) {
if (status instanceof CollectionOperationStatus) {
mergedResult
.put(index
+ (idx * CollectionPipedUpdate.MAX_PIPED_ITEM_COUNT),
(CollectionOperationStatus) status);
} else {
mergedResult
.put(index
+ (idx * CollectionPipedUpdate.MAX_PIPED_ITEM_COUNT),
new CollectionOperationStatus(
status));
}
}
});
addOp(key, op);
ops.add(op);
}
return new CollectionFuture<Map<Integer, CollectionOperationStatus>>(
latch, operationTimeout) {
@Override
public boolean cancel(boolean ign) {
boolean rv = false;
for (Operation op : ops) {
op.cancel("by application.");
rv |= op.getState() == OperationState.WRITING;
}
return rv;
}
@Override
public boolean isCancelled() {
for (Operation op : ops) {
if (op.isCancelled())
return true;
}
return false;
}
@Override
public Map<Integer, CollectionOperationStatus> get(long duration,
TimeUnit units) throws InterruptedException,
TimeoutException, ExecutionException {
if (!latch.await(duration, units)) {
for (Operation op : ops) {
MemcachedConnection.opTimedOut(op);
}
throw new CheckedOperationTimeoutException(
"Timed out waiting for operation", ops);
} else {
// continuous timeout counter will be reset
for (Operation op : ops) {
MemcachedConnection.opSucceeded(op);
}
}
for (Operation op : ops) {
if (op != null && op.hasErrored()) {
throw new ExecutionException(op.getException());
}
if (op.isCancelled()) {
throw new ExecutionException(new RuntimeException(op.getCancelCause()));
}
}
return mergedResult;
}
@Override
public CollectionOperationStatus getOperationStatus() {
for (OperationStatus status : mergedOperationStatus) {
if (!status.isSuccess()) {
return new CollectionOperationStatus(status);
}
}
return new CollectionOperationStatus(true, "END",
CollectionResponse.END);
}
};
}
/**
* Generic delete operation for collection items. Public methods for collection items call this method.
*
* @param key collection item's key
* @param collectionDelete operation parameters (element index/key, value, and so on)
* @return future holding the success/failure of the operation
*/
private CollectionFuture<Boolean> asyncCollectionDelete(
final String key, final CollectionDelete collectionDelete) {
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<Boolean> rv = new CollectionFuture<Boolean>(
latch, operationTimeout);
Operation op = opFact.collectionDelete(key, collectionDelete,
new OperationCallback() {
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
rv.set(cstatus.isSuccess(), cstatus);
if (!cstatus.isSuccess()
&& getLogger().isDebugEnabled()) {
getLogger().debug(
"Deletion to the collection failed : "
+ cstatus.getMessage()
+ " (type="
+ collectionDelete.getClass()
.getName() + ", key=" + key
+ ")");
}
}
public void complete() {
latch.countDown();
}
});
rv.setOperation(op);
addOp(key, op);
return rv;
}
/**
* Generic existence operation for collection items. Public methods for collection items call this method.
*
* @param key collection item's key
* @param subkey element key (list index, b+tree bkey)
* @param collectionExist operation parameters (element value and so on)
* @param tc transcoder to serialize and deserialize values
* @return future holding the success/failure of the operation
*/
private <T> CollectionFuture<Boolean> asyncCollectionExist(
final String key, final String subkey,
final CollectionExist<T> collectionExist, Transcoder<T> tc) {
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<Boolean> rv = new CollectionFuture<Boolean>(
latch, operationTimeout);
CachedData cd = tc.encode(collectionExist.getValue());
collectionExist.setData(cd.getData());
Operation op = opFact.collectionExist(key, subkey, collectionExist,
new OperationCallback() {
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
boolean isExist = (CollectionResponse.EXIST == cstatus
.getResponse());
rv.set(isExist, cstatus);
if (!cstatus.isSuccess()
&& getLogger().isDebugEnabled()) {
getLogger().debug(
"Exist command to the collection failed : "
+ cstatus.getMessage()
+ " (type="
+ collectionExist.getClass()
.getName() + ", key=" + key
+ ", subkey=" + subkey + ")");
}
}
public void complete() {
latch.countDown();
}
});
rv.setOperation(op);
addOp(key, op);
return rv;
}
/* (non-Javadoc)
* @see net.spy.memcached.ArcusClient#asyncSetBulk(java.util.List, int, T, net.spy.memcached.transcoders.Transcoder)
*/
@Override
public <T> Future<Map<String, CollectionOperationStatus>> asyncSetBulk(List<String> key, int exp, T o, Transcoder<T> tc) {
return bulkService.setBulk(key, exp, o, tc, new ArcusClient[] { this });
}
/* (non-Javadoc)
* @see net.spy.memcached.ArcusClient#asyncSetBulk(java.util.List, int, java.lang.Object)
*/
@Override
public Future<Map<String, CollectionOperationStatus>> asyncSetBulk(List<String> key, int exp, Object o) {
return asyncSetBulk(key, exp, o, transcoder);
}
/* (non-Javadoc)
* @see net.spy.memcached.ArcusClient#asyncSetBulk(java.util.Map, int, net.spy.memcached.transcoders.Transcoder)
*/
@Override
public <T> Future<Map<String, CollectionOperationStatus>> asyncSetBulk(Map<String, T> o, int exp, Transcoder<T> tc) {
return bulkService.setBulk(o, exp, tc, new ArcusClient[] { this });
}
/* (non-Javadoc)
* @see net.spy.memcached.ArcusClient#asyncSetBulk(java.util.Map, int)
*/
@Override
public Future<Map<String, CollectionOperationStatus>> asyncSetBulk(Map<String, Object> o, int exp) {
return asyncSetBulk(o, exp, transcoder);
}
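/*
 * Usage sketch, assuming a connected client "client"; the result type
 * follows the restored generic signature above (an assumption based on
 * the BulkService-backed API):
 *
 *   Map<String, Object> items = new HashMap<String, Object>();
 *   items.put("key1", "value1");
 *   items.put("key2", "value2");
 *   Future<Map<String, CollectionOperationStatus>> f = client.asyncSetBulk(items, 3600);
 *   Map<String, CollectionOperationStatus> result = f.get(1000L, TimeUnit.MILLISECONDS);
 */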
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#getMaxPipedItemCount()
*/
@Override
public int getMaxPipedItemCount() {
return CollectionPipedStore.MAX_PIPED_ITEM_COUNT;
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncBopCreate(java.lang.String, net.spy.memcached.collection.ElementValueType, net.spy.memcached.collection.CollectionAttributes)
*/
@Override
public CollectionFuture<Boolean> asyncBopCreate(String key,
ElementValueType valueType, CollectionAttributes attributes) {
int flag = CollectionTranscoder.examineFlags(valueType);
boolean noreply = false;
CollectionCreate bTreeCreate = new BTreeCreate(flag,
attributes.getExpireTime(), attributes.getMaxCount(),
attributes.getOverflowAction(), attributes.getReadable(), noreply);
return asyncCollectionCreate(key, bTreeCreate);
}
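/*
 * Usage sketch, assuming a connected client "client" and that
 * ElementValueType declares a STRING constant (an assumption; use the
 * constant matching your element value type):
 *
 *   CollectionFuture<Boolean> f =
 *       client.asyncBopCreate("btree:key", ElementValueType.STRING, new CollectionAttributes());
 *   Boolean created = f.get(700L, TimeUnit.MILLISECONDS);
 */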
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncSopCreate(java.lang.String, net.spy.memcached.collection.CollectionAttributes)
*/
@Override
public CollectionFuture<Boolean> asyncSopCreate(String key,
ElementValueType type, CollectionAttributes attributes) {
int flag = CollectionTranscoder.examineFlags(type);
boolean noreply = false;
CollectionCreate setCreate = new SetCreate(flag,
attributes.getExpireTime(), attributes.getMaxCount(), attributes.getReadable(), noreply);
return asyncCollectionCreate(key, setCreate);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncLopCreate(java.lang.String, net.spy.memcached.collection.CollectionAttributes)
*/
@Override
public CollectionFuture<Boolean> asyncLopCreate(String key,
ElementValueType type, CollectionAttributes attributes) {
int flag = CollectionTranscoder.examineFlags(type);
boolean noreply = false;
CollectionCreate listCreate = new ListCreate(flag,
attributes.getExpireTime(), attributes.getMaxCount(),
attributes.getOverflowAction(), attributes.getReadable(), noreply);
return asyncCollectionCreate(key, listCreate);
}
/**
* Generic create operation for collection items. Public methods for collection items call this method.
*
* @param key collection item's key
* @param collectionCreate operation parameters (flags, expiration time, and so on)
* @return future holding the success/failure of the operation
*/
CollectionFuture<Boolean> asyncCollectionCreate(final String key,
final CollectionCreate collectionCreate) {
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<Boolean> rv = new CollectionFuture<Boolean>(
latch, operationTimeout);
Operation op = opFact.collectionCreate(key, collectionCreate,
new OperationCallback() {
@Override
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
rv.set(cstatus.isSuccess(), cstatus);
if (!cstatus.isSuccess()
&& getLogger().isDebugEnabled()) {
getLogger()
.debug("Insertion to the collection failed : "
+ cstatus.getMessage()
+ " (type="
+ collectionCreate.getClass()
.getName()
+ ", key="
+ key
+ ", attribute="
+ collectionCreate.toString() + ")");
}
}
@Override
public void complete() {
latch.countDown();
}
});
rv.setOperation(op);
addOp(key, op);
return rv;
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncBopGet(java.lang.String, long, boolean, boolean)
*/
@Override
public CollectionFuture<Map<Long, Element<Object>>> asyncBopGet(String key,
long bkey, ElementFlagFilter eFlagFilter, boolean withDelete, boolean dropIfEmpty) {
BTreeGet get = new BTreeGet(bkey, withDelete, dropIfEmpty, eFlagFilter);
return asyncBopGet(key, get, false, collectionTranscoder);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncBopGet(java.lang.String, long, long, int, int, boolean, boolean)
*/
@Override
public CollectionFuture<Map<Long, Element<Object>>> asyncBopGet(String key,
long from, long to, ElementFlagFilter eFlagFilter, int offset, int count,
boolean withDelete, boolean dropIfEmpty) {
BTreeGet get = new BTreeGet(from, to, offset, count,
withDelete, dropIfEmpty, eFlagFilter);
boolean reverse = from > to;
return asyncBopGet(key, get, reverse, collectionTranscoder);
}
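/*
 * Usage sketch, assuming a connected client "client" and that
 * ElementFlagFilter.DO_NOT_FILTER exists (an assumption). Passing
 * from > to scans backward, as the reverse flag above shows:
 *
 *   CollectionFuture<Map<Long, Element<Object>>> f =
 *       client.asyncBopGet("btree:key", 0L, 100L, ElementFlagFilter.DO_NOT_FILTER,
 *               0, 10, false, false);
 *   Map<Long, Element<Object>> elements = f.get(700L, TimeUnit.MILLISECONDS);
 */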
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncBopGet(java.lang.String, long, boolean, boolean, net.spy.memcached.transcoders.Transcoder)
*/
@Override
public <T> CollectionFuture<Map<Long, Element<T>>> asyncBopGet(String key,
long bkey, ElementFlagFilter eFlagFilter, boolean withDelete, boolean dropIfEmpty, Transcoder<T> tc) {
BTreeGet get = new BTreeGet(bkey, withDelete, dropIfEmpty, eFlagFilter);
return asyncBopGet(key, get, false, tc);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncBopGet(java.lang.String, long, long, int, int, boolean, boolean, net.spy.memcached.transcoders.Transcoder)
*/
@Override
public <T> CollectionFuture<Map<Long, Element<T>>> asyncBopGet(String key,
long from, long to, ElementFlagFilter eFlagFilter, int offset, int count,
boolean withDelete, boolean dropIfEmpty, Transcoder<T> tc) {
BTreeGet get = new BTreeGet(from, to, offset, count, withDelete,
dropIfEmpty, eFlagFilter);
boolean reverse = from > to;
return asyncBopGet(key, get, reverse, tc);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncLopGet(java.lang.String, int, boolean, boolean)
*/
@Override
public CollectionFuture<List<Object>> asyncLopGet(String key, int index,
boolean withDelete, boolean dropIfEmpty) {
ListGet get = new ListGet(index, withDelete, dropIfEmpty);
return asyncLopGet(key, get, collectionTranscoder);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncLopGet(java.lang.String, int, int, boolean, boolean)
*/
@Override
public CollectionFuture<List<Object>> asyncLopGet(String key, int from,
int to, boolean withDelete, boolean dropIfEmpty) {
ListGet get = new ListGet(from, to, withDelete, dropIfEmpty);
return asyncLopGet(key, get, collectionTranscoder);
}
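/*
 * Usage sketch, assuming a connected client "client"; fetches list
 * indexes 0..9 without deleting them:
 *
 *   CollectionFuture<List<Object>> f = client.asyncLopGet("list:key", 0, 9, false, false);
 *   List<Object> values = f.get(700L, TimeUnit.MILLISECONDS);
 */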
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncLopGet(java.lang.String, int, boolean, boolean, net.spy.memcached.transcoders.Transcoder)
*/
@Override
public <T> CollectionFuture<List<T>> asyncLopGet(String key, int index,
boolean withDelete, boolean dropIfEmpty, Transcoder<T> tc) {
ListGet get = new ListGet(index, withDelete, dropIfEmpty);
return asyncLopGet(key, get, tc);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncLopGet(java.lang.String, int, int, boolean, boolean, net.spy.memcached.transcoders.Transcoder)
*/
@Override
public <T> CollectionFuture<List<T>> asyncLopGet(String key, int from,
int to, boolean withDelete, boolean dropIfEmpty, Transcoder<T> tc) {
ListGet get = new ListGet(from, to, withDelete, dropIfEmpty);
return asyncLopGet(key, get, tc);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncSopGet(java.lang.String, int, boolean, boolean)
*/
@Override
public CollectionFuture<Set<Object>> asyncSopGet(String key, int count,
boolean withDelete, boolean dropIfEmpty) {
SetGet get = new SetGet(count, withDelete, dropIfEmpty);
return asyncSopGet(key, get, collectionTranscoder);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncSopGet(java.lang.String, int, boolean, boolean, net.spy.memcached.transcoders.Transcoder)
*/
@Override
public <T> CollectionFuture<Set<T>> asyncSopGet(String key, int count,
boolean withDelete, boolean dropIfEmpty, Transcoder<T> tc) {
SetGet get = new SetGet(count, withDelete, dropIfEmpty);
return asyncSopGet(key, get, tc);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncBopDelete(java.lang.String, long, boolean)
*/
@Override
public CollectionFuture<Boolean> asyncBopDelete(String key, long bkey,
ElementFlagFilter eFlagFilter, boolean dropIfEmpty) {
BTreeDelete delete = new BTreeDelete(bkey, false,
dropIfEmpty, eFlagFilter);
return asyncCollectionDelete(key, delete);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncBopDelete(java.lang.String, long, long, int, boolean)
*/
@Override
public CollectionFuture<Boolean> asyncBopDelete(String key, long from,
long to, ElementFlagFilter eFlagFilter, int count, boolean dropIfEmpty) {
BTreeDelete delete = new BTreeDelete(from, to, count,
false, dropIfEmpty, eFlagFilter);
return asyncCollectionDelete(key, delete);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncLopDelete(java.lang.String, int, boolean)
*/
@Override
public CollectionFuture<Boolean> asyncLopDelete(String key, int index,
boolean dropIfEmpty) {
ListDelete delete = new ListDelete(index, false,
dropIfEmpty);
return asyncCollectionDelete(key, delete);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncLopDelete(java.lang.String, int, int, boolean)
*/
@Override
public CollectionFuture<Boolean> asyncLopDelete(String key, int from,
int to, boolean dropIfEmpty) {
ListDelete delete = new ListDelete(from, to, false,
dropIfEmpty);
return asyncCollectionDelete(key, delete);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncSopDelete(java.lang.String, java.lang.Object, boolean)
*/
@Override
public CollectionFuture<Boolean> asyncSopDelete(String key, Object value,
boolean dropIfEmpty) {
SetDelete<Object> delete = new SetDelete<Object>(value, false,
dropIfEmpty);
delete.setData(collectionTranscoder.encode(value).getData());
return asyncCollectionDelete(key, delete);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncSopDelete(java.lang.String, java.lang.Object, boolean, net.spy.memcached.transcoders.Transcoder)
*/
@Override
public <T> CollectionFuture<Boolean> asyncSopDelete(String key, T value,
boolean dropIfEmpty, Transcoder<T> tc) {
SetDelete<T> delete = new SetDelete<T>(value, false, dropIfEmpty);
delete.setData(tc.encode(value).getData());
return asyncCollectionDelete(key, delete);
}
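/*
 * Usage sketch, assuming a connected client "client"; dropIfEmpty=true
 * (as the parameter name suggests) also drops the set item itself once
 * its last element is deleted:
 *
 *   CollectionFuture<Boolean> f = client.asyncSopDelete("set:key", "someValue", true);
 *   Boolean deleted = f.get(700L, TimeUnit.MILLISECONDS);
 */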
/**
* Generic count operation for collection items. Public methods for collection items call this method.
*
* @param k collection item's key
* @param collectionCount operation parameters (element key range, eflags, and so on)
* @return future holding the element count
*/
private CollectionFuture<Integer> asyncCollectionCount(final String k,
final CollectionCount collectionCount) {
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<Integer> rv = new CollectionFuture<Integer>(
latch, operationTimeout);
Operation op = opFact.collectionCount(k, collectionCount,
new OperationCallback() {
@Override
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
if (cstatus.isSuccess()) {
rv.set(Integer.valueOf(cstatus.getMessage()),
new CollectionOperationStatus(
new OperationStatus(true, "END")));
return;
}
rv.set(null, cstatus);
}
@Override
public void complete() {
latch.countDown();
}
});
rv.setOperation(op);
addOp(k, op);
return rv;
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncBopGetItemCount(java.lang.String, long, long)
*/
@Override
public CollectionFuture<Integer> asyncBopGetItemCount(String key,
long from, long to, ElementFlagFilter eFlagFilter) {
CollectionCount collectionCount = new BTreeCount(from, to, eFlagFilter);
return asyncCollectionCount(key, collectionCount);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncBopInsert(java.lang.String, byte[], long, java.lang.Object, boolean, net.spy.memcached.collection.CollectionAttributes)
*/
@Override
public CollectionFuture<Boolean> asyncBopInsert(String key, long bkey,
byte[] eFlag, Object value, CollectionAttributes attributesForCreate) {
BTreeStore<Object> bTreeStore = new BTreeStore<Object>(value,
eFlag, (attributesForCreate != null), null, attributesForCreate);
return asyncCollectionStore(key, String.valueOf(bkey), bTreeStore,
collectionTranscoder);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncLopInsert(java.lang.String, int, java.lang.Object, boolean, net.spy.memcached.collection.CollectionAttributes)
*/
@Override
public CollectionFuture<Boolean> asyncLopInsert(String key, int index,
Object value, CollectionAttributes attributesForCreate) {
ListStore<Object> listStore = new ListStore<Object>(value,
(attributesForCreate != null), null, attributesForCreate);
return asyncCollectionStore(key, String.valueOf(index), listStore,
collectionTranscoder);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncSopInsert(java.lang.String, java.lang.Object, boolean, net.spy.memcached.collection.CollectionAttributes)
*/
@Override
public CollectionFuture<Boolean> asyncSopInsert(String key, Object value,
CollectionAttributes attributesForCreate) {
SetStore<Object> setStore = new SetStore<Object>(value,
(attributesForCreate != null), null, attributesForCreate);
return asyncCollectionStore(key, "", setStore, collectionTranscoder);
}
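/*
 * Usage sketch, assuming a connected client "client". Passing a non-null
 * attributesForCreate makes the store create the set item when it is
 * missing (see the "attributesForCreate != null" flag above):
 *
 *   CollectionFuture<Boolean> f =
 *       client.asyncSopInsert("set:key", "someValue", new CollectionAttributes());
 *   Boolean inserted = f.get(700L, TimeUnit.MILLISECONDS);
 */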
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncBopInsert(java.lang.String, long, byte[], java.lang.Object, boolean, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder)
*/
@Override
public <T> CollectionFuture<Boolean> asyncBopInsert(String key, long bkey,
byte[] eFlag, T value, CollectionAttributes attributesForCreate, Transcoder<T> tc) {
BTreeStore<T> bTreeStore = new BTreeStore<T>(value, eFlag,
(attributesForCreate != null), null, attributesForCreate);
return asyncCollectionStore(key, String.valueOf(bkey), bTreeStore, tc);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncLopInsert(java.lang.String, int, java.lang.Object, boolean, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder)
*/
@Override
public <T> CollectionFuture<Boolean> asyncLopInsert(String key, int index,
T value, CollectionAttributes attributesForCreate, Transcoder<T> tc) {
ListStore<T> listStore = new ListStore<T>(value, (attributesForCreate != null),
null, attributesForCreate);
return asyncCollectionStore(key, String.valueOf(index), listStore, tc);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncSopInsert(java.lang.String, java.lang.Object, boolean, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder)
*/
@Override
public <T> CollectionFuture<Boolean> asyncSopInsert(String key, T value,
CollectionAttributes attributesForCreate, Transcoder<T> tc) {
SetStore<T> setStore = new SetStore<T>(value, (attributesForCreate != null),
null, attributesForCreate);
return asyncCollectionStore(key, "", setStore, tc);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncBopPipedInsertBulk(java.lang.String, java.util.Map, net.spy.memcached.collection.CollectionAttributes)
*/
@Override
public CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncBopPipedInsertBulk(
String key, Map<Long, Object> elements,
CollectionAttributes attributesForCreate) {
return asyncBopPipedInsertBulk(key, elements, attributesForCreate,
collectionTranscoder);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncLopPipedInsertBulk(java.lang.String, int, java.util.List, boolean, net.spy.memcached.collection.CollectionAttributes)
*/
@Override
public CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncLopPipedInsertBulk(
String key, int index, List<Object> valueList, CollectionAttributes attributesForCreate) {
return asyncLopPipedInsertBulk(key, index, valueList, attributesForCreate,
collectionTranscoder);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncSopPipedInsertBulk(java.lang.String, java.util.List, boolean, net.spy.memcached.collection.CollectionAttributes)
*/
@Override
public CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncSopPipedInsertBulk(
String key, List<Object> valueList, CollectionAttributes attributesForCreate) {
return asyncSopPipedInsertBulk(key, valueList, attributesForCreate,
collectionTranscoder);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncBopPipedInsertBulk(java.lang.String, java.util.Map, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder)
*/
@Override
public <T> CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncBopPipedInsertBulk(
String key, Map<Long, T> elements,
CollectionAttributes attributesForCreate, Transcoder<T> tc) {
if (elements.size() <= CollectionPipedStore.MAX_PIPED_ITEM_COUNT) {
BTreePipedStore<T> store = new BTreePipedStore<T>(key, elements,
(attributesForCreate != null), attributesForCreate, tc);
return asyncCollectionPipedStore(key, store);
} else {
List<CollectionPipedStore<T>> storeList = new ArrayList<CollectionPipedStore<T>>();
PartitionedMap<Long, T> list = new PartitionedMap<Long, T>(
elements, CollectionPipedStore.MAX_PIPED_ITEM_COUNT);
for (int i = 0; i < list.size(); i++) {
storeList.add(new BTreePipedStore<T>(key, list.get(i),
(attributesForCreate != null),
attributesForCreate, tc));
}
return asyncCollectionPipedStore(key, storeList);
}
}
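/*
 * Usage sketch, assuming a connected client "client". Inserts beyond
 * MAX_PIPED_ITEM_COUNT are split into several piped operations, as the
 * partitioning above shows; the returned map is keyed by element index:
 *
 *   Map<Long, Object> elements = new HashMap<Long, Object>();
 *   for (long bkey = 0; bkey < 100; bkey++) {
 *       elements.put(bkey, "value" + bkey);
 *   }
 *   CollectionFuture<Map<Integer, CollectionOperationStatus>> f =
 *       client.asyncBopPipedInsertBulk("btree:key", elements, new CollectionAttributes());
 *   Map<Integer, CollectionOperationStatus> result = f.get(1000L, TimeUnit.MILLISECONDS);
 */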
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncLopPipedInsertBulk(java.lang.String, int, java.util.List, boolean, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder)
*/
@Override
public <T> CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncLopPipedInsertBulk(
String key, int index, List<T> valueList,
CollectionAttributes attributesForCreate, Transcoder<T> tc) {
if (valueList.size() <= CollectionPipedStore.MAX_PIPED_ITEM_COUNT) {
ListPipedStore<T> store = new ListPipedStore<T>(key, index,
valueList, (attributesForCreate != null),
attributesForCreate, tc);
return asyncCollectionPipedStore(key, store);
} else {
PartitionedList<T> list = new PartitionedList<T>(valueList,
CollectionPipedStore.MAX_PIPED_ITEM_COUNT);
List<CollectionPipedStore<T>> storeList = new ArrayList<CollectionPipedStore<T>>(
list.size());
for (int i = 0; i < list.size(); i++) {
storeList.add(new ListPipedStore<T>(key, index, list.get(i),
(attributesForCreate != null),
attributesForCreate, tc));
}
return asyncCollectionPipedStore(key, storeList);
}
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncSopPipedInsertBulk(java.lang.String, java.util.List, boolean, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder)
*/
@Override
public <T> CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncSopPipedInsertBulk(
String key, List<T> valueList,
CollectionAttributes attributesForCreate, Transcoder<T> tc) {
if (valueList.size() <= CollectionPipedStore.MAX_PIPED_ITEM_COUNT) {
SetPipedStore<T> store = new SetPipedStore<T>(key, valueList,
(attributesForCreate != null), attributesForCreate, tc);
return asyncCollectionPipedStore(key, store);
} else {
PartitionedList<T> list = new PartitionedList<T>(valueList,
CollectionPipedStore.MAX_PIPED_ITEM_COUNT);
List<CollectionPipedStore<T>> storeList = new ArrayList<CollectionPipedStore<T>>(
list.size());
for (int i = 0; i < list.size(); i++) {
storeList.add(new SetPipedStore<T>(key, list.get(i),
(attributesForCreate != null),
attributesForCreate, tc));
}
return asyncCollectionPipedStore(key, storeList);
}
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#flush(java.lang.String)
*/
@Override
public OperationFuture<Boolean> flush(final String prefix) {
return flush(prefix, -1);
}
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#flush(java.lang.String, int)
*/
@Override
public OperationFuture<Boolean> flush(final String prefix, final int delay) {
final AtomicReference<Boolean> flushResult = new AtomicReference<Boolean>(
null);
final ConcurrentLinkedQueue<Operation> ops = new ConcurrentLinkedQueue<Operation>();
final CountDownLatch blatch = broadcastOp(new BroadcastOpFactory() {
public Operation newOp(final MemcachedNode n,
final CountDownLatch latch) {
Operation op = opFact.flush(prefix, delay, false,
new OperationCallback() {
public void receivedStatus(OperationStatus s) {
flushResult.set(s.isSuccess());
}
public void complete() {
latch.countDown();
}
});
ops.add(op);
return op;
}
});
return new OperationFuture<Boolean>(blatch, flushResult,
operationTimeout) {
@Override
public boolean cancel(boolean ign) {
boolean rv = false;
for (Operation op : ops) {
op.cancel("by application.");
rv |= op.getState() == OperationState.WRITING;
}
return rv;
}
@Override
public boolean isCancelled() {
for (Operation op : ops) {
if (op.isCancelled())
return true;
}
return false;
}
@Override
public Boolean get(long duration, TimeUnit units)
throws InterruptedException, TimeoutException, ExecutionException {
if (!blatch.await(duration, units)) {
// whenever timeout occurs, continuous timeout counter will increase by 1.
for (Operation op : ops) {
MemcachedConnection.opTimedOut(op);
}
throw new CheckedOperationTimeoutException(
"Timed out waiting for operation. >" + duration, ops);
} else {
// continuous timeout counter will be reset
for (Operation op : ops) {
MemcachedConnection.opSucceeded(op);
}
}
for (Operation op : ops) {
if (op != null && op.hasErrored()) {
throw new ExecutionException(op.getException());
}
if (op != null && op.isCancelled()) {
throw new ExecutionException(new RuntimeException(op.getCancelCause()));
}
}
return flushResult.get();
}
@Override
public boolean isDone() {
boolean rv = true;
for (Operation op : ops) {
rv &= op.getState() == OperationState.COMPLETE;
}
return rv || isCancelled();
}
};
}
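/*
 * Usage sketch, assuming a connected client "client"; flushes every item
 * under the given prefix on all nodes (the one-argument overload above
 * passes a delay of -1):
 *
 *   OperationFuture<Boolean> f = client.flush("somePrefix");
 *   Boolean flushed = f.get(1000L, TimeUnit.MILLISECONDS);
 */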
/*
* (non-Javadoc)
* @see net.spy.memcached.ArcusClientIF#asyncBopSortMergeGet(java.util.List, long, long, int, int)
*/
@Override
public SMGetFuture<List<SMGetElement<Object>>> asyncBopSortMergeGet(
List<String> keyList, long from, long to, ElementFlagFilter eFlagFilter, int offset, int count) {
if (keyList == null || keyList.isEmpty()) {
throw new IllegalArgumentException("Key list is empty.");
}
if (offset < 0) {
throw new IllegalArgumentException("Offset must be 0 or positive integer.");
}
if (count < 1) {
throw new IllegalArgumentException("Count must be larger than 0.");
}
if (offset + count > MAX_SMGET_COUNT) {
throw new IllegalArgumentException(
"The sum of offset and count must not exceed a maximum of " + MAX_SMGET_COUNT + ".");
}
Map<String, List<String>> arrangedKey = groupingKeys(keyList, smgetKeyChunkSize);
List<BTreeSMGet<Object>> smGetList = new ArrayList<BTreeSMGet<Object>>(
arrangedKey.size());
for (List<String> v : arrangedKey.values()) {
if (arrangedKey.size() > 1) {
smGetList.add(new BTreeSMGetWithLongTypeBkeyOld<Object>(v, from, to, eFlagFilter, 0, offset + count));
} else {
smGetList.add(new BTreeSMGetWithLongTypeBkeyOld