/*
* arcus-java-client : Arcus Java client
* Copyright 2010-2014 NAVER Corp.
* Copyright 2014-2021 JaM2in Co., Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.spy.memcached;
import java.io.IOException;
import java.io.InputStream;
import java.net.InetSocketAddress;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantLock;
import java.util.jar.JarFile;
import java.util.jar.Manifest;
import net.spy.memcached.collection.Attributes;
import net.spy.memcached.collection.BKeyObject;
import net.spy.memcached.collection.BTreeCount;
import net.spy.memcached.collection.BTreeCreate;
import net.spy.memcached.collection.BTreeDelete;
import net.spy.memcached.collection.BTreeElement;
import net.spy.memcached.collection.BTreeFindPosition;
import net.spy.memcached.collection.BTreeFindPositionWithGet;
import net.spy.memcached.collection.BTreeGet;
import net.spy.memcached.collection.BTreeGetBulk;
import net.spy.memcached.collection.BTreeGetBulkWithByteTypeBkey;
import net.spy.memcached.collection.BTreeGetBulkWithLongTypeBkey;
import net.spy.memcached.collection.BTreeGetByPosition;
import net.spy.memcached.collection.BTreeGetResult;
import net.spy.memcached.collection.BTreeMutate;
import net.spy.memcached.collection.BTreeOrder;
import net.spy.memcached.collection.BTreeSMGet;
import net.spy.memcached.collection.BTreeSMGetWithByteTypeBkey;
import net.spy.memcached.collection.BTreeSMGetWithByteTypeBkeyOld;
import net.spy.memcached.collection.BTreeSMGetWithLongTypeBkey;
import net.spy.memcached.collection.BTreeSMGetWithLongTypeBkeyOld;
import net.spy.memcached.collection.BTreeInsert;
import net.spy.memcached.collection.BTreeInsertAndGet;
import net.spy.memcached.collection.BTreeUpdate;
import net.spy.memcached.collection.BTreeUpsert;
import net.spy.memcached.collection.ByteArrayBKey;
import net.spy.memcached.collection.ByteArrayTreeMap;
import net.spy.memcached.collection.CollectionAttributes;
import net.spy.memcached.collection.CollectionBulkInsert;
import net.spy.memcached.collection.CollectionCount;
import net.spy.memcached.collection.CollectionCreate;
import net.spy.memcached.collection.CollectionDelete;
import net.spy.memcached.collection.CollectionExist;
import net.spy.memcached.collection.CollectionGet;
import net.spy.memcached.collection.CollectionMutate;
import net.spy.memcached.collection.CollectionPipedInsert;
import net.spy.memcached.collection.CollectionPipedInsert.BTreePipedInsert;
import net.spy.memcached.collection.CollectionPipedInsert.ByteArraysBTreePipedInsert;
import net.spy.memcached.collection.CollectionPipedInsert.ListPipedInsert;
import net.spy.memcached.collection.CollectionPipedInsert.SetPipedInsert;
import net.spy.memcached.collection.CollectionPipedInsert.MapPipedInsert;
import net.spy.memcached.collection.CollectionPipedUpdate;
import net.spy.memcached.collection.CollectionPipedUpdate.BTreePipedUpdate;
import net.spy.memcached.collection.CollectionPipedUpdate.MapPipedUpdate;
import net.spy.memcached.collection.CollectionResponse;
import net.spy.memcached.collection.CollectionInsert;
import net.spy.memcached.collection.CollectionUpdate;
import net.spy.memcached.collection.Element;
import net.spy.memcached.collection.ElementFlagFilter;
import net.spy.memcached.collection.ElementFlagUpdate;
import net.spy.memcached.collection.ElementValueType;
import net.spy.memcached.collection.ListCreate;
import net.spy.memcached.collection.ListDelete;
import net.spy.memcached.collection.ListGet;
import net.spy.memcached.collection.ListInsert;
import net.spy.memcached.collection.SMGetElement;
import net.spy.memcached.collection.SMGetTrimKey;
import net.spy.memcached.collection.SMGetMode;
import net.spy.memcached.collection.MapCreate;
import net.spy.memcached.collection.MapDelete;
import net.spy.memcached.collection.MapGet;
import net.spy.memcached.collection.MapInsert;
import net.spy.memcached.collection.MapUpdate;
import net.spy.memcached.collection.SetCreate;
import net.spy.memcached.collection.SetDelete;
import net.spy.memcached.collection.SetExist;
import net.spy.memcached.collection.SetGet;
import net.spy.memcached.collection.SetPipedExist;
import net.spy.memcached.collection.SetInsert;
import net.spy.memcached.compat.log.Logger;
import net.spy.memcached.compat.log.LoggerFactory;
import net.spy.memcached.internal.BTreeStoreAndGetFuture;
import net.spy.memcached.internal.CheckedOperationTimeoutException;
import net.spy.memcached.internal.CollectionFuture;
import net.spy.memcached.internal.CollectionGetBulkFuture;
import net.spy.memcached.internal.OperationFuture;
import net.spy.memcached.internal.BulkOperationFuture;
import net.spy.memcached.internal.SMGetFuture;
import net.spy.memcached.ops.BTreeFindPositionOperation;
import net.spy.memcached.ops.BTreeFindPositionWithGetOperation;
import net.spy.memcached.ops.BTreeGetBulkOperation;
import net.spy.memcached.ops.BTreeGetByPositionOperation;
import net.spy.memcached.ops.BTreeSortMergeGetOperation;
import net.spy.memcached.ops.BTreeSortMergeGetOperationOld;
import net.spy.memcached.ops.BTreeInsertAndGetOperation;
import net.spy.memcached.ops.CollectionBulkInsertOperation;
import net.spy.memcached.ops.CollectionGetOperation;
import net.spy.memcached.ops.CollectionOperationStatus;
import net.spy.memcached.ops.CollectionPipedExistOperation;
import net.spy.memcached.ops.CollectionPipedInsertOperation;
import net.spy.memcached.ops.CollectionPipedUpdateOperation;
import net.spy.memcached.ops.GetAttrOperation;
import net.spy.memcached.ops.Mutator;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.ops.OperationCallback;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.OperationStatus;
import net.spy.memcached.ops.StoreType;
import net.spy.memcached.plugin.FrontCacheMemcachedClient;
import net.spy.memcached.transcoders.CollectionTranscoder;
import net.spy.memcached.transcoders.Transcoder;
import net.spy.memcached.util.BTreeUtil;
/**
 * Client to an Arcus cluster.
 *
 * Basic usage:
 *
 * <pre>{@code
 * final static String arcusAdminAddrs = "127.0.0.1:2181";
 * final static String serviceCode = "cafe";
 *
 * ConnectionFactoryBuilder cfb = new ConnectionFactoryBuilder();
 *
 * ArcusClient c = ArcusClient.createArcusClient(arcusAdminAddrs, serviceCode, cfb);
 *
 * // Store a value (async) for one hour.
 * c.set("someKey", 3600, someObject);
 * // Retrieve a value.
 * Future<Object> myFuture = c.asyncGet("someKey");
 * }</pre>
 *
 * If a client pool is needed, use it as follows:
 *
 * <pre>{@code
 * int poolSize = 4;
 * ArcusClientPool pool =
 *     ArcusClient.createArcusClientPool(arcusAdminAddrs, serviceCode, cfb, poolSize);
 *
 * // Store a value.
 * pool.set("someKey", 3600, someObject);
 * // Retrieve a value.
 * Future<Object> myFuture = pool.asyncGet("someKey");
 * }</pre>
 */
public class ArcusClient extends FrontCacheMemcachedClient implements ArcusClientIF {
private static String VERSION;
private static final Logger arcusLogger = LoggerFactory.getLogger(ArcusClient.class);
private static final String ARCUS_CLOUD_ADDR = "127.0.0.1:2181";
private static final String DEFAULT_ARCUS_CLIENT_NAME = "ArcusClient";
private boolean dead;
// final BulkService bulkService;
private final Transcoder<Object> collectionTranscoder;
private final int smgetKeyChunkSize;
private static final int BOPGET_BULK_CHUNK_SIZE = 200;
private static final int NON_PIPED_BULK_INSERT_CHUNK_SIZE = 500;
private static final int MAX_GETBULK_ELEMENT_COUNT = 50;
private static final int MAX_SMGET_COUNT = 1000; // server configuration is 2000.
private static final int MAX_MKEY_LENGTH = 250;
private CacheManager cacheManager;
public void setCacheManager(CacheManager cacheManager) {
this.cacheManager = cacheManager;
}
/**
 * Create a single Arcus client.
 *
 * @param hostPorts arcus admin addresses
 * @param serviceCode service code
 * @param cfb ConnectionFactoryBuilder
 * @return a single ArcusClient
 */
public static ArcusClient createArcusClient(String hostPorts, String serviceCode,
ConnectionFactoryBuilder cfb) {
return ArcusClient.createArcusClient(hostPorts, serviceCode, cfb, 1, 10000).getClient();
}
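/*
 * Usage sketch for the factory above (admin address and service code are
 * illustrative; the service code must be registered in the Arcus cloud):
 *
 *   ConnectionFactoryBuilder cfb = new ConnectionFactoryBuilder();
 *   ArcusClient client = ArcusClient.createArcusClient("127.0.0.1:2181", "cafe", cfb);
 *   // ... use the client ...
 *   client.shutdown();
 */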
/**
 * Create a single Arcus client using the default Arcus admin address.
 *
 * @param serviceCode service code
 * @param cfb ConnectionFactoryBuilder
 * @return a single ArcusClient
 */
public static ArcusClient createArcusClient(String serviceCode,
ConnectionFactoryBuilder cfb) {
return ArcusClient.createArcusClient(ARCUS_CLOUD_ADDR, serviceCode, cfb, 1, 10000).getClient();
}
/**
 * Create a fixed-size pool of Arcus clients.
 *
 * @param hostPorts arcus admin addresses
 * @param serviceCode service code
 * @param poolSize Arcus client pool size
 * @param cfb ConnectionFactoryBuilder
 * @return a pool of ArcusClient
 */
public static ArcusClientPool createArcusClientPool(String hostPorts, String serviceCode,
ConnectionFactoryBuilder cfb, int poolSize) {
return ArcusClient.createArcusClient(hostPorts, serviceCode, cfb, poolSize, 0);
}
/**
 * Create a fixed-size pool of Arcus clients using the default Arcus admin address.
 *
 * @param serviceCode service code
 * @param poolSize Arcus client pool size
 * @param cfb ConnectionFactoryBuilder
 * @return a pool of ArcusClient
 */
public static ArcusClientPool createArcusClientPool(String serviceCode,
ConnectionFactoryBuilder cfb, int poolSize) {
return ArcusClient.createArcusClient(ARCUS_CLOUD_ADDR, serviceCode, cfb, poolSize, 0);
}
/**
 * @param hostPorts arcus admin addresses
 * @param serviceCode service code
 * @param cfb ConnectionFactoryBuilder
 * @param poolSize Arcus client pool size
 * @param waitTimeForConnect waiting time for connection establishment (milliseconds)
 * @return a pool of ArcusClient
 */
private static ArcusClientPool createArcusClient(String hostPorts, String serviceCode,
ConnectionFactoryBuilder cfb, int poolSize,
int waitTimeForConnect) {
if (hostPorts == null) {
throw new NullPointerException("Arcus admin address required.");
}
if (serviceCode == null) {
throw new NullPointerException("Service code required.");
}
if (hostPorts.isEmpty()) {
throw new IllegalArgumentException("Arcus admin address is empty.");
}
if (serviceCode.isEmpty()) {
throw new IllegalArgumentException("Service code is empty.");
}
final CountDownLatch latch = new CountDownLatch(1);
CacheManager exe = new CacheManager(hostPorts, serviceCode, cfb, latch, poolSize,
waitTimeForConnect);
try {
latch.await();
} catch (Exception e) {
arcusLogger.warn("you cannot see this message!");
}
ArcusClient[] client = exe.getAC();
return new ArcusClientPool(poolSize, client);
}
/**
* Create an Arcus client for the given memcached server addresses.
*
* @param cf connection factory to configure connections for this client
* @param name client name
* @param addrs socket addresses for the memcached servers
* @return Arcus client
*/
protected static ArcusClient getInstance(ConnectionFactory cf,
String name,
List<InetSocketAddress> addrs) throws IOException {
return new ArcusClient(cf, name, addrs);
}
/**
* Create an Arcus client for the given memcached server addresses.
*
* @param cf connection factory to configure connections for this client
* @param name client name
* @param addrs socket addresses for the memcached servers
* @throws IOException if connections cannot be established
*/
public ArcusClient(ConnectionFactory cf, String name, List<InetSocketAddress> addrs)
throws IOException {
super(cf, name, addrs);
// bulkService = new BulkService(cf.getBulkServiceLoopLimit(),
// cf.getBulkServiceThreadCount(), cf.getBulkServiceSingleOpTimeout());
collectionTranscoder = new CollectionTranscoder();
smgetKeyChunkSize = cf.getDefaultMaxSMGetKeyChunkSize();
registerMbean();
}
/**
* Create an Arcus client for the given memcached server addresses.
*
* @param cf connection factory to configure connections for this client
* @param addrs socket addresses for the memcached servers
* @throws IOException if connections cannot be established
*/
public ArcusClient(ConnectionFactory cf, List<InetSocketAddress> addrs)
throws IOException {
super(cf, DEFAULT_ARCUS_CLIENT_NAME, addrs);
collectionTranscoder = new CollectionTranscoder();
smgetKeyChunkSize = cf.getDefaultMaxSMGetKeyChunkSize();
registerMbean();
}
/**
* Register mbean for Arcus client statistics.
*/
private void registerMbean() {
if ("false".equals(System.getProperty("arcus.mbean", "false").toLowerCase())) {
getLogger().info("Arcus client statistics MBean is NOT registered.");
return;
}
try {
StatisticsHandler mbean = new StatisticsHandler(this);
ArcusMBeanServer.getInstance().registMBean(
mbean,
mbean.getClass().getPackage().getName() + ":type="
+ mbean.getClass().getSimpleName() + "-"
+ mbean.hashCode());
getLogger().info("Arcus client statistics MBean is registered.");
} catch (Exception e) {
getLogger().warn("Failed to initialize statistics mbean.", e);
}
}
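/*
 * The statistics MBean is opt-in via the "arcus.mbean" system property read
 * above. A minimal way to enable it (JVM flag shown for illustration):
 *
 *   java -Darcus.mbean=true ...
 */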
@Override
public void shutdown() {
super.shutdown();
// Connect to Arcus server directly, cache manager may be null.
if (cacheManager != null) {
cacheManager.shutdown();
}
dead = true;
// if (bulkService != null) {
// bulkService.shutdown();
// }
}
private void validateMKey(String mkey) {
byte[] keyBytes = KeyUtil.getKeyBytes(mkey);
if (keyBytes.length > MAX_MKEY_LENGTH) {
throw new IllegalArgumentException("MKey is too long (maxlen = "
+ MAX_MKEY_LENGTH + ")");
}
if (keyBytes.length == 0) {
throw new IllegalArgumentException("MKey must contain at least one character.");
}
// Validate the mkey
for (byte b : keyBytes) {
if (b == ' ' || b == '\n' || b == '\r' || b == 0) {
throw new IllegalArgumentException("MKey contains invalid characters: ``"
+ mkey + "''");
}
}
}
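/*
 * Examples of the rule enforced above (illustrative values): "mkey1" is
 * accepted, while "" (empty), "bad key" (space), "bad\nkey" (newline), or any
 * mkey longer than MAX_MKEY_LENGTH (250) bytes is rejected with an
 * IllegalArgumentException.
 */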
OperationFuture<Boolean> asyncStore(StoreType storeType, String key, int exp, CachedData co) {
final CountDownLatch latch = new CountDownLatch(1);
final OperationFuture<Boolean> rv = new OperationFuture<Boolean>(latch,
operationTimeout);
Operation op = opFact.store(storeType, key, co.getFlags(),
exp, co.getData(), new OperationCallback() {
public void receivedStatus(OperationStatus val) {
rv.set(val.isSuccess(), val);
}
public void complete() {
latch.countDown();
}
});
rv.setOperation(op);
addOp(key, op);
return rv;
}
@Override
public CollectionFuture<Boolean> asyncSetAttr(String key, Attributes attrs) {
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<Boolean> rv = new CollectionFuture<Boolean>(
latch, operationTimeout);
Operation op = opFact.setAttr(key, attrs, new OperationCallback() {
public void receivedStatus(OperationStatus status) {
if (status instanceof CollectionOperationStatus) {
rv.set(status.isSuccess(), (CollectionOperationStatus) status);
} else {
getLogger().warn("Unhandled state: " + status);
rv.set(status.isSuccess(), new CollectionOperationStatus(status));
}
}
public void complete() {
latch.countDown();
}
});
rv.setOperation(op);
addOp(key, op);
return rv;
}
@Override
public CollectionFuture<CollectionAttributes> asyncGetAttr(final String key) {
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<CollectionAttributes> rv = new CollectionFuture<CollectionAttributes>(
latch, operationTimeout);
Operation op = opFact.getAttr(key, new GetAttrOperation.Callback() {
private final CollectionAttributes attrs = new CollectionAttributes();
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus stat;
if (status instanceof CollectionOperationStatus) {
stat = (CollectionOperationStatus) status;
} else {
stat = new CollectionOperationStatus(status);
}
rv.set(stat.isSuccess() ? attrs : null, stat);
}
public void complete() {
latch.countDown();
}
public void gotAttribute(String k, String attr) {
assert key.equals(k) : "Wrong key returned";
attrs.setAttribute(attr);
}
});
rv.setOperation(op);
addOp(key, op);
return rv;
}
/**
* Generic get operation for list items. Public methods for list items call this method.
*
* @param k list item's key
* @param collectionGet operation parameters (element key and so on)
* @param tc transcoder to serialize and unserialize value
* @return future holding the fetched value
*/
private <T> CollectionFuture<List<T>> asyncLopGet(final String k,
final CollectionGet collectionGet,
final Transcoder<T> tc) {
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<List<T>> rv = new CollectionFuture<List<T>>(
latch, operationTimeout);
Operation op = opFact.collectionGet(k, collectionGet,
new CollectionGetOperation.Callback() {
private final List<T> list = new ArrayList<T>();
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
if (cstatus.isSuccess()) {
rv.set(list, cstatus);
return;
}
switch (cstatus.getResponse()) {
case NOT_FOUND:
rv.set(null, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Key(%s) not found : %s", k, cstatus);
}
break;
case NOT_FOUND_ELEMENT:
rv.set(list, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Element(%s) not found : %s", k, cstatus);
}
break;
case OUT_OF_RANGE:
rv.set(list, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Element(%s) not found in condition : %s", k, cstatus);
}
break;
case UNREADABLE:
rv.set(null, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Element(%s) is not readable : %s", k, cstatus);
}
break;
default:
rv.set(null, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Key(%s) unknown status : %s", k, cstatus);
}
break;
}
}
public void complete() {
latch.countDown();
}
public void gotData(String key, String subkey, int flags, byte[] data) {
assert key.equals(k) : "Wrong key returned";
list.add(tc.decode(new CachedData(flags, data, tc.getMaxSize())));
}
});
rv.setOperation(op);
addOp(k, op);
return rv;
}
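/*
 * A hedged sketch of how this is reached from the public API, given an
 * ArcusClient "client" (key, range, and timeout are illustrative):
 *
 *   CollectionFuture<List<Object>> f = client.asyncLopGet("lkey", 0, 9, false, false);
 *   List<Object> values = f.get(700L, TimeUnit.MILLISECONDS);
 *   // null means the key itself was missing or unreadable (see the switch above);
 *   // an empty list means the list exists but matched no elements.
 */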
@Override
public <T> CollectionFuture<Boolean> asyncSopExist(String key, T value,
Transcoder<T> tc) {
SetExist<T> exist = new SetExist<T>(value, tc);
return asyncCollectionExist(key, "", exist, tc);
}
@Override
public CollectionFuture<Boolean> asyncSopExist(String key, Object value) {
SetExist<Object> exist = new SetExist<Object>(value, collectionTranscoder);
return asyncCollectionExist(key, "", exist, collectionTranscoder);
}
/**
* Generic get operation for set items. Public methods for set items call this method.
*
* @param k set item's key
* @param collectionGet operation parameters (element key and so on)
* @param tc transcoder to serialize and unserialize value
* @return future holding the fetched value
*/
private <T> CollectionFuture<Set<T>> asyncSopGet(final String k,
final CollectionGet collectionGet,
final Transcoder<T> tc) {
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<Set<T>> rv = new CollectionFuture<Set<T>>(latch,
operationTimeout);
Operation op = opFact.collectionGet(k, collectionGet,
new CollectionGetOperation.Callback() {
private final Set<T> set = new HashSet<T>();
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
if (cstatus.isSuccess()) {
rv.set(set, cstatus);
return;
}
switch (cstatus.getResponse()) {
case NOT_FOUND:
rv.set(null, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Key(%s) not found : %s", k, cstatus);
}
break;
case NOT_FOUND_ELEMENT:
rv.set(set, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Element(%s) not found : %s", k, cstatus);
}
break;
case UNREADABLE:
rv.set(null, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Collection(%s) is not readable : %s", k, cstatus);
}
break;
default:
rv.set(null, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Key(%s) unknown status : %s", k, cstatus);
}
break;
}
}
public void complete() {
latch.countDown();
}
public void gotData(String key, String subkey, int flags, byte[] data) {
assert key.equals(k) : "Wrong key returned";
set.add(tc.decode(new CachedData(flags, data, tc.getMaxSize())));
}
});
rv.setOperation(op);
addOp(k, op);
return rv;
}
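/*
 * Corresponding public-API sketch, given an ArcusClient "client" (key,
 * element count, and timeout are illustrative):
 *
 *   CollectionFuture<Set<Object>> f = client.asyncSopGet("skey", 10, false, false);
 *   Set<Object> values = f.get(700L, TimeUnit.MILLISECONDS);
 */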
/**
* Generic get operation for b+tree items. Public methods for b+tree items call this method.
*
* @param k b+tree item's key
* @param collectionGet operation parameters (element keys and so on)
* @param reverse false=forward or true=backward
* @param tc transcoder to serialize and unserialize value
* @return future holding the map of fetched elements and their keys
*/
private <T> CollectionFuture<Map<Long, Element<T>>> asyncBopGet(
final String k, final CollectionGet collectionGet,
final boolean reverse, final Transcoder<T> tc) {
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<Map<Long, Element<T>>> rv = new CollectionFuture<Map<Long, Element<T>>>(
latch, operationTimeout);
Operation op = opFact.collectionGet(k, collectionGet,
new CollectionGetOperation.Callback() {
private final TreeMap<Long, Element<T>> map = new TreeMap<Long, Element<T>>(
(reverse) ? Collections.reverseOrder() : null);
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
if (cstatus.isSuccess()) {
rv.set(map, cstatus);
return;
}
switch (cstatus.getResponse()) {
case NOT_FOUND:
rv.set(null, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Key(%s) not found : %s", k, cstatus);
}
break;
case NOT_FOUND_ELEMENT:
rv.set(map, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Element(%s) not found : %s", k, cstatus);
}
break;
case UNREADABLE:
rv.set(null, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Element(%s) is not readable : %s", k, cstatus);
}
break;
default:
rv.set(null, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Key(%s) Unknown response : %s", k, cstatus);
}
break;
}
}
public void complete() {
latch.countDown();
}
public void gotData(String key, String subkey, int flags, byte[] data) {
assert key.equals(k) : "Wrong key returned";
long longSubkey = Long.parseLong(subkey);
map.put(longSubkey, new Element<T>(longSubkey,
tc.decode(new CachedData(flags, data, tc.getMaxSize())),
collectionGet.getElementFlag()));
}
});
rv.setOperation(op);
addOp(k, op);
return rv;
}
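/*
 * Public-API sketch, given an ArcusClient "client"; a descending bkey range
 * (from > to) makes this method order the result map with a reverse
 * comparator (all values illustrative):
 *
 *   CollectionFuture<Map<Long, Element<Object>>> f =
 *       client.asyncBopGet("bkey", 100L, 0L, ElementFlagFilter.DO_NOT_FILTER,
 *                          0, 10, false, false);
 *   Map<Long, Element<Object>> elems = f.get(700L, TimeUnit.MILLISECONDS);
 */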
/**
 * Generic get operation for map items. Public methods for map items call this method.
 *
 * @param k map item's key
 * @param collectionGet operation parameters (element keys and so on)
 * @param tc transcoder to serialize and unserialize value
 * @return future holding the map of fetched elements and their keys
 */
private <T> CollectionFuture<Map<String, T>> asyncMopGet(
final String k, final CollectionGet collectionGet, final Transcoder<T> tc) {
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<Map<String, T>> rv = new CollectionFuture<Map<String, T>>(
latch, operationTimeout);
Operation op = opFact.collectionGet(k, collectionGet,
new CollectionGetOperation.Callback() {
private final HashMap<String, T> map = new HashMap<String, T>();
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
if (cstatus.isSuccess()) {
rv.set(map, cstatus);
return;
}
switch (cstatus.getResponse()) {
case NOT_FOUND:
rv.set(null, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Key(%s) not found : %s", k, cstatus);
}
break;
case NOT_FOUND_ELEMENT:
rv.set(map, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Element(%s) not found : %s", k, cstatus);
}
break;
case UNREADABLE:
rv.set(null, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Element(%s) is not readable : %s", k, cstatus);
}
break;
default:
rv.set(null, cstatus);
if (getLogger().isDebugEnabled()) {
getLogger().debug("Key(%s) Unknown response : %s", k, cstatus);
}
break;
}
}
public void complete() {
latch.countDown();
}
public void gotData(String key, String subkey, int flags, byte[] data) {
assert key.equals(k) : "Wrong key returned";
map.put(subkey, tc.decode(new CachedData(flags, data, tc.getMaxSize())));
}
});
rv.setOperation(op);
addOp(k, op);
return rv;
}
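/*
 * Public-API sketch, given an ArcusClient "client" (key illustrative);
 * requesting no specific mkeys fetches the whole map item:
 *
 *   CollectionFuture<Map<String, Object>> f = client.asyncMopGet("mkey", false, false);
 *   Map<String, Object> entries = f.get(700L, TimeUnit.MILLISECONDS);
 */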
/**
* Generic insert operation for collection items.
* Public methods for collection items call this method.
*
* @param key collection item's key
* @param subkey element key (list index, b+tree bkey)
* @param collectionInsert operation parameters (value, eflags, attributes, and so on)
* @param tc transcoder to serialize and unserialize value
* @return future holding the success/failure of the operation
*/
private <T> CollectionFuture<Boolean> asyncCollectionInsert(String key,
String subkey,
CollectionInsert<T> collectionInsert,
Transcoder<T> tc) {
CachedData co = tc.encode(collectionInsert.getValue());
collectionInsert.setFlags(co.getFlags());
return asyncCollectionInsert(key, subkey, collectionInsert, co);
}
/**
* Generic insert operation for collection items.
* Public methods for collection items call this method.
*
* @param key collection item's key
* @param subkey element key (list index, b+tree bkey)
* @param collectionInsert operation parameters (value, eflags, attributes, and so on)
* @param co transcoded value
* @return future holding the success/failure of the operation
*/
CollectionFuture<Boolean> asyncCollectionInsert(final String key,
final String subkey,
final CollectionInsert<?> collectionInsert,
final CachedData co) {
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<Boolean> rv = new CollectionFuture<Boolean>(
latch, operationTimeout);
Operation op = opFact.collectionInsert(key, subkey, collectionInsert,
co.getData(), new OperationCallback() {
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
rv.set(cstatus.isSuccess(), cstatus);
if (!cstatus.isSuccess() && getLogger().isDebugEnabled()) {
getLogger().debug("Insertion to the collection failed : "
+ cstatus.getMessage()
+ " (type="
+ collectionInsert.getClass()
.getName() + ", key=" + key
+ ", subkey=" + subkey + ", value="
+ collectionInsert.getValue() + ")");
}
}
public void complete() {
latch.countDown();
}
});
rv.setOperation(op);
addOp(key, op);
return rv;
}
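/*
 * Public-API sketch, given an ArcusClient "client" (key, index, and value
 * illustrative); passing a non-null CollectionAttributes asks the server to
 * create the item if it does not exist yet:
 *
 *   CollectionFuture<Boolean> f =
 *       client.asyncLopInsert("lkey", -1, "value", new CollectionAttributes());
 *   boolean inserted = f.get(700L, TimeUnit.MILLISECONDS);
 */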
/**
* Generic pipelined insert operation for collection items.
* Public methods for collection items call this method.
*
* @param key collection item's key
* @param insert operation parameters (values, attributes, and so on)
* @return future holding the success/failure codes of individual operations and their index
*/
<T> CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncCollectionPipedInsert(
final String key, final CollectionPipedInsert<T> insert) {
if (insert.getItemCount() == 0) {
throw new IllegalArgumentException(
"The number of piped operations must be larger than 0.");
}
if (insert.getItemCount() > CollectionPipedInsert.MAX_PIPED_ITEM_COUNT) {
throw new IllegalArgumentException(
"The number of piped operations must not exceed a maximum of "
+ CollectionPipedInsert.MAX_PIPED_ITEM_COUNT + ".");
}
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<Map<Integer, CollectionOperationStatus>> rv =
new CollectionFuture<Map<Integer, CollectionOperationStatus>>(latch, operationTimeout);
Operation op = opFact.collectionPipedInsert(key, insert,
new CollectionPipedInsertOperation.Callback() {
private final Map<Integer, CollectionOperationStatus> result =
new TreeMap<Integer, CollectionOperationStatus>();
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
rv.set(result, cstatus);
}
public void complete() {
latch.countDown();
}
public void gotStatus(Integer index, OperationStatus status) {
if (status instanceof CollectionOperationStatus) {
result.put(index, (CollectionOperationStatus) status);
} else {
result.put(index, new CollectionOperationStatus(status));
}
}
});
rv.setOperation(op);
addOp(key, op);
return rv;
}
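/*
 * Public-API sketch, given an ArcusClient "client" and a List<Object>
 * "valueList" (both illustrative). Each entry of the returned map pairs an
 * element index with its operation status; an empty map is the all-success
 * case:
 *
 *   CollectionFuture<Map<Integer, CollectionOperationStatus>> f =
 *       client.asyncSopPipedInsertBulk("skey", valueList, new CollectionAttributes());
 *   Map<Integer, CollectionOperationStatus> result = f.get(1000L, TimeUnit.MILLISECONDS);
 */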
/**
* Generic pipelined update operation for collection items.
* Public methods for collection items call this method.
*
* @param key collection item's key
* @param update operation parameters (values and so on)
* @return future holding the success/failure codes of individual operations and their index
*/
<T> CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncCollectionPipedUpdate(
final String key, final CollectionPipedUpdate<T> update) {
if (update.getItemCount() == 0) {
throw new IllegalArgumentException(
"The number of piped operations must be larger than 0.");
}
if (update.getItemCount() > CollectionPipedUpdate.MAX_PIPED_ITEM_COUNT) {
throw new IllegalArgumentException(
"The number of piped operations must not exceed a maximum of "
+ CollectionPipedUpdate.MAX_PIPED_ITEM_COUNT + ".");
}
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<Map<Integer, CollectionOperationStatus>> rv =
new CollectionFuture<Map<Integer, CollectionOperationStatus>>(latch, operationTimeout);
Operation op = opFact.collectionPipedUpdate(key, update,
new CollectionPipedUpdateOperation.Callback() {
private final Map<Integer, CollectionOperationStatus> result =
new TreeMap<Integer, CollectionOperationStatus>();
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
rv.set(result, cstatus);
}
public void complete() {
latch.countDown();
}
public void gotStatus(Integer index, OperationStatus status) {
if (status instanceof CollectionOperationStatus) {
result.put(index, (CollectionOperationStatus) status);
} else {
result.put(index, new CollectionOperationStatus(status));
}
}
});
rv.setOperation(op);
addOp(key, op);
return rv;
}
/**
* Generic pipelined update operation for collection items.
* Public methods for collection items call this method.
*
* @param key collection item's key
* @param updateList list of operation parameters (values and so on)
* @return future holding the success/failure codes of individual operations and their index
*/
<T> CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncCollectionPipedUpdate(
final String key, final List<CollectionPipedUpdate<T>> updateList) {
final ConcurrentLinkedQueue<Operation> ops = new ConcurrentLinkedQueue<Operation>();
final CountDownLatch latch = new CountDownLatch(updateList.size());
final List<OperationStatus> mergedOperationStatus = Collections
.synchronizedList(new ArrayList<OperationStatus>(1));
final Map<Integer, CollectionOperationStatus> mergedResult =
new ConcurrentHashMap<Integer, CollectionOperationStatus>();
for (int i = 0; i < updateList.size(); i++) {
final CollectionPipedUpdate<T> update = updateList.get(i);
final int idx = i;
Operation op = opFact.collectionPipedUpdate(key, update,
new CollectionPipedUpdateOperation.Callback() {
// each result status
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
mergedOperationStatus.add(cstatus);
}
// complete
public void complete() {
latch.countDown();
}
// got status
public void gotStatus(Integer index, OperationStatus status) {
if (status instanceof CollectionOperationStatus) {
mergedResult.put(index + (idx * CollectionPipedUpdate.MAX_PIPED_ITEM_COUNT),
(CollectionOperationStatus) status);
} else {
mergedResult.put(index + (idx * CollectionPipedUpdate.MAX_PIPED_ITEM_COUNT),
new CollectionOperationStatus(status));
}
}
});
addOp(key, op);
ops.add(op);
}
return new CollectionFuture<Map<Integer, CollectionOperationStatus>>(
latch, operationTimeout) {
@Override
public boolean cancel(boolean ign) {
boolean rv = false;
for (Operation op : ops) {
op.cancel("by application.");
rv |= op.getState() == OperationState.WRITE_QUEUED;
}
return rv;
}
@Override
public boolean isCancelled() {
for (Operation op : ops) {
if (op.isCancelled()) {
return true;
}
}
return false;
}
@Override
public Map<Integer, CollectionOperationStatus> get(long duration,
TimeUnit units)
throws InterruptedException, TimeoutException, ExecutionException {
if (!latch.await(duration, units)) {
for (Operation op : ops) {
MemcachedConnection.opTimedOut(op);
}
throw new CheckedOperationTimeoutException(duration, units, ops);
} else {
// continuous timeout counter will be reset
for (Operation op : ops) {
MemcachedConnection.opSucceeded(op);
}
}
for (Operation op : ops) {
if (op != null && op.hasErrored()) {
throw new ExecutionException(op.getException());
}
if (op != null && op.isCancelled()) {
throw new ExecutionException(new RuntimeException(op.getCancelCause()));
}
}
return mergedResult;
}
@Override
public CollectionOperationStatus getOperationStatus() {
for (OperationStatus status : mergedOperationStatus) {
if (!status.isSuccess()) {
return new CollectionOperationStatus(status);
}
}
return new CollectionOperationStatus(true, "END", CollectionResponse.END);
}
@Override
public boolean isDone() {
for (Operation op : ops) {
if (!(op.getState() == OperationState.COMPLETE || op.isCancelled())) {
return false;
}
}
return true;
}
};
}
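/*
 * Note on the merged indexes above: chunk idx contributes results at
 * (idx * CollectionPipedUpdate.MAX_PIPED_ITEM_COUNT) + index, so the caller
 * sees one contiguous index space across all piped chunks.
 */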
/**
* Generic delete operation for collection items.
* Public methods for collection items call this method.
*
* @param key collection item's key
* @param collectionDelete operation parameters (element index/key, value, and so on)
* @return future holding the success/failure of the operation
*/
private CollectionFuture<Boolean> asyncCollectionDelete(
final String key, final CollectionDelete collectionDelete) {
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<Boolean> rv = new CollectionFuture<Boolean>(
latch, operationTimeout);
Operation op = opFact.collectionDelete(key, collectionDelete,
new OperationCallback() {
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
rv.set(cstatus.isSuccess(), cstatus);
if (!cstatus.isSuccess() && getLogger().isDebugEnabled()) {
getLogger().debug("Deletion to the collection failed : "
+ cstatus.getMessage()
+ " (type="
+ collectionDelete.getClass().getName()
+ ", key=" + key + ")");
}
}
public void complete() {
latch.countDown();
}
});
rv.setOperation(op);
addOp(key, op);
return rv;
}
/**
* Generic existence operation for collection items.
* Public methods for collection items call this method.
*
* @param key collection item's key
* @param subkey element key (list index, b+tree bkey)
* @param collectionExist operation parameters (element value and so on)
* @param tc transcoder to serialize and unserialize value
* @return future holding the success/failure of the operation
*/
private <T> CollectionFuture<Boolean> asyncCollectionExist(
final String key, final String subkey,
final CollectionExist collectionExist, Transcoder<T> tc) {
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<Boolean> rv = new CollectionFuture<Boolean>(
latch, operationTimeout);
Operation op = opFact.collectionExist(key, subkey, collectionExist,
new OperationCallback() {
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
boolean isExist = (CollectionResponse.EXIST == cstatus.getResponse());
rv.set(isExist, cstatus);
if (!cstatus.isSuccess() && getLogger().isDebugEnabled()) {
getLogger().debug("Exist command to the collection failed : "
+ cstatus.getMessage()
+ " (type="
+ collectionExist.getClass().getName()
+ ", key=" + key
+ ", subkey=" + subkey + ")");
}
}
public void complete() {
latch.countDown();
}
});
rv.setOperation(op);
addOp(key, op);
return rv;
}
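/*
 * Public-API sketch, given an ArcusClient "client" (key and value
 * illustrative):
 *
 *   CollectionFuture<Boolean> f = client.asyncSopExist("skey", "value");
 *   boolean exists = f.get(700L, TimeUnit.MILLISECONDS);
 */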
@Deprecated
@Override
public <T> Future<Map<String, CollectionOperationStatus>> asyncSetBulk(final List<String> key,
final int exp, final T o,
final Transcoder<T> tc) {
if (key == null) {
throw new IllegalArgumentException("Key list is null.");
} else if (key.isEmpty()) {
throw new IllegalArgumentException("Key list is empty.");
}
final CachedData co = tc.encode(o);
final CountDownLatch blatch = new CountDownLatch(key.size());
return new BulkOperationFuture<CollectionOperationStatus>(key, blatch, operationTimeout) {
@Override
public Operation createOp(final String k) {
Operation op = opFact.store(StoreType.set, k, co.getFlags(),
exp, co.getData(), new OperationCallback() {
public void receivedStatus(OperationStatus val) {
if (!val.isSuccess()) {
failedResult.put(k, new CollectionOperationStatus(false, String
.valueOf(val.isSuccess()), CollectionResponse.END));
}
}
public void complete() {
blatch.countDown();
}
});
addOp(k, op);
return op;
}
};
}
@Deprecated
@Override
public Future<Map<String, CollectionOperationStatus>> asyncSetBulk(List<String> key,
int exp, Object o) {
return asyncSetBulk(key, exp, o, transcoder);
}
@Deprecated
@Override
public <T> Future<Map<String, CollectionOperationStatus>> asyncSetBulk(final Map<String, T> o,
final int exp,
final Transcoder<T> tc) {
if (o == null) {
throw new IllegalArgumentException("Map is null.");
} else if (o.isEmpty()) {
throw new IllegalArgumentException("Map is empty.");
}
final CountDownLatch blatch = new CountDownLatch(o.size());
return new BulkOperationFuture<CollectionOperationStatus>(o.keySet(), blatch,
operationTimeout) {
@Override
public Operation createOp(final String k) {
CachedData co = tc.encode(o.get(k));
Operation op = opFact.store(StoreType.set, k, co.getFlags(),
exp, co.getData(), new OperationCallback() {
public void receivedStatus(OperationStatus val) {
if (!val.isSuccess()) {
failedResult.put(k, new CollectionOperationStatus(false, String
.valueOf(val.isSuccess()), CollectionResponse.END));
}
}
public void complete() {
blatch.countDown();
}
});
addOp(k, op);
return op;
}
};
}
@Deprecated
@Override
public Future<Map<String, CollectionOperationStatus>> asyncSetBulk(Map<String, Object> o,
int exp) {
return asyncSetBulk(o, exp, transcoder);
}
@Override
public <T> Future<Map<String, OperationStatus>> asyncStoreBulk(final StoreType type,
final List<String> key,
final int exp, final T o,
final Transcoder<T> tc) {
if (key == null) {
throw new IllegalArgumentException("Key list is null.");
} else if (key.isEmpty()) {
throw new IllegalArgumentException("Key list is empty.");
}
final CachedData co = tc.encode(o);
final CountDownLatch blatch = new CountDownLatch(key.size());
return new BulkOperationFuture<OperationStatus>(key, blatch, operationTimeout) {
@Override
public Operation createOp(final String k) {
Operation op = opFact.store(type, k, co.getFlags(),
exp, co.getData(), new OperationCallback() {
public void receivedStatus(OperationStatus val) {
if (!val.isSuccess()) {
failedResult.put(k, val);
}
}
public void complete() {
blatch.countDown();
}
});
addOp(k, op);
return op;
}
};
}
@Override
public Future<Map<String, OperationStatus>> asyncStoreBulk(StoreType type,
List<String> key,
int exp, Object o) {
return asyncStoreBulk(type, key, exp, o, transcoder);
}
@Override
public <T> Future<Map<String, OperationStatus>> asyncStoreBulk(final StoreType type,
final Map<String, T> o,
final int exp,
final Transcoder<T> tc) {
if (o == null) {
throw new IllegalArgumentException("Map is null.");
} else if (o.isEmpty()) {
throw new IllegalArgumentException("Map is empty.");
}
final CountDownLatch blatch = new CountDownLatch(o.size());
return new BulkOperationFuture<OperationStatus>(o.keySet(), blatch, operationTimeout) {
@Override
public Operation createOp(final String k) {
CachedData co = tc.encode(o.get(k));
Operation op = opFact.store(type, k, co.getFlags(),
exp, co.getData(), new OperationCallback() {
public void receivedStatus(OperationStatus val) {
if (!val.isSuccess()) {
failedResult.put(k, val);
}
}
public void complete() {
blatch.countDown();
}
});
addOp(k, op);
return op;
}
};
}
@Override
public Future<Map<String, OperationStatus>> asyncStoreBulk(StoreType type,
Map<String, Object> o,
int exp) {
return asyncStoreBulk(type, o, exp, transcoder);
}
@Override
public Future<Map<String, OperationStatus>> asyncDeleteBulk(List<String> key) {
if (key == null) {
throw new IllegalArgumentException("Key list is null.");
} else if (key.isEmpty()) {
throw new IllegalArgumentException("Key list is empty.");
}
final CountDownLatch blatch = new CountDownLatch(key.size());
return new BulkOperationFuture<OperationStatus>(key, blatch, operationTimeout) {
@Override
public Operation createOp(final String k) {
Operation op = opFact.delete(k, new OperationCallback() {
public void receivedStatus(OperationStatus val) {
if (!val.isSuccess()) {
failedResult.put(k, val);
}
}
public void complete() {
blatch.countDown();
}
});
addOp(k, op);
return op;
}
};
}
@Override
public Future<Map<String, OperationStatus>> asyncDeleteBulk(String... key) {
if (key == null) {
throw new IllegalArgumentException("Key list is null.");
}
return asyncDeleteBulk(Arrays.asList(key));
}
@Override
public int getMaxPipedItemCount() {
return CollectionPipedInsert.MAX_PIPED_ITEM_COUNT;
}
@Override
public CollectionFuture<Boolean> asyncBopCreate(String key,
ElementValueType valueType,
CollectionAttributes attributes) {
int flag = CollectionTranscoder.examineFlags(valueType);
boolean noreply = false;
CollectionCreate bTreeCreate = new BTreeCreate(flag,
attributes.getExpireTime(), attributes.getMaxCount(),
attributes.getOverflowAction(), attributes.getReadable(), noreply);
return asyncCollectionCreate(key, bTreeCreate);
}
@Override
public CollectionFuture<Boolean> asyncMopCreate(String key,
ElementValueType type,
CollectionAttributes attributes) {
int flag = CollectionTranscoder.examineFlags(type);
boolean noreply = false;
CollectionCreate mapCreate = new MapCreate(flag,
attributes.getExpireTime(), attributes.getMaxCount(),
attributes.getReadable(), noreply);
return asyncCollectionCreate(key, mapCreate);
}
@Override
public CollectionFuture<Boolean> asyncSopCreate(String key,
ElementValueType type,
CollectionAttributes attributes) {
int flag = CollectionTranscoder.examineFlags(type);
boolean noreply = false;
CollectionCreate setCreate = new SetCreate(flag,
attributes.getExpireTime(), attributes.getMaxCount(),
attributes.getReadable(), noreply);
return asyncCollectionCreate(key, setCreate);
}
@Override
public CollectionFuture<Boolean> asyncLopCreate(String key,
ElementValueType type,
CollectionAttributes attributes) {
int flag = CollectionTranscoder.examineFlags(type);
boolean noreply = false;
CollectionCreate listCreate = new ListCreate(flag,
attributes.getExpireTime(), attributes.getMaxCount(),
attributes.getOverflowAction(), attributes.getReadable(), noreply);
return asyncCollectionCreate(key, listCreate);
}
/**
* Generic create operation for collection items.
* Public methods for collection items call this method.
*
* @param key collection item's key
* @param collectionCreate operation parameters (flags, expiration time, and so on)
* @return future holding the success/failure of the operation
*/
CollectionFuture<Boolean> asyncCollectionCreate(final String key,
final CollectionCreate collectionCreate) {
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<Boolean> rv = new CollectionFuture<Boolean>(
latch, operationTimeout);
Operation op = opFact.collectionCreate(key, collectionCreate,
new OperationCallback() {
@Override
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
rv.set(cstatus.isSuccess(), cstatus);
if (!cstatus.isSuccess() && getLogger().isDebugEnabled()) {
getLogger().debug("Insertion to the collection failed : "
+ cstatus.getMessage()
+ " (type="
+ collectionCreate.getClass().getName()
+ ", key=" + key
+ ", attribute="
+ collectionCreate.toString() + ")");
}
}
@Override
public void complete() {
latch.countDown();
}
});
rv.setOperation(op);
addOp(key, op);
return rv;
}
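/*
 * Public-API sketch, given an ArcusClient "client" (key and attributes
 * illustrative); creates an empty b+tree item for String values:
 *
 *   CollectionFuture<Boolean> f =
 *       client.asyncBopCreate("bkey", ElementValueType.STRING, new CollectionAttributes());
 *   boolean created = f.get(700L, TimeUnit.MILLISECONDS);
 */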
@Override
public CollectionFuture<Map<Long, Element<Object>>> asyncBopGet(String key,
long bkey,
ElementFlagFilter eFlagFilter,
boolean withDelete,
boolean dropIfEmpty) {
BTreeGet get = new BTreeGet(bkey, withDelete, dropIfEmpty, eFlagFilter);
return asyncBopGet(key, get, false, collectionTranscoder);
}
@Override
public CollectionFuture<Map<Long, Element<Object>>> asyncBopGet(String key,
long from, long to,
ElementFlagFilter eFlagFilter,
int offset, int count,
boolean withDelete,
boolean dropIfEmpty) {
BTreeGet get = new BTreeGet(from, to, offset, count, withDelete, dropIfEmpty, eFlagFilter);
boolean reverse = from > to;
return asyncBopGet(key, get, reverse, collectionTranscoder);
}
@Override
public <T> CollectionFuture<Map<Long, Element<T>>> asyncBopGet(String key,
long bkey,
ElementFlagFilter eFlagFilter,
boolean withDelete,
boolean dropIfEmpty,
Transcoder<T> tc) {
BTreeGet get = new BTreeGet(bkey, withDelete, dropIfEmpty, eFlagFilter);
return asyncBopGet(key, get, false, tc);
}
@Override
public <T> CollectionFuture<Map<Long, Element<T>>> asyncBopGet(String key,
long from, long to,
ElementFlagFilter eFlagFilter,
int offset, int count,
boolean withDelete,
boolean dropIfEmpty,
Transcoder<T> tc) {
BTreeGet get = new BTreeGet(from, to, offset, count, withDelete, dropIfEmpty, eFlagFilter);
boolean reverse = from > to;
return asyncBopGet(key, get, reverse, tc);
}
@Override
public CollectionFuture<Map<String, Object>> asyncMopGet(String key,
boolean withDelete,
boolean dropIfEmpty) {
List<String> mkeyList = new ArrayList<String>();
MapGet get = new MapGet(mkeyList, withDelete, dropIfEmpty);
return asyncMopGet(key, get, collectionTranscoder);
}
@Override
public CollectionFuture<Map<String, Object>> asyncMopGet(String key,
String mkey,
boolean withDelete,
boolean dropIfEmpty) {
if (mkey == null) {
throw new IllegalArgumentException("mkey is null");
}
validateMKey(mkey);
List<String> mkeyList = new ArrayList<String>(1);
mkeyList.add(mkey);
MapGet get = new MapGet(mkeyList, withDelete, dropIfEmpty);
return asyncMopGet(key, get, collectionTranscoder);
}
@Override
public CollectionFuture<Map<String, Object>> asyncMopGet(String key,
List<String> mkeyList,
boolean withDelete,
boolean dropIfEmpty) {
if (mkeyList == null) {
throw new IllegalArgumentException("mkeyList is null");
}
for (int i = 0; i < mkeyList.size(); i++) {
validateMKey(mkeyList.get(i));
}
MapGet get = new MapGet(mkeyList, withDelete, dropIfEmpty);
return asyncMopGet(key, get, collectionTranscoder);
}
@Override
public <T> CollectionFuture<Map<String, T>> asyncMopGet(String key,
boolean withDelete, boolean dropIfEmpty,
Transcoder<T> tc) {
List<String> mkeyList = new ArrayList<String>();
MapGet get = new MapGet(mkeyList, withDelete, dropIfEmpty);
return asyncMopGet(key, get, tc);
}
@Override
public <T> CollectionFuture<Map<String, T>> asyncMopGet(String key,
String mkey,
boolean withDelete, boolean dropIfEmpty,
Transcoder<T> tc) {
if (mkey == null) {
throw new IllegalArgumentException("mkey is null");
}
validateMKey(mkey);
List<String> mkeyList = new ArrayList<String>(1);
mkeyList.add(mkey);
MapGet get = new MapGet(mkeyList, withDelete, dropIfEmpty);
return asyncMopGet(key, get, tc);
}
@Override
public <T> CollectionFuture<Map<String, T>> asyncMopGet(String key,
List<String> mkeyList,
boolean withDelete, boolean dropIfEmpty,
Transcoder<T> tc) {
if (mkeyList == null) {
throw new IllegalArgumentException("mkeyList is null");
}
for (int i = 0; i < mkeyList.size(); i++) {
validateMKey(mkeyList.get(i));
}
MapGet get = new MapGet(mkeyList, withDelete, dropIfEmpty);
return asyncMopGet(key, get, tc);
}
@Override
public CollectionFuture<List<Object>> asyncLopGet(String key, int index,
boolean withDelete, boolean dropIfEmpty) {
ListGet get = new ListGet(index, withDelete, dropIfEmpty);
return asyncLopGet(key, get, collectionTranscoder);
}
@Override
public CollectionFuture<List<Object>> asyncLopGet(String key,
int from, int to,
boolean withDelete, boolean dropIfEmpty) {
ListGet get = new ListGet(from, to, withDelete, dropIfEmpty);
return asyncLopGet(key, get, collectionTranscoder);
}
@Override
public <T> CollectionFuture<List<T>> asyncLopGet(String key, int index,
boolean withDelete, boolean dropIfEmpty,
Transcoder<T> tc) {
ListGet get = new ListGet(index, withDelete, dropIfEmpty);
return asyncLopGet(key, get, tc);
}
@Override
public <T> CollectionFuture<List<T>> asyncLopGet(String key,
int from, int to,
boolean withDelete, boolean dropIfEmpty,
Transcoder<T> tc) {
ListGet get = new ListGet(from, to, withDelete, dropIfEmpty);
return asyncLopGet(key, get, tc);
}
@Override
public CollectionFuture<Set<Object>> asyncSopGet(String key, int count,
boolean withDelete, boolean dropIfEmpty) {
SetGet get = new SetGet(count, withDelete, dropIfEmpty);
return asyncSopGet(key, get, collectionTranscoder);
}
@Override
public <T> CollectionFuture<Set<T>> asyncSopGet(String key, int count,
boolean withDelete, boolean dropIfEmpty,
Transcoder<T> tc) {
SetGet get = new SetGet(count, withDelete, dropIfEmpty);
return asyncSopGet(key, get, tc);
}
@Override
public CollectionFuture<Boolean> asyncBopDelete(String key, long bkey,
ElementFlagFilter eFlagFilter,
boolean dropIfEmpty) {
BTreeDelete delete = new BTreeDelete(bkey, false, dropIfEmpty, eFlagFilter);
return asyncCollectionDelete(key, delete);
}
@Override
public CollectionFuture<Boolean> asyncBopDelete(String key,
long from, long to,
ElementFlagFilter eFlagFilter, int count,
boolean dropIfEmpty) {
BTreeDelete delete = new BTreeDelete(from, to, count, false, dropIfEmpty, eFlagFilter);
return asyncCollectionDelete(key, delete);
}
@Override
public CollectionFuture<Boolean> asyncMopDelete(String key,
boolean dropIfEmpty) {
List<String> mkeyList = new ArrayList<String>();
MapDelete delete = new MapDelete(mkeyList, false, dropIfEmpty);
return asyncCollectionDelete(key, delete);
}
@Override
public CollectionFuture<Boolean> asyncMopDelete(String key, String mkey,
boolean dropIfEmpty) {
if (mkey == null) {
throw new IllegalArgumentException("mkey is null");
}
validateMKey(mkey);
List<String> mkeyList = new ArrayList<String>(1);
mkeyList.add(mkey);
MapDelete delete = new MapDelete(mkeyList, false, dropIfEmpty);
return asyncCollectionDelete(key, delete);
}
@Override
public CollectionFuture<Boolean> asyncLopDelete(String key, int index,
boolean dropIfEmpty) {
ListDelete delete = new ListDelete(index, false, dropIfEmpty);
return asyncCollectionDelete(key, delete);
}
@Override
public CollectionFuture<Boolean> asyncLopDelete(String key, int from,
int to, boolean dropIfEmpty) {
ListDelete delete = new ListDelete(from, to, false, dropIfEmpty);
return asyncCollectionDelete(key, delete);
}
@Override
public CollectionFuture<Boolean> asyncSopDelete(String key, Object value,
boolean dropIfEmpty) {
SetDelete<Object> delete = new SetDelete<Object>(value, false, dropIfEmpty,
collectionTranscoder);
return asyncCollectionDelete(key, delete);
}
@Override
public <T> CollectionFuture<Boolean> asyncSopDelete(String key, T value,
boolean dropIfEmpty, Transcoder<T> tc) {
SetDelete<T> delete = new SetDelete<T>(value, false, dropIfEmpty, tc);
return asyncCollectionDelete(key, delete);
}
/**
* Generic count operation for collection items.
* Public methods for collection items call this method.
*
* @param k collection item's key
* @param collectionCount operation parameters (element key range, eflags, and so on)
* @return future holding the element count
*/
private CollectionFuture<Integer> asyncCollectionCount(final String k,
final CollectionCount collectionCount) {
final CountDownLatch latch = new CountDownLatch(1);
final CollectionFuture<Integer> rv = new CollectionFuture<Integer>(
latch, operationTimeout);
Operation op = opFact.collectionCount(k, collectionCount,
new OperationCallback() {
@Override
public void receivedStatus(OperationStatus status) {
CollectionOperationStatus cstatus;
if (status instanceof CollectionOperationStatus) {
cstatus = (CollectionOperationStatus) status;
} else {
getLogger().warn("Unhandled state: " + status);
cstatus = new CollectionOperationStatus(status);
}
if (cstatus.isSuccess()) {
rv.set(Integer.parseInt(cstatus.getMessage()),
new CollectionOperationStatus(new OperationStatus(true, "END")));
return;
}
rv.set(null, cstatus);
}
@Override
public void complete() {
latch.countDown();
}
});
rv.setOperation(op);
addOp(k, op);
return rv;
}
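/*
 * Public-API sketch, given an ArcusClient "client" (key, range, and filter
 * illustrative):
 *
 *   CollectionFuture<Integer> f =
 *       client.asyncBopGetItemCount("bkey", 0L, 100L, ElementFlagFilter.DO_NOT_FILTER);
 *   int count = f.get(700L, TimeUnit.MILLISECONDS);
 */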
@Override
public CollectionFuture<Integer> asyncBopGetItemCount(String key,
long from, long to,
ElementFlagFilter eFlagFilter) {
CollectionCount collectionCount = new BTreeCount(from, to, eFlagFilter);
return asyncCollectionCount(key, collectionCount);
}
@Override
public CollectionFuture<Boolean> asyncBopInsert(String key, long bkey,
byte[] eFlag, Object value,
CollectionAttributes attributesForCreate) {
BTreeInsert<Object> bTreeInsert = new BTreeInsert<Object>(value,
eFlag, (attributesForCreate != null), null, attributesForCreate);
return asyncCollectionInsert(key, String.valueOf(bkey), bTreeInsert, collectionTranscoder);
}
@Override
public CollectionFuture<Boolean> asyncMopInsert(String key, String mkey,
Object value,
CollectionAttributes attributesForCreate) {
validateMKey(mkey);
MapInsert<Object> mapInsert = new MapInsert<Object>(value,
(attributesForCreate != null), null, attributesForCreate);
return asyncCollectionInsert(key, mkey, mapInsert, collectionTranscoder);
}
@Override
public CollectionFuture<Boolean> asyncLopInsert(String key, int index,
Object value,
CollectionAttributes attributesForCreate) {
ListInsert<Object> listInsert = new ListInsert<Object>(value,
(attributesForCreate != null), null, attributesForCreate);
return asyncCollectionInsert(key, String.valueOf(index), listInsert, collectionTranscoder);
}
@Override
public CollectionFuture<Boolean> asyncSopInsert(String key, Object value,
CollectionAttributes attributesForCreate) {
SetInsert<Object> setInsert = new SetInsert<Object>(value,
(attributesForCreate != null), null, attributesForCreate);
return asyncCollectionInsert(key, "", setInsert, collectionTranscoder);
}
@Override
public <T> CollectionFuture<Boolean> asyncBopInsert(String key, long bkey,
byte[] eFlag, T value,
CollectionAttributes attributesForCreate,
Transcoder<T> tc) {
BTreeInsert<T> bTreeInsert = new BTreeInsert<T>(value, eFlag,
(attributesForCreate != null), null, attributesForCreate);
return asyncCollectionInsert(key, String.valueOf(bkey), bTreeInsert, tc);
}
@Override
public <T> CollectionFuture<Boolean> asyncMopInsert(String key, String mkey,
T value,
CollectionAttributes attributesForCreate,
Transcoder<T> tc) {
validateMKey(mkey);
MapInsert<T> mapInsert = new MapInsert<T>(value,
(attributesForCreate != null), null, attributesForCreate);
return asyncCollectionInsert(key, mkey, mapInsert, tc);
}
@Override
public <T> CollectionFuture<Boolean> asyncLopInsert(String key, int index,
T value,
CollectionAttributes attributesForCreate,
Transcoder<T> tc) {
ListInsert<T> listInsert = new ListInsert<T>(value, (attributesForCreate != null),
null, attributesForCreate);
return asyncCollectionInsert(key, String.valueOf(index), listInsert, tc);
}
@Override
public <T> CollectionFuture<Boolean> asyncSopInsert(String key, T value,
CollectionAttributes attributesForCreate,
Transcoder<T> tc) {
SetInsert<T> setInsert = new SetInsert<T>(value, (attributesForCreate != null),
null, attributesForCreate);
return asyncCollectionInsert(key, "", setInsert, tc);
}
@Override
public CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncBopPipedInsertBulk(
String key, Map<Long, Object> elements,
CollectionAttributes attributesForCreate) {
return asyncBopPipedInsertBulk(key, elements, attributesForCreate, collectionTranscoder);
}
@Override
public CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncMopPipedInsertBulk(
String key, Map<String, Object> elements,
CollectionAttributes attributesForCreate) {
return asyncMopPipedInsertBulk(key, elements, attributesForCreate, collectionTranscoder);
}
@Override
public CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncLopPipedInsertBulk(
String key, int index, List<Object> valueList, CollectionAttributes attributesForCreate) {
return asyncLopPipedInsertBulk(key, index, valueList, attributesForCreate,
collectionTranscoder);
}
@Override
public CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncSopPipedInsertBulk(
String key, List<Object> valueList, CollectionAttributes attributesForCreate) {
return asyncSopPipedInsertBulk(key, valueList, attributesForCreate,
collectionTranscoder);
}
@Override
public <T> CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncBopPipedInsertBulk(
String key, Map<Long, T> elements,
CollectionAttributes attributesForCreate, Transcoder<T> tc) {
if (elements.size() <= CollectionPipedInsert.MAX_PIPED_ITEM_COUNT) {
BTreePipedInsert<T> insert = new BTreePipedInsert<T>(key, elements,
(attributesForCreate != null), attributesForCreate, tc);
return asyncCollectionPipedInsert(key, insert);
} else {
List<CollectionPipedInsert<T>> insertList = new ArrayList<CollectionPipedInsert<T>>();
PartitionedMap<Long, T> list = new PartitionedMap<Long, T>(
elements, CollectionPipedInsert.MAX_PIPED_ITEM_COUNT);
for (int i = 0; i < list.size(); i++) {
insertList.add(new BTreePipedInsert<T>(key, list.get(i),
(attributesForCreate != null),
attributesForCreate, tc));
}
return asyncCollectionPipedInsert(key, insertList);
}
}
@Override
public <T> CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncMopPipedInsertBulk(
String key, Map<String, T> elements,
CollectionAttributes attributesForCreate, Transcoder<T> tc) {
for (Map.Entry