
/*
 * arcus-java-client : Arcus Java client
 * Copyright 2010-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached;

import java.io.IOException;
import java.io.InputStream;
import java.net.InetSocketAddress;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantLock;
import java.util.jar.JarFile;
import java.util.jar.Manifest;

import net.spy.memcached.collection.Attributes;
import net.spy.memcached.collection.BKeyObject;
import net.spy.memcached.collection.BTreeCount;
import net.spy.memcached.collection.BTreeCreate;
import net.spy.memcached.collection.BTreeDelete;
import net.spy.memcached.collection.BTreeElement;
import net.spy.memcached.collection.BTreeFindPosition;
import net.spy.memcached.collection.BTreeFindPositionWithGet;
import net.spy.memcached.collection.BTreeGet;
import net.spy.memcached.collection.BTreeGetBulk;
import net.spy.memcached.collection.BTreeGetBulkWithByteTypeBkey;
import net.spy.memcached.collection.BTreeGetBulkWithLongTypeBkey;
import net.spy.memcached.collection.BTreeGetByPosition;
import net.spy.memcached.collection.BTreeGetResult;
import net.spy.memcached.collection.BTreeMutate;
import net.spy.memcached.collection.BTreeOrder;
import net.spy.memcached.collection.BTreeSMGet;
import net.spy.memcached.collection.BTreeSMGetWithByteTypeBkey;
import net.spy.memcached.collection.BTreeSMGetWithByteTypeBkeyOld;
import net.spy.memcached.collection.BTreeSMGetWithLongTypeBkey;
import net.spy.memcached.collection.BTreeSMGetWithLongTypeBkeyOld;
import net.spy.memcached.collection.BTreeStore;
import net.spy.memcached.collection.BTreeStoreAndGet;
import net.spy.memcached.collection.BTreeUpdate;
import net.spy.memcached.collection.BTreeUpsert;
import net.spy.memcached.collection.ByteArrayBKey;
import net.spy.memcached.collection.ByteArrayTreeMap;
import net.spy.memcached.collection.CollectionAttributes;
import net.spy.memcached.collection.CollectionBulkStore;
import net.spy.memcached.collection.CollectionCount;
import net.spy.memcached.collection.CollectionCreate;
import net.spy.memcached.collection.CollectionDelete;
import net.spy.memcached.collection.CollectionExist;
import net.spy.memcached.collection.CollectionGet;
import net.spy.memcached.collection.CollectionMutate;
import net.spy.memcached.collection.CollectionPipedStore;
import net.spy.memcached.collection.CollectionPipedStore.BTreePipedStore;
import net.spy.memcached.collection.CollectionPipedStore.ByteArraysBTreePipedStore;
import net.spy.memcached.collection.CollectionPipedStore.ListPipedStore;
import net.spy.memcached.collection.CollectionPipedStore.SetPipedStore;
import net.spy.memcached.collection.CollectionPipedUpdate;
import net.spy.memcached.collection.CollectionPipedUpdate.BTreePipedUpdate;
import net.spy.memcached.collection.CollectionResponse;
import net.spy.memcached.collection.CollectionStore;
import net.spy.memcached.collection.CollectionUpdate;
import net.spy.memcached.collection.Element;
import net.spy.memcached.collection.ElementFlagFilter;
import net.spy.memcached.collection.ElementFlagUpdate;
import net.spy.memcached.collection.ElementValueType;
import net.spy.memcached.collection.ExtendedBTreeGet;
import net.spy.memcached.collection.ListCreate;
import net.spy.memcached.collection.ListDelete;
import net.spy.memcached.collection.ListGet;
import net.spy.memcached.collection.ListStore;
import net.spy.memcached.collection.SMGetElement;
import net.spy.memcached.collection.SMGetMode;
import net.spy.memcached.collection.SMGetTrimKey;
import net.spy.memcached.collection.SetCreate;
import net.spy.memcached.collection.SetDelete;
import net.spy.memcached.collection.SetExist;
import net.spy.memcached.collection.SetGet;
import net.spy.memcached.collection.SetPipedExist;
import net.spy.memcached.collection.SetStore;
import net.spy.memcached.compat.log.Logger;
import net.spy.memcached.compat.log.LoggerFactory;
import net.spy.memcached.internal.BTreeStoreAndGetFuture;
import net.spy.memcached.internal.CheckedOperationTimeoutException;
import net.spy.memcached.internal.CollectionFuture;
import net.spy.memcached.internal.CollectionGetBulkFuture;
import net.spy.memcached.internal.OperationFuture;
import net.spy.memcached.internal.SMGetFuture;
import net.spy.memcached.ops.BTreeFindPositionOperation;
import net.spy.memcached.ops.BTreeFindPositionWithGetOperation;
import net.spy.memcached.ops.BTreeGetBulkOperation;
import net.spy.memcached.ops.BTreeGetByPositionOperation;
import net.spy.memcached.ops.BTreeSortMergeGetOperation;
import net.spy.memcached.ops.BTreeSortMergeGetOperationOld;
import net.spy.memcached.ops.BTreeStoreAndGetOperation;
import net.spy.memcached.ops.CollectionBulkStoreOperation;
import net.spy.memcached.ops.CollectionGetOperation;
import net.spy.memcached.ops.CollectionOperationStatus;
import net.spy.memcached.ops.CollectionPipedExistOperation;
import net.spy.memcached.ops.CollectionPipedStoreOperation;
import net.spy.memcached.ops.CollectionPipedUpdateOperation;
import net.spy.memcached.ops.ExtendedBTreeGetOperation;
import net.spy.memcached.ops.GetAttrOperation;
import net.spy.memcached.ops.Mutator;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.ops.OperationCallback;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.OperationStatus;
import net.spy.memcached.ops.StoreType;
import net.spy.memcached.plugin.FrontCacheMemcachedClient;
import net.spy.memcached.transcoders.CollectionTranscoder;
import net.spy.memcached.transcoders.Transcoder;
import net.spy.memcached.util.BTreeUtil;

/**
 * Client to an Arcus cluster.
 *
 * <h2>Basic usage</h2>
 *
 * <pre>{@code
 * final static String arcusAdminAddrs = "127.0.0.1:2181";
 * final static String serviceCode = "cafe";
 *
 * ConnectionFactoryBuilder cfb = new ConnectionFactoryBuilder();
 *
 * ArcusClient c = ArcusClient.createArcusClient(arcusAdminAddrs, serviceCode, cfb);
 *
 * // Store a value (async) for one hour
 * c.set("someKey", 3600, someObject);
 * // Retrieve a value.
 * Future<Object> myFuture = c.asyncGet("someKey");
 * }</pre>
 *
 * If pool style is needed, use it as follows:
 *
 * <pre>{@code
 * int poolSize = 4;
 * ArcusClientPool pool =
 *     ArcusClient.createArcusClientPool(arcusAdminAddrs, serviceCode, cfb, poolSize);
 *
 * // Store a value
 * pool.set("someKey", 3600, someObject);
 * // Retrieve a value
 * Future<Object> myFuture = pool.asyncGet("someKey");
 * }</pre>
 */
public class ArcusClient extends FrontCacheMemcachedClient implements ArcusClientIF {

  static String VERSION;
  static Logger arcusLogger = LoggerFactory.getLogger("net.spy.memcached");
  static final String ARCUS_CLOUD_ADDR = "127.0.0.1:2181";

  public boolean dead;

  final BulkService bulkService;
  final Transcoder<Object> collectionTranscoder;

  final int smgetKeyChunkSize;

  static final int BOPGET_BULK_CHUNK_SIZE = 200;
  static final int NON_PIPED_BULK_INSERT_CHUNK_SIZE = 500;

  static final int MAX_GETBULK_KEY_COUNT = 200;
  static final int MAX_GETBULK_ELEMENT_COUNT = 50;
  static final int MAX_SMGET_COUNT = 1000; // server configuration is 2000.

  private CacheManager cacheManager;

  public void setCacheManager(CacheManager cacheManager) {
    this.cacheManager = cacheManager;
  }

  /**
   * @param hostPorts   arcus admin addresses
   * @param serviceCode service code
   * @param cfb         ConnectionFactoryBuilder
   * @return a single ArcusClient
   */
  public static ArcusClient createArcusClient(String hostPorts, String serviceCode,
      ConnectionFactoryBuilder cfb) {
    return ArcusClient.createArcusClient(hostPorts, serviceCode, cfb, 1, 10000).getClient();
  }

  /**
   * @param serviceCode service code
   * @param cfb         ConnectionFactoryBuilder
   * @return a single ArcusClient
   */
  public static ArcusClient createArcusClient(String serviceCode,
      ConnectionFactoryBuilder cfb) {
    return ArcusClient.createArcusClient(ARCUS_CLOUD_ADDR, serviceCode, cfb, 1, 10000)
        .getClient();
  }

  /**
   * @param hostPorts   arcus admin addresses
   * @param serviceCode service code
   * @param cfb         ConnectionFactoryBuilder
   * @param poolSize    Arcus client pool size
   * @return multiple ArcusClient
   */
  public static ArcusClientPool createArcusClientPool(String hostPorts, String serviceCode,
      ConnectionFactoryBuilder cfb, int poolSize) {
    return ArcusClient.createArcusClient(hostPorts, serviceCode, cfb, poolSize, 0);
  }

  /**
   * @param serviceCode service code
   * @param cfb         ConnectionFactoryBuilder
   * @param poolSize    Arcus client pool size
   * @return multiple ArcusClient
   */
  public static ArcusClientPool createArcusClientPool(String serviceCode,
      ConnectionFactoryBuilder cfb, int poolSize) {
    return ArcusClient.createArcusClient(ARCUS_CLOUD_ADDR, serviceCode, cfb, poolSize, 0);
  }

  /**
   * @param hostPorts          arcus admin addresses
   * @param serviceCode        service code
   * @param cfb                ConnectionFactoryBuilder
   * @param poolSize           Arcus client pool size
   * @param waitTimeForConnect waiting time for connection establishment (milliseconds)
   * @return multiple ArcusClient
   */
  private static ArcusClientPool createArcusClient(String hostPorts, String serviceCode,
      ConnectionFactoryBuilder cfb, int poolSize, int waitTimeForConnect) {

    if (hostPorts == null) {
      throw new NullPointerException("Arcus admin address required.");
    }
    if (serviceCode == null) {
      throw new NullPointerException("Service code required.");
    }
    if (hostPorts.isEmpty()) {
      throw new IllegalArgumentException("Arcus admin address is empty.");
    }
    if (serviceCode.isEmpty()) {
      throw new IllegalArgumentException("Service code is empty.");
    }

    if (VERSION == null) {
      VERSION = getVersion();
    }

    final CountDownLatch latch = new CountDownLatch(1);
    CacheManager exe = new CacheManager(hostPorts, serviceCode, cfb, latch, poolSize,
        waitTimeForConnect);

    try {
      latch.await();
    } catch (Exception e) {
      arcusLogger.warn("you cannot see this message!");
    }

    ArcusClient[] client = exe.getAC();
    return new ArcusClientPool(poolSize, client);
  }
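  // Illustrative usage, not part of the original source: a minimal sketch of a
  // client lifecycle built from the factory methods above. The admin address
  // and the service code "test" are hypothetical; use the values registered
  // for your Arcus cloud.
  //
  //   ConnectionFactoryBuilder cfb = new ConnectionFactoryBuilder();
  //   ArcusClient client = ArcusClient.createArcusClient("127.0.0.1:2181", "test", cfb);
  //   try {
  //     client.set("someKey", 60, "value"); // async store with a 60-second TTL
  //   } finally {
  //     client.shutdown(); // also shuts down the CacheManager and BulkService
  //   }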
  /**
   * Create an Arcus client for the given memcached server addresses.
   *
   * @param cf    connection factory to configure connections for this client
   * @param addrs socket addresses for the memcached servers
   * @return Arcus client
   */
  protected static ArcusClient getInstance(ConnectionFactory cf,
      List<InetSocketAddress> addrs) throws IOException {
    return new ArcusClient(cf, addrs);
  }

  /**
   * Create an Arcus client for the given memcached server addresses.
   *
   * @param cf    connection factory to configure connections for this client
   * @param addrs socket addresses for the memcached servers
   * @throws IOException if connections cannot be established
   */
  public ArcusClient(ConnectionFactory cf, List<InetSocketAddress> addrs) throws IOException {
    super(cf, addrs);
    bulkService = new BulkService(cf.getBulkServiceLoopLimit(),
        cf.getBulkServiceThreadCount(), cf.getBulkServiceSingleOpTimeout());
    collectionTranscoder = new CollectionTranscoder();
    smgetKeyChunkSize = cf.getDefaultMaxSMGetKeyChunkSize();
    registerMbean();
  }

  /**
   * Register mbean for Arcus client statistics.
   */
  private void registerMbean() {
    if ("false".equals(System.getProperty("arcus.mbean", "false").toLowerCase())) {
      getLogger().info("Arcus client statistics MBean is NOT registered.");
      return;
    }

    try {
      StatisticsHandler mbean = new StatisticsHandler(this);
      ArcusMBeanServer.getInstance().registMBean(
          mbean,
          mbean.getClass().getPackage().getName() + ":type="
              + mbean.getClass().getSimpleName() + "-" + mbean.hashCode());

      getLogger().info("Arcus client statistics MBean is registered.");
    } catch (Exception e) {
      getLogger().warn("Failed to initialize statistics mbean.", e);
    }
  }
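  // Usage note, not part of the original source: the statistics MBean above is
  // opt-in. Start the JVM with the system property set before the client is
  // constructed, e.g.
  //
  //   java -Darcus.mbean=true -cp <classpath> YourApplication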
  /* (non-Javadoc)
   * @see net.spy.memcached.ArcusClient#shutdown()
   */
  @Override
  public void shutdown() {
    super.shutdown();
    // Connect to Arcus server directly, cache manager may be null.
    if (cacheManager != null) {
      cacheManager.shutdown();
    }
    dead = true;
    if (bulkService != null) {
      bulkService.shutdown();
    }
  }

  Future<Boolean> asyncStore(StoreType storeType, String key, int exp, CachedData co) {
    final CountDownLatch latch = new CountDownLatch(1);
    final OperationFuture<Boolean> rv = new OperationFuture<Boolean>(latch, operationTimeout);
    Operation op = opFact.store(storeType, key, co.getFlags(), exp, co.getData(),
        new OperationCallback() {
          public void receivedStatus(OperationStatus val) {
            rv.set(val.isSuccess());
          }

          public void complete() {
            latch.countDown();
          }
        });
    rv.setOperation(op);
    addOp(key, op);
    return rv;
  }

  /* (non-Javadoc)
   * @see net.spy.memcached.ArcusClient#asyncSetAttr(java.lang.String, net.spy.memcached.collection.CollectionAttributes)
   */
  @Override
  public CollectionFuture<Boolean> asyncSetAttr(String key, Attributes attrs) {
    final CountDownLatch latch = new CountDownLatch(1);
    final CollectionFuture<Boolean> rv = new CollectionFuture<Boolean>(latch, operationTimeout);
    Operation op = opFact.setAttr(key, attrs, new OperationCallback() {
      public void receivedStatus(OperationStatus status) {
        if (status instanceof CollectionOperationStatus) {
          rv.set(status.isSuccess(), (CollectionOperationStatus) status);
        } else {
          getLogger().warn("Unhandled state: " + status);
          rv.set(status.isSuccess(), new CollectionOperationStatus(status));
        }
      }

      public void complete() {
        latch.countDown();
      }
    });
    rv.setOperation(op);
    addOp(key, op);
    return rv;
  }

  /* (non-Javadoc)
   * @see net.spy.memcached.ArcusClient#asyncGetAttr(java.lang.String)
   */
  @Override
  public CollectionFuture<CollectionAttributes> asyncGetAttr(final String key) {
    final CountDownLatch latch = new CountDownLatch(1);
    final CollectionFuture<CollectionAttributes> rv =
        new CollectionFuture<CollectionAttributes>(latch, operationTimeout);
    Operation op = opFact.getAttr(key, new GetAttrOperation.Callback() {
      CollectionAttributes attrs = new CollectionAttributes();

      public void receivedStatus(OperationStatus status) {
        CollectionOperationStatus stat;
        if (status instanceof CollectionOperationStatus) {
          stat = (CollectionOperationStatus) status;
        } else {
          stat = new CollectionOperationStatus(status);
        }
        rv.set(stat.isSuccess() ? attrs : null, stat);
      }

      public void complete() {
        latch.countDown();
      }

      public void gotAttribute(String k, String attr) {
        assert key.equals(k) : "Wrong key returned";
        attrs.setAttribute(attr);
      }
    });
    rv.setOperation(op);
    addOp(key, op);
    return rv;
  }

  /**
   * Generic get operation for list items. Public methods for list items call this method.
   *
   * @param k             list item's key
   * @param collectionGet operation parameters (element key and so on)
   * @param tc            transcoder to serialize and deserialize value
   * @return future holding the fetched value
   */
  private <T> CollectionFuture<List<T>> asyncLopGet(final String k,
      final CollectionGet collectionGet, final Transcoder<T> tc) {
    final CountDownLatch latch = new CountDownLatch(1);
    final CollectionFuture<List<T>> rv = new CollectionFuture<List<T>>(latch, operationTimeout);

    Operation op = opFact.collectionGet(k, collectionGet,
        new CollectionGetOperation.Callback() {
          List<T> list = new ArrayList<T>();

          public void receivedStatus(OperationStatus status) {
            CollectionOperationStatus cstatus;
            if (status instanceof CollectionOperationStatus) {
              cstatus = (CollectionOperationStatus) status;
            } else {
              getLogger().warn("Unhandled state: " + status);
              cstatus = new CollectionOperationStatus(status);
            }
            if (cstatus.isSuccess()) {
              rv.set(list, cstatus);
              return;
            }
            switch (cstatus.getResponse()) {
              case NOT_FOUND:
                rv.set(null, cstatus);
                if (getLogger().isDebugEnabled()) {
                  getLogger().debug("Key(%s) not found : %s", k, cstatus);
                }
                break;
              case NOT_FOUND_ELEMENT:
                rv.set(list, cstatus);
                if (getLogger().isDebugEnabled()) {
                  getLogger().debug("Element(%s) not found : %s", k, cstatus);
                }
                break;
              case OUT_OF_RANGE:
                rv.set(list, cstatus);
                if (getLogger().isDebugEnabled()) {
                  getLogger().debug("Element(%s) not found in condition : %s", k, cstatus);
                }
                break;
              case UNREADABLE:
                rv.set(null, cstatus);
                if (getLogger().isDebugEnabled()) {
                  getLogger().debug("Element(%s) is not readable : %s", k, cstatus);
                }
                break;
              default:
                rv.set(null, cstatus);
                if (getLogger().isDebugEnabled()) {
                  getLogger().debug("Key(%s) unknown status : %s", k, cstatus);
                }
                break;
            }
          }

          public void complete() {
            latch.countDown();
          }

          public void gotData(String key, long subkey, int flags, byte[] data) {
            assert key.equals(k) : "Wrong key returned";
            list.add(tc.decode(new CachedData(flags, data, tc.getMaxSize())));
          }
        });
    rv.setOperation(op);
    addOp(k, op);
    return rv;
  }

  /* (non-Javadoc)
   * @see net.spy.memcached.ArcusClient#asyncSopExist(java.lang.String, T, net.spy.memcached.transcoders.Transcoder)
   */
  @Override
  public <T> CollectionFuture<Boolean> asyncSopExist(String key, T value, Transcoder<T> tc) {
    SetExist<T> exist = new SetExist<T>();
    exist.setValue(value);
    return asyncCollectionExist(key, "", exist, tc);
  }

  /* (non-Javadoc)
   * @see net.spy.memcached.ArcusClient#asyncSopExist(java.lang.String, java.lang.Object)
   */
  @Override
  public CollectionFuture<Boolean> asyncSopExist(String key, Object value) {
    SetExist<Object> exist = new SetExist<Object>();
    exist.setValue(value);
    return asyncCollectionExist(key, "", exist, collectionTranscoder);
  }
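  // Illustrative usage, not part of the original source: set-membership test
  // through asyncSopExist. The key "set:colors" is hypothetical.
  //
  //   CollectionFuture<Boolean> f = client.asyncSopExist("set:colors", "red");
  //   Boolean exists = f.get(700L, TimeUnit.MILLISECONDS);
  //   CollectionOperationStatus status = f.getOperationStatus(); // EXIST / NOT_EXIST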
  /**
   * Generic get operation for set items. Public methods for set items call this method.
   *
   * @param k             set item's key
   * @param collectionGet operation parameters (element key and so on)
   * @param tc            transcoder to serialize and deserialize value
   * @return future holding the fetched value
   */
  private <T> CollectionFuture<Set<T>> asyncSopGet(final String k,
      final CollectionGet collectionGet, final Transcoder<T> tc) {
    final CountDownLatch latch = new CountDownLatch(1);
    final CollectionFuture<Set<T>> rv = new CollectionFuture<Set<T>>(latch, operationTimeout);

    Operation op = opFact.collectionGet(k, collectionGet,
        new CollectionGetOperation.Callback() {
          Set<T> set = new HashSet<T>();

          public void receivedStatus(OperationStatus status) {
            CollectionOperationStatus cstatus;
            if (status instanceof CollectionOperationStatus) {
              cstatus = (CollectionOperationStatus) status;
            } else {
              getLogger().warn("Unhandled state: " + status);
              cstatus = new CollectionOperationStatus(status);
            }
            if (cstatus.isSuccess()) {
              rv.set(set, cstatus);
              return;
            }
            switch (cstatus.getResponse()) {
              case NOT_FOUND:
                rv.set(null, cstatus);
                if (getLogger().isDebugEnabled()) {
                  getLogger().debug("Key(%s) not found : %s", k, cstatus);
                }
                break;
              case NOT_FOUND_ELEMENT:
                rv.set(set, cstatus);
                if (getLogger().isDebugEnabled()) {
                  getLogger().debug("Element(%s) not found : %s", k, cstatus);
                }
                break;
              case UNREADABLE:
                rv.set(null, cstatus);
                if (getLogger().isDebugEnabled()) {
                  getLogger().debug("Collection(%s) is not readable : %s", k, cstatus);
                }
                break;
              default:
                rv.set(null, cstatus);
                if (getLogger().isDebugEnabled()) {
                  getLogger().debug("Key(%s) unknown status : %s", k, cstatus);
                }
                break;
            }
          }

          public void complete() {
            latch.countDown();
          }

          public void gotData(String key, long subkey, int flags, byte[] data) {
            assert key.equals(k) : "Wrong key returned";
            set.add(tc.decode(new CachedData(flags, data, tc.getMaxSize())));
          }
        });
    rv.setOperation(op);
    addOp(k, op);
    return rv;
  }
  /**
   * Generic get operation for b+tree items. Public methods for b+tree items call this method.
   *
   * @param k             b+tree item's key
   * @param collectionGet operation parameters (element keys and so on)
   * @param reverse       false=forward or true=backward
   * @param tc            transcoder to serialize and deserialize value
   * @return future holding the map of fetched elements and their keys
   */
  private <T> CollectionFuture<Map<Long, Element<T>>> asyncBopGet(final String k,
      final CollectionGet collectionGet, final boolean reverse, final Transcoder<T> tc) {
    final CountDownLatch latch = new CountDownLatch(1);
    final CollectionFuture<Map<Long, Element<T>>> rv =
        new CollectionFuture<Map<Long, Element<T>>>(latch, operationTimeout);
    Operation op = opFact.collectionGet(k, collectionGet,
        new CollectionGetOperation.Callback() {
          TreeMap<Long, Element<T>> map = new TreeMap<Long, Element<T>>(
              (reverse) ? Collections.reverseOrder() : null);

          public void receivedStatus(OperationStatus status) {
            CollectionOperationStatus cstatus;
            if (status instanceof CollectionOperationStatus) {
              cstatus = (CollectionOperationStatus) status;
            } else {
              getLogger().warn("Unhandled state: " + status);
              cstatus = new CollectionOperationStatus(status);
            }
            if (cstatus.isSuccess()) {
              rv.set(map, cstatus);
              return;
            }
            switch (cstatus.getResponse()) {
              case NOT_FOUND:
                rv.set(null, cstatus);
                if (getLogger().isDebugEnabled()) {
                  getLogger().debug("Key(%s) not found : %s", k, cstatus);
                }
                break;
              case NOT_FOUND_ELEMENT:
                rv.set(map, cstatus);
                if (getLogger().isDebugEnabled()) {
                  getLogger().debug("Element(%s) not found : %s", k, cstatus);
                }
                break;
              case UNREADABLE:
                rv.set(null, cstatus);
                if (getLogger().isDebugEnabled()) {
                  getLogger().debug("Element(%s) is not readable : %s", k, cstatus);
                }
                break;
              default:
                rv.set(null, cstatus);
                if (getLogger().isDebugEnabled()) {
                  getLogger().debug("Key(%s) Unknown response : %s", k, cstatus);
                }
                break;
            }
          }

          public void complete() {
            latch.countDown();
          }

          public void gotData(String key, long subkey, int flags, byte[] data) {
            assert key.equals(k) : "Wrong key returned";
            map.put(subkey,
                new Element<T>(subkey,
                    tc.decode(new CachedData(flags, data, tc.getMaxSize())),
                    collectionGet.getElementFlag()));
          }
        });
    rv.setOperation(op);
    addOp(k, op);
    return rv;
  }

  /**
   * Generic store operation for collection items. Public methods for collection items call
   * this method.
   *
   * @param key             collection item's key
   * @param subkey          element key (list index, b+tree bkey)
   * @param collectionStore operation parameters (value, eflags, attributes, and so on)
   * @param tc              transcoder to serialize and deserialize value
   * @return future holding the success/failure of the operation
   */
  private <T> CollectionFuture<Boolean> asyncCollectionStore(String key, String subkey,
      CollectionStore<T> collectionStore, Transcoder<T> tc) {
    CachedData co = tc.encode(collectionStore.getValue());
    collectionStore.setFlags(co.getFlags());
    return asyncCollectionStore(key, subkey, collectionStore, co);
  }
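  // Illustrative usage, not part of the original source: storing and fetching a
  // b+tree element through the public wrappers defined further below. The key
  // "btree:log" is hypothetical.
  //
  //   client.asyncBopInsert("btree:log", 1L, null, "first", new CollectionAttributes());
  //   CollectionFuture<Map<Long, Element<Object>>> f = client.asyncBopGet(
  //       "btree:log", 1L, ElementFlagFilter.DO_NOT_FILTER, false, false);
  //   Map<Long, Element<Object>> elements = f.get(700L, TimeUnit.MILLISECONDS);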
  /**
   * Generic store operation for collection items. Public methods for collection items call
   * this method.
   *
   * @param key             collection item's key
   * @param subkey          element key (list index, b+tree bkey)
   * @param collectionStore operation parameters (value, eflags, attributes, and so on)
   * @param co              transcoded value
   * @return future holding the success/failure of the operation
   */
  <T> CollectionFuture<Boolean> asyncCollectionStore(final String key, final String subkey,
      final CollectionStore<T> collectionStore, final CachedData co) {
    final CountDownLatch latch = new CountDownLatch(1);
    final CollectionFuture<Boolean> rv = new CollectionFuture<Boolean>(latch, operationTimeout);
    Operation op = opFact.collectionStore(key, subkey, collectionStore, co.getData(),
        new OperationCallback() {
          public void receivedStatus(OperationStatus status) {
            CollectionOperationStatus cstatus;
            if (status instanceof CollectionOperationStatus) {
              cstatus = (CollectionOperationStatus) status;
            } else {
              getLogger().warn("Unhandled state: " + status);
              cstatus = new CollectionOperationStatus(status);
            }
            rv.set(cstatus.isSuccess(), cstatus);
            if (!cstatus.isSuccess() && getLogger().isDebugEnabled()) {
              getLogger().debug("Insertion to the collection failed : "
                  + cstatus.getMessage()
                  + " (type=" + collectionStore.getClass().getName()
                  + ", key=" + key
                  + ", subkey=" + subkey
                  + ", value=" + collectionStore.getValue() + ")");
            }
          }

          public void complete() {
            latch.countDown();
          }
        });
    rv.setOperation(op);
    addOp(key, op);
    return rv;
  }

  /**
   * Generic pipelined store operation for collection items. Public methods for collection
   * items call this method.
   *
   * @param key   collection item's key
   * @param store operation parameters (values, attributes, and so on)
   * @return future holding the success/failure codes of individual operations and their index
   */
  <T> CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncCollectionPipedStore(
      final String key, final CollectionPipedStore<T> store) {
    if (store.getItemCount() == 0) {
      throw new IllegalArgumentException(
          "The number of piped operations must be larger than 0.");
    }
    if (store.getItemCount() > CollectionPipedStore.MAX_PIPED_ITEM_COUNT) {
      throw new IllegalArgumentException(
          "The number of piped operations must not exceed a maximum of "
              + CollectionPipedStore.MAX_PIPED_ITEM_COUNT + ".");
    }

    final CountDownLatch latch = new CountDownLatch(1);
    final CollectionFuture<Map<Integer, CollectionOperationStatus>> rv =
        new CollectionFuture<Map<Integer, CollectionOperationStatus>>(latch, operationTimeout);

    Operation op = opFact.collectionPipedStore(key, store,
        new CollectionPipedStoreOperation.Callback() {
          Map<Integer, CollectionOperationStatus> result =
              new TreeMap<Integer, CollectionOperationStatus>();

          public void receivedStatus(OperationStatus status) {
            CollectionOperationStatus cstatus;
            if (status instanceof CollectionOperationStatus) {
              cstatus = (CollectionOperationStatus) status;
            } else {
              getLogger().warn("Unhandled state: " + status);
              cstatus = new CollectionOperationStatus(status);
            }
            rv.set(result, cstatus);
          }

          public void complete() {
            latch.countDown();
          }

          public void gotStatus(Integer index, OperationStatus status) {
            if (status instanceof CollectionOperationStatus) {
              result.put(index, (CollectionOperationStatus) status);
            } else {
              result.put(index, new CollectionOperationStatus(status));
            }
          }
        });
    rv.setOperation(op);
    addOp(key, op);
    return rv;
  }
  /**
   * Generic pipelined update operation for collection items. Public methods for collection
   * items call this method.
   *
   * @param key    collection item's key
   * @param update operation parameters (values and so on)
   * @return future holding the success/failure codes of individual operations and their index
   */
  <T> CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncCollectionPipedUpdate(
      final String key, final CollectionPipedUpdate<T> update) {
    if (update.getItemCount() == 0) {
      throw new IllegalArgumentException(
          "The number of piped operations must be larger than 0.");
    }
    if (update.getItemCount() > CollectionPipedUpdate.MAX_PIPED_ITEM_COUNT) {
      throw new IllegalArgumentException(
          "The number of piped operations must not exceed a maximum of "
              + CollectionPipedUpdate.MAX_PIPED_ITEM_COUNT + ".");
    }

    final CountDownLatch latch = new CountDownLatch(1);
    final CollectionFuture<Map<Integer, CollectionOperationStatus>> rv =
        new CollectionFuture<Map<Integer, CollectionOperationStatus>>(latch, operationTimeout);

    Operation op = opFact.collectionPipedUpdate(key, update,
        new CollectionPipedUpdateOperation.Callback() {
          Map<Integer, CollectionOperationStatus> result =
              new TreeMap<Integer, CollectionOperationStatus>();

          public void receivedStatus(OperationStatus status) {
            CollectionOperationStatus cstatus;
            if (status instanceof CollectionOperationStatus) {
              cstatus = (CollectionOperationStatus) status;
            } else {
              getLogger().warn("Unhandled state: " + status);
              cstatus = new CollectionOperationStatus(status);
            }
            rv.set(result, cstatus);
          }

          public void complete() {
            latch.countDown();
          }

          public void gotStatus(Integer index, OperationStatus status) {
            if (status instanceof CollectionOperationStatus) {
              result.put(index, (CollectionOperationStatus) status);
            } else {
              result.put(index, new CollectionOperationStatus(status));
            }
          }
        });
    rv.setOperation(op);
    addOp(key, op);
    return rv;
  }

  /**
   * Generic pipelined update operation for collection items. Public methods for collection
   * items call this method.
   *
   * @param key        collection item's key
   * @param updateList list of operation parameters (values and so on)
   * @return future holding the success/failure codes of individual operations and their index
   */
  <T> CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncCollectionPipedUpdate(
      final String key, final List<CollectionPipedUpdate<T>> updateList) {
    final ConcurrentLinkedQueue<Operation> ops = new ConcurrentLinkedQueue<Operation>();
    final CountDownLatch latch = new CountDownLatch(updateList.size());
    final List<OperationStatus> mergedOperationStatus =
        Collections.synchronizedList(new ArrayList<OperationStatus>(1));
    final Map<Integer, CollectionOperationStatus> mergedResult =
        new ConcurrentHashMap<Integer, CollectionOperationStatus>();

    for (int i = 0; i < updateList.size(); i++) {
      final CollectionPipedUpdate<T> update = updateList.get(i);
      final int idx = i;

      Operation op = opFact.collectionPipedUpdate(key, update,
          new CollectionPipedUpdateOperation.Callback() {
            // each result status
            public void receivedStatus(OperationStatus status) {
              CollectionOperationStatus cstatus;
              if (status instanceof CollectionOperationStatus) {
                cstatus = (CollectionOperationStatus) status;
              } else {
                getLogger().warn("Unhandled state: " + status);
                cstatus = new CollectionOperationStatus(status);
              }
              mergedOperationStatus.add(cstatus);
            }

            // complete
            public void complete() {
              latch.countDown();
            }

            // got status
            public void gotStatus(Integer index, OperationStatus status) {
              if (status instanceof CollectionOperationStatus) {
                mergedResult.put(index + (idx * CollectionPipedUpdate.MAX_PIPED_ITEM_COUNT),
                    (CollectionOperationStatus) status);
              } else {
                mergedResult.put(index + (idx * CollectionPipedUpdate.MAX_PIPED_ITEM_COUNT),
                    new CollectionOperationStatus(status));
              }
            }
          });
      addOp(key, op);
      ops.add(op);
    }

    return new CollectionFuture<Map<Integer, CollectionOperationStatus>>(
        latch, operationTimeout) {
      @Override
      public boolean cancel(boolean ign) {
        boolean rv = false;
        for (Operation op : ops) {
          op.cancel("by application.");
          rv |= op.getState() == OperationState.WRITING;
        }
        return rv;
      }
      @Override
      public boolean isCancelled() {
        for (Operation op : ops) {
          if (op.isCancelled()) {
            return true;
          }
        }
        return false;
      }

      @Override
      public Map<Integer, CollectionOperationStatus> get(long duration, TimeUnit units)
          throws InterruptedException, TimeoutException, ExecutionException {
        if (!latch.await(duration, units)) {
          for (Operation op : ops) {
            MemcachedConnection.opTimedOut(op);
          }
          throw new CheckedOperationTimeoutException("Timed out waiting for operation", ops);
        } else {
          // continuous timeout counter will be reset
          for (Operation op : ops) {
            MemcachedConnection.opSucceeded(op);
          }
        }

        for (Operation op : ops) {
          if (op != null && op.hasErrored()) {
            throw new ExecutionException(op.getException());
          }
          if (op.isCancelled()) {
            throw new ExecutionException(new RuntimeException(op.getCancelCause()));
          }
        }
        return mergedResult;
      }

      @Override
      public CollectionOperationStatus getOperationStatus() {
        for (OperationStatus status : mergedOperationStatus) {
          if (!status.isSuccess()) {
            return new CollectionOperationStatus(status);
          }
        }
        return new CollectionOperationStatus(true, "END", CollectionResponse.END);
      }
    };
  }

  /**
   * Generic delete operation for collection items. Public methods for collection items call
   * this method.
   *
   * @param key              collection item's key
   * @param collectionDelete operation parameters (element index/key, value, and so on)
   * @return future holding the success/failure of the operation
   */
  private CollectionFuture<Boolean> asyncCollectionDelete(final String key,
      final CollectionDelete collectionDelete) {
    final CountDownLatch latch = new CountDownLatch(1);
    final CollectionFuture<Boolean> rv = new CollectionFuture<Boolean>(latch, operationTimeout);
    Operation op = opFact.collectionDelete(key, collectionDelete, new OperationCallback() {
      public void receivedStatus(OperationStatus status) {
        CollectionOperationStatus cstatus;
        if (status instanceof CollectionOperationStatus) {
          cstatus = (CollectionOperationStatus) status;
        } else {
          getLogger().warn("Unhandled state: " + status);
          cstatus = new CollectionOperationStatus(status);
        }
        rv.set(cstatus.isSuccess(), cstatus);
        if (!cstatus.isSuccess() && getLogger().isDebugEnabled()) {
          getLogger().debug("Deletion to the collection failed : "
              + cstatus.getMessage()
              + " (type=" + collectionDelete.getClass().getName()
              + ", key=" + key + ")");
        }
      }

      public void complete() {
        latch.countDown();
      }
    });
    rv.setOperation(op);
    addOp(key, op);
    return rv;
  }
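  // Illustrative usage, not part of the original source: futures returned by
  // these methods throw CheckedOperationTimeoutException (a TimeoutException
  // subclass) from get() when the latch does not open in time, so callers
  // typically bound the wait and cancel on timeout. The key is hypothetical.
  //
  //   CollectionFuture<Boolean> f = client.asyncBopDelete("btree:log", 1L,
  //       ElementFlagFilter.DO_NOT_FILTER, false);
  //   try {
  //     f.get(700L, TimeUnit.MILLISECONDS);
  //   } catch (TimeoutException e) {
  //     f.cancel(true);
  //   }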
  /**
   * Generic existence operation for collection items. Public methods for collection items
   * call this method.
   *
   * @param key             collection item's key
   * @param subkey          element key (list index, b+tree bkey)
   * @param collectionExist operation parameters (element value and so on)
   * @param tc              transcoder to serialize and deserialize value
   * @return future holding the success/failure of the operation
   */
  private <T> CollectionFuture<Boolean> asyncCollectionExist(final String key,
      final String subkey, final CollectionExist<T> collectionExist, Transcoder<T> tc) {
    final CountDownLatch latch = new CountDownLatch(1);
    final CollectionFuture<Boolean> rv = new CollectionFuture<Boolean>(latch, operationTimeout);

    CachedData cd = tc.encode(collectionExist.getValue());
    collectionExist.setData(cd.getData());

    Operation op = opFact.collectionExist(key, subkey, collectionExist,
        new OperationCallback() {
          public void receivedStatus(OperationStatus status) {
            CollectionOperationStatus cstatus;
            if (status instanceof CollectionOperationStatus) {
              cstatus = (CollectionOperationStatus) status;
            } else {
              getLogger().warn("Unhandled state: " + status);
              cstatus = new CollectionOperationStatus(status);
            }
            boolean isExist = (CollectionResponse.EXIST == cstatus.getResponse())
                ? true : false;
            rv.set(isExist, cstatus);
            if (!cstatus.isSuccess() && getLogger().isDebugEnabled()) {
              getLogger().debug("Exist command to the collection failed : "
                  + cstatus.getMessage()
                  + " (type=" + collectionExist.getClass().getName()
                  + ", key=" + key
                  + ", subkey=" + subkey + ")");
            }
          }

          public void complete() {
            latch.countDown();
          }
        });
    rv.setOperation(op);
    addOp(key, op);
    return rv;
  }

  /* (non-Javadoc)
   * @see net.spy.memcached.ArcusClient#asyncSetBulk(java.util.List, int, T, net.spy.memcached.transcoders.Transcoder)
   */
  @Override
  public <T> Future<Map<String, CollectionOperationStatus>> asyncSetBulk(List<String> key,
      int exp, T o, Transcoder<T> tc) {
    return bulkService.setBulk(key, exp, o, tc, new ArcusClient[] { this });
  }

  /* (non-Javadoc)
   * @see net.spy.memcached.ArcusClient#asyncSetBulk(java.util.List, int, java.lang.Object)
   */
  @Override
  public Future<Map<String, CollectionOperationStatus>> asyncSetBulk(List<String> key,
      int exp, Object o) {
    return asyncSetBulk(key, exp, o, transcoder);
  }

  /* (non-Javadoc)
   * @see net.spy.memcached.ArcusClient#asyncSetBulk(java.util.Map, int, net.spy.memcached.transcoders.Transcoder)
   */
  @Override
  public <T> Future<Map<String, CollectionOperationStatus>> asyncSetBulk(Map<String, T> o,
      int exp, Transcoder<T> tc) {
    return bulkService.setBulk(o, exp, tc, new ArcusClient[] { this });
  }

  /* (non-Javadoc)
   * @see net.spy.memcached.ArcusClient#asyncSetBulk(java.util.Map, int)
   */
  @Override
  public Future<Map<String, CollectionOperationStatus>> asyncSetBulk(Map<String, Object> o,
      int exp) {
    return asyncSetBulk(o, exp, transcoder);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#getMaxPipedItemCount()
   */
  @Override
  public int getMaxPipedItemCount() {
    return CollectionPipedStore.MAX_PIPED_ITEM_COUNT;
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncBopCreate(java.lang.String, net.spy.memcached.collection.ElementValueType, net.spy.memcached.collection.CollectionAttributes)
   */
  @Override
  public CollectionFuture<Boolean> asyncBopCreate(String key, ElementValueType valueType,
      CollectionAttributes attributes) {
    int flag = CollectionTranscoder.examineFlags(valueType);
    boolean noreply = false;
    CollectionCreate bTreeCreate = new BTreeCreate(flag, attributes.getExpireTime(),
        attributes.getMaxCount(), attributes.getOverflowAction(), attributes.getReadable(),
        noreply);
    return asyncCollectionCreate(key, bTreeCreate);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncSopCreate(java.lang.String, net.spy.memcached.collection.CollectionAttributes)
   */
  @Override
  public CollectionFuture<Boolean> asyncSopCreate(String key, ElementValueType type,
      CollectionAttributes attributes) {
    int flag = CollectionTranscoder.examineFlags(type);
    boolean noreply = false;
    CollectionCreate setCreate = new SetCreate(flag, attributes.getExpireTime(),
        attributes.getMaxCount(), attributes.getReadable(), noreply);
    return asyncCollectionCreate(key, setCreate);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncLopCreate(java.lang.String, net.spy.memcached.collection.CollectionAttributes)
   */
  @Override
  public CollectionFuture<Boolean> asyncLopCreate(String key, ElementValueType type,
      CollectionAttributes attributes) {
    int flag = CollectionTranscoder.examineFlags(type);
    boolean noreply = false;
    CollectionCreate listCreate = new ListCreate(flag, attributes.getExpireTime(),
        attributes.getMaxCount(), attributes.getOverflowAction(), attributes.getReadable(),
        noreply);
    return asyncCollectionCreate(key, listCreate);
  }
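  // Illustrative usage, not part of the original source: explicitly creating an
  // empty b+tree that stores String values, with default attributes. The key is
  // hypothetical.
  //
  //   CollectionFuture<Boolean> f = client.asyncBopCreate("btree:log",
  //       ElementValueType.STRING, new CollectionAttributes());
  //   Boolean created = f.get(700L, TimeUnit.MILLISECONDS);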
  /**
   * Generic create operation for collection items. Public methods for collection items call
   * this method.
   *
   * @param key              collection item's key
   * @param collectionCreate operation parameters (flags, expiration time, and so on)
   * @return future holding the success/failure of the operation
   */
  CollectionFuture<Boolean> asyncCollectionCreate(final String key,
      final CollectionCreate collectionCreate) {
    final CountDownLatch latch = new CountDownLatch(1);
    final CollectionFuture<Boolean> rv = new CollectionFuture<Boolean>(latch, operationTimeout);

    Operation op = opFact.collectionCreate(key, collectionCreate, new OperationCallback() {
      @Override
      public void receivedStatus(OperationStatus status) {
        CollectionOperationStatus cstatus;
        if (status instanceof CollectionOperationStatus) {
          cstatus = (CollectionOperationStatus) status;
        } else {
          getLogger().warn("Unhandled state: " + status);
          cstatus = new CollectionOperationStatus(status);
        }
        rv.set(cstatus.isSuccess(), cstatus);
        if (!cstatus.isSuccess() && getLogger().isDebugEnabled()) {
          getLogger().debug("Insertion to the collection failed : "
              + cstatus.getMessage()
              + " (type=" + collectionCreate.getClass().getName()
              + ", key=" + key
              + ", attribute=" + collectionCreate.toString() + ")");
        }
      }

      @Override
      public void complete() {
        latch.countDown();
      }
    });
    rv.setOperation(op);
    addOp(key, op);
    return rv;
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncBopGet(java.lang.String, long, boolean, boolean)
   */
  @Override
  public CollectionFuture<Map<Long, Element<Object>>> asyncBopGet(String key, long bkey,
      ElementFlagFilter eFlagFilter, boolean withDelete, boolean dropIfEmpty) {
    BTreeGet get = new BTreeGet(bkey, withDelete, dropIfEmpty, eFlagFilter);
    return asyncBopGet(key, get, false, collectionTranscoder);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncBopGet(java.lang.String, long, long, int, int, boolean, boolean)
   */
  @Override
  public CollectionFuture<Map<Long, Element<Object>>> asyncBopGet(String key, long from,
      long to, ElementFlagFilter eFlagFilter, int offset, int count, boolean withDelete,
      boolean dropIfEmpty) {
    BTreeGet get = new BTreeGet(from, to, offset, count, withDelete, dropIfEmpty, eFlagFilter);
    boolean reverse = from > to;
    return asyncBopGet(key, get, reverse, collectionTranscoder);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncBopGet(java.lang.String, long, boolean, boolean, net.spy.memcached.transcoders.Transcoder)
   */
  @Override
  public <T> CollectionFuture<Map<Long, Element<T>>> asyncBopGet(String key, long bkey,
      ElementFlagFilter eFlagFilter, boolean withDelete, boolean dropIfEmpty,
      Transcoder<T> tc) {
    BTreeGet get = new BTreeGet(bkey, withDelete, dropIfEmpty, eFlagFilter);
    return asyncBopGet(key, get, false, tc);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncBopGet(java.lang.String, long, long, int, int, boolean, boolean, net.spy.memcached.transcoders.Transcoder)
   */
  @Override
  public <T> CollectionFuture<Map<Long, Element<T>>> asyncBopGet(String key, long from,
      long to, ElementFlagFilter eFlagFilter, int offset, int count, boolean withDelete,
      boolean dropIfEmpty, Transcoder<T> tc) {
    BTreeGet get = new BTreeGet(from, to, offset, count, withDelete, dropIfEmpty, eFlagFilter);
    boolean reverse = from > to;
    return asyncBopGet(key, get, reverse, tc);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncLopGet(java.lang.String, int, boolean, boolean)
   */
  @Override
  public CollectionFuture<List<Object>> asyncLopGet(String key, int index,
      boolean withDelete, boolean dropIfEmpty) {
    ListGet get = new ListGet(index, withDelete, dropIfEmpty);
    return asyncLopGet(key, get, collectionTranscoder);
  }
  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncLopGet(java.lang.String, int, int, boolean, boolean)
   */
  @Override
  public CollectionFuture<List<Object>> asyncLopGet(String key, int from, int to,
      boolean withDelete, boolean dropIfEmpty) {
    ListGet get = new ListGet(from, to, withDelete, dropIfEmpty);
    return asyncLopGet(key, get, collectionTranscoder);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncLopGet(java.lang.String, int, boolean, boolean, net.spy.memcached.transcoders.Transcoder)
   */
  @Override
  public <T> CollectionFuture<List<T>> asyncLopGet(String key, int index,
      boolean withDelete, boolean dropIfEmpty, Transcoder<T> tc) {
    ListGet get = new ListGet(index, withDelete, dropIfEmpty);
    return asyncLopGet(key, get, tc);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncLopGet(java.lang.String, int, int, boolean, boolean, net.spy.memcached.transcoders.Transcoder)
   */
  @Override
  public <T> CollectionFuture<List<T>> asyncLopGet(String key, int from, int to,
      boolean withDelete, boolean dropIfEmpty, Transcoder<T> tc) {
    ListGet get = new ListGet(from, to, withDelete, dropIfEmpty);
    return asyncLopGet(key, get, tc);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncSopGet(java.lang.String, int, boolean, boolean)
   */
  @Override
  public CollectionFuture<Set<Object>> asyncSopGet(String key, int count,
      boolean withDelete, boolean dropIfEmpty) {
    SetGet get = new SetGet(count, withDelete, dropIfEmpty);
    return asyncSopGet(key, get, collectionTranscoder);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncSopGet(java.lang.String, int, boolean, boolean, net.spy.memcached.transcoders.Transcoder)
   */
  @Override
  public <T> CollectionFuture<Set<T>> asyncSopGet(String key, int count,
      boolean withDelete, boolean dropIfEmpty, Transcoder<T> tc) {
    SetGet get = new SetGet(count, withDelete, dropIfEmpty);
    return asyncSopGet(key, get, tc);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncBopDelete(java.lang.String, long, boolean)
   */
  @Override
  public CollectionFuture<Boolean> asyncBopDelete(String key, long bkey,
      ElementFlagFilter eFlagFilter, boolean dropIfEmpty) {
    BTreeDelete delete = new BTreeDelete(bkey, false, dropIfEmpty, eFlagFilter);
    return asyncCollectionDelete(key, delete);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncBopDelete(java.lang.String, long, long, int, boolean)
   */
  @Override
  public CollectionFuture<Boolean> asyncBopDelete(String key, long from, long to,
      ElementFlagFilter eFlagFilter, int count, boolean dropIfEmpty) {
    BTreeDelete delete = new BTreeDelete(from, to, count, false, dropIfEmpty, eFlagFilter);
    return asyncCollectionDelete(key, delete);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncLopDelete(java.lang.String, int, boolean)
   */
  @Override
  public CollectionFuture<Boolean> asyncLopDelete(String key, int index,
      boolean dropIfEmpty) {
    ListDelete delete = new ListDelete(index, false, dropIfEmpty);
    return asyncCollectionDelete(key, delete);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncLopDelete(java.lang.String, int, int, boolean)
   */
  @Override
  public CollectionFuture<Boolean> asyncLopDelete(String key, int from, int to,
      boolean dropIfEmpty) {
    ListDelete delete = new ListDelete(from, to, false, dropIfEmpty);
    return asyncCollectionDelete(key, delete);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncSopDelete(java.lang.String, java.lang.Object, boolean)
   */
  @Override
  public CollectionFuture<Boolean> asyncSopDelete(String key, Object value,
      boolean dropIfEmpty) {
    SetDelete<Object> delete = new SetDelete<Object>(value, false, dropIfEmpty);
    delete.setData(collectionTranscoder.encode(value).getData());
    return asyncCollectionDelete(key, delete);
  }
  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncSopDelete(java.lang.String, java.lang.Object, boolean, net.spy.memcached.transcoders.Transcoder)
   */
  @Override
  public <T> CollectionFuture<Boolean> asyncSopDelete(String key, T value,
      boolean dropIfEmpty, Transcoder<T> tc) {
    SetDelete<T> delete = new SetDelete<T>(value, false, dropIfEmpty);
    delete.setData(tc.encode(value).getData());
    return asyncCollectionDelete(key, delete);
  }

  /**
   * Generic count operation for collection items. Public methods for collection items call
   * this method.
   *
   * @param k               collection item's key
   * @param collectionCount operation parameters (element key range, eflags, and so on)
   * @return future holding the element count
   */
  private CollectionFuture<Integer> asyncCollectionCount(final String k,
      final CollectionCount collectionCount) {
    final CountDownLatch latch = new CountDownLatch(1);
    final CollectionFuture<Integer> rv = new CollectionFuture<Integer>(latch, operationTimeout);

    Operation op = opFact.collectionCount(k, collectionCount, new OperationCallback() {
      @Override
      public void receivedStatus(OperationStatus status) {
        CollectionOperationStatus cstatus;
        if (status instanceof CollectionOperationStatus) {
          cstatus = (CollectionOperationStatus) status;
        } else {
          getLogger().warn("Unhandled state: " + status);
          cstatus = new CollectionOperationStatus(status);
        }
        if (cstatus.isSuccess()) {
          rv.set(new Integer(cstatus.getMessage()),
              new CollectionOperationStatus(new OperationStatus(true, "END")));
          return;
        }
        rv.set(null, cstatus);
      }

      @Override
      public void complete() {
        latch.countDown();
      }
    });
    rv.setOperation(op);
    addOp(k, op);
    return rv;
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncBopGetItemCount(java.lang.String, long, long)
   */
  @Override
  public CollectionFuture<Integer> asyncBopGetItemCount(String key, long from, long to,
      ElementFlagFilter eFlagFilter) {
    CollectionCount collectionCount = new BTreeCount(from, to, eFlagFilter);
    return asyncCollectionCount(key, collectionCount);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncBopInsert(java.lang.String, byte[], long, java.lang.Object, boolean, net.spy.memcached.collection.CollectionAttributes)
   */
  @Override
  public CollectionFuture<Boolean> asyncBopInsert(String key, long bkey, byte[] eFlag,
      Object value, CollectionAttributes attributesForCreate) {
    BTreeStore<Object> bTreeStore = new BTreeStore<Object>(value, eFlag,
        (attributesForCreate != null), null, attributesForCreate);
    return asyncCollectionStore(key, String.valueOf(bkey), bTreeStore, collectionTranscoder);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncLopInsert(java.lang.String, int, java.lang.Object, boolean, net.spy.memcached.collection.CollectionAttributes)
   */
  @Override
  public CollectionFuture<Boolean> asyncLopInsert(String key, int index, Object value,
      CollectionAttributes attributesForCreate) {
    ListStore<Object> listStore = new ListStore<Object>(value,
        (attributesForCreate != null), null, attributesForCreate);
    return asyncCollectionStore(key, String.valueOf(index), listStore, collectionTranscoder);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncSopInsert(java.lang.String, java.lang.Object, boolean, net.spy.memcached.collection.CollectionAttributes)
   */
  @Override
  public CollectionFuture<Boolean> asyncSopInsert(String key, Object value,
      CollectionAttributes attributesForCreate) {
    SetStore<Object> setStore = new SetStore<Object>(value,
        (attributesForCreate != null), null, attributesForCreate);
    return asyncCollectionStore(key, "", setStore, collectionTranscoder);
  }
  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncBopInsert(java.lang.String, long, byte[], java.lang.Object, boolean, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder)
   */
  @Override
  public <T> CollectionFuture<Boolean> asyncBopInsert(String key, long bkey, byte[] eFlag,
      T value, CollectionAttributes attributesForCreate, Transcoder<T> tc) {
    BTreeStore<T> bTreeStore = new BTreeStore<T>(value, eFlag,
        (attributesForCreate != null), null, attributesForCreate);
    return asyncCollectionStore(key, String.valueOf(bkey), bTreeStore, tc);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncLopInsert(java.lang.String, int, java.lang.Object, boolean, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder)
   */
  @Override
  public <T> CollectionFuture<Boolean> asyncLopInsert(String key, int index, T value,
      CollectionAttributes attributesForCreate, Transcoder<T> tc) {
    ListStore<T> listStore = new ListStore<T>(value, (attributesForCreate != null), null,
        attributesForCreate);
    return asyncCollectionStore(key, String.valueOf(index), listStore, tc);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncSopInsert(java.lang.String, java.lang.Object, boolean, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder)
   */
  @Override
  public <T> CollectionFuture<Boolean> asyncSopInsert(String key, T value,
      CollectionAttributes attributesForCreate, Transcoder<T> tc) {
    SetStore<T> setStore = new SetStore<T>(value, (attributesForCreate != null), null,
        attributesForCreate);
    return asyncCollectionStore(key, "", setStore, tc);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncBopPipedInsertBulk(java.lang.String, java.util.Map, net.spy.memcached.collection.CollectionAttributes)
   */
  @Override
  public CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncBopPipedInsertBulk(
      String key, Map<Long, Object> elements, CollectionAttributes attributesForCreate) {
    return asyncBopPipedInsertBulk(key, elements, attributesForCreate, collectionTranscoder);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncLopPipedInsertBulk(java.lang.String, int, java.util.List, boolean, net.spy.memcached.collection.CollectionAttributes)
   */
  @Override
  public CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncLopPipedInsertBulk(
      String key, int index, List<Object> valueList,
      CollectionAttributes attributesForCreate) {
    return asyncLopPipedInsertBulk(key, index, valueList, attributesForCreate,
        collectionTranscoder);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncSopPipedInsertBulk(java.lang.String, java.util.List, boolean, net.spy.memcached.collection.CollectionAttributes)
   */
  @Override
  public CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncSopPipedInsertBulk(
      String key, List<Object> valueList, CollectionAttributes attributesForCreate) {
    return asyncSopPipedInsertBulk(key, valueList, attributesForCreate, collectionTranscoder);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncBopPipedInsertBulk(java.lang.String, java.util.Map, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder)
   */
  @Override
  public <T> CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncBopPipedInsertBulk(
      String key, Map<Long, T> elements, CollectionAttributes attributesForCreate,
      Transcoder<T> tc) {
    if (elements.size() <= CollectionPipedStore.MAX_PIPED_ITEM_COUNT) {
      BTreePipedStore<T> store = new BTreePipedStore<T>(key, elements,
          (attributesForCreate != null), attributesForCreate, tc);
      return asyncCollectionPipedStore(key, store);
    } else {
      List<CollectionPipedStore<T>> storeList = new ArrayList<CollectionPipedStore<T>>();

      PartitionedMap<Long, T> list = new PartitionedMap<Long, T>(elements,
          CollectionPipedStore.MAX_PIPED_ITEM_COUNT);

      for (int i = 0; i < list.size(); i++) {
        storeList.add(new BTreePipedStore<T>(key, list.get(i),
            (attributesForCreate != null), attributesForCreate, tc));
      }
      return asyncCollectionPipedStore(key, storeList);
    }
  }
  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncLopPipedInsertBulk(java.lang.String, int, java.util.List, boolean, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder)
   */
  @Override
  public <T> CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncLopPipedInsertBulk(
      String key, int index, List<T> valueList, CollectionAttributes attributesForCreate,
      Transcoder<T> tc) {
    if (valueList.size() <= CollectionPipedStore.MAX_PIPED_ITEM_COUNT) {
      ListPipedStore<T> store = new ListPipedStore<T>(key, index, valueList,
          (attributesForCreate != null), attributesForCreate, tc);
      return asyncCollectionPipedStore(key, store);
    } else {
      PartitionedList<T> list = new PartitionedList<T>(valueList,
          CollectionPipedStore.MAX_PIPED_ITEM_COUNT);

      List<CollectionPipedStore<T>> storeList =
          new ArrayList<CollectionPipedStore<T>>(list.size());

      for (int i = 0; i < list.size(); i++) {
        storeList.add(new ListPipedStore<T>(key, index, list.get(i),
            (attributesForCreate != null), attributesForCreate, tc));
      }
      return asyncCollectionPipedStore(key, storeList);
    }
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncSopPipedInsertBulk(java.lang.String, java.util.List, boolean, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder)
   */
  @Override
  public <T> CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncSopPipedInsertBulk(
      String key, List<T> valueList, CollectionAttributes attributesForCreate,
      Transcoder<T> tc) {
    if (valueList.size() <= CollectionPipedStore.MAX_PIPED_ITEM_COUNT) {
      SetPipedStore<T> store = new SetPipedStore<T>(key, valueList,
          (attributesForCreate != null), attributesForCreate, tc);
      return asyncCollectionPipedStore(key, store);
    } else {
      PartitionedList<T> list = new PartitionedList<T>(valueList,
          CollectionPipedStore.MAX_PIPED_ITEM_COUNT);

      List<CollectionPipedStore<T>> storeList =
          new ArrayList<CollectionPipedStore<T>>(list.size());

      for (int i = 0; i < list.size(); i++) {
        storeList.add(new SetPipedStore<T>(key, list.get(i),
            (attributesForCreate != null), attributesForCreate, tc));
      }
      return asyncCollectionPipedStore(key, storeList);
    }
  }
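  // Illustrative usage, not part of the original source: piped bulk insert into
  // a list. Inserts beyond MAX_PIPED_ITEM_COUNT are split into several piped
  // operations by the overloads above; the returned map holds per-element
  // statuses keyed by element index. The key is hypothetical; index -1 appends
  // to the tail of the list.
  //
  //   List<Object> values = Arrays.asList((Object) "a", "b", "c");
  //   CollectionFuture<Map<Integer, CollectionOperationStatus>> f =
  //       client.asyncLopPipedInsertBulk("list:jobs", -1, values,
  //           new CollectionAttributes());
  //   Map<Integer, CollectionOperationStatus> result = f.get(1000L, TimeUnit.MILLISECONDS);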
  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#flush(java.lang.String)
   */
  @Override
  public OperationFuture<Boolean> flush(final String prefix) {
    return flush(prefix, -1);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#flush(java.lang.String, int)
   */
  @Override
  public OperationFuture<Boolean> flush(final String prefix, final int delay) {
    final AtomicReference<Boolean> flushResult = new AtomicReference<Boolean>(null);
    final ConcurrentLinkedQueue<Operation> ops = new ConcurrentLinkedQueue<Operation>();

    final CountDownLatch blatch = broadcastOp(new BroadcastOpFactory() {
      public Operation newOp(final MemcachedNode n, final CountDownLatch latch) {
        Operation op = opFact.flush(prefix, delay, false, new OperationCallback() {
          public void receivedStatus(OperationStatus s) {
            flushResult.set(s.isSuccess());
          }

          public void complete() {
            latch.countDown();
          }
        });
        ops.add(op);
        return op;
      }
    });

    return new OperationFuture<Boolean>(blatch, flushResult, operationTimeout) {
      @Override
      public boolean cancel(boolean ign) {
        boolean rv = false;
        for (Operation op : ops) {
          op.cancel("by application.");
          rv |= op.getState() == OperationState.WRITING;
        }
        return rv;
      }

      @Override
      public boolean isCancelled() {
        for (Operation op : ops) {
          if (op.isCancelled()) {
            return true;
          }
        }
        return false;
      }

      @Override
      public Boolean get(long duration, TimeUnit units)
          throws InterruptedException, TimeoutException, ExecutionException {
        if (!blatch.await(duration, units)) {
          // whenever timeout occurs, continuous timeout counter will increase by 1.
          for (Operation op : ops) {
            MemcachedConnection.opTimedOut(op);
          }
          throw new CheckedOperationTimeoutException(
              "Timed out waiting for operation. >" + duration, ops);
        } else {
          // continuous timeout counter will be reset
          for (Operation op : ops) {
            MemcachedConnection.opSucceeded(op);
          }
        }

        for (Operation op : ops) {
          if (op != null && op.hasErrored()) {
            throw new ExecutionException(op.getException());
          }
          if (op != null && op.isCancelled()) {
            throw new ExecutionException(new RuntimeException(op.getCancelCause()));
          }
        }
        return flushResult.get();
      }

      @Override
      public boolean isDone() {
        boolean rv = true;
        for (Operation op : ops) {
          rv &= op.getState() == OperationState.COMPLETE;
        }
        return rv || isCancelled();
      }
    };
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncBopSortMergeGet(java.util.List, long, long, int, int)
   */
  @Override
  public SMGetFuture<List<SMGetElement<Object>>> asyncBopSortMergeGet(List<String> keyList,
      long from, long to, ElementFlagFilter eFlagFilter, int offset, int count) {
    if (keyList == null || keyList.isEmpty()) {
      throw new IllegalArgumentException("Key list is empty.");
    }
    if (offset < 0) {
      throw new IllegalArgumentException("Offset must be 0 or positive integer.");
    }
    if (count < 1) {
      throw new IllegalArgumentException("Count must be larger than 0.");
    }
    if (offset + count > MAX_SMGET_COUNT) {
      throw new IllegalArgumentException(
          "The sum of offset and count must not exceed a maximum of "
              + MAX_SMGET_COUNT + ".");
    }

    Map<String, List<String>> arrangedKey = groupingKeys(keyList, smgetKeyChunkSize);
    List<BTreeSMGet<Object>> smGetList =
        new ArrayList<BTreeSMGet<Object>>(arrangedKey.size());
    for (List<String> v : arrangedKey.values()) {
      if (arrangedKey.size() > 1) {
        smGetList.add(new BTreeSMGetWithLongTypeBkeyOld<Object>(
            v, from, to, eFlagFilter, 0, offset + count));
      } else {
        smGetList.add(new BTreeSMGetWithLongTypeBkeyOld<Object>(
            v, from, to, eFlagFilter, offset, count));
      }
    }
    return smget(smGetList, offset, count, (from > to), collectionTranscoder);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncBopSortMergeGet(java.util.List, long, long, int, int, boolean)
   */
  @Override
  public SMGetFuture<List<SMGetElement<Object>>> asyncBopSortMergeGet(List<String> keyList,
      long from, long to, ElementFlagFilter eFlagFilter, int count, SMGetMode smgetMode) {
    if (keyList == null || keyList.isEmpty()) {
      throw new IllegalArgumentException("Key list is empty.");
    }
    if (count < 1) {
      throw new IllegalArgumentException("Count must be larger than 0.");
    }
    if (count > MAX_SMGET_COUNT) {
      throw new IllegalArgumentException(
          "The count must not exceed a maximum of " + MAX_SMGET_COUNT + ".");
    }

    Map<String, List<String>> arrangedKey = groupingKeys(keyList, smgetKeyChunkSize);
    List<BTreeSMGet<Object>> smGetList =
        new ArrayList<BTreeSMGet<Object>>(arrangedKey.size());
    for (List<String> v : arrangedKey.values()) {
      smGetList.add(new BTreeSMGetWithLongTypeBkey<Object>(
          v, from, to, eFlagFilter, count, smgetMode));
    }
    return smget(smGetList, count, (from > to), collectionTranscoder, smgetMode);
  }
 *
 * @param keyList list of keys
 * @param groupSize max size of the key group (number of keys)
 * @return map of group name (memcached node + sequence number) and keys in the group
 */
private Map<String, List<String>> groupingKeys(List<String> keyList, int groupSize) {
  Map<String, Integer> chunkCount = new HashMap<String, Integer>();
  Map<String, List<String>> result = new HashMap<String, List<String>>();
  Set<String> keySet = new HashSet<String>();

  MemcachedConnection conn = getMemcachedConnection();

  for (String k : keyList) {
    validateKey(k);
    if (!keySet.add(k)) {
      throw new IllegalArgumentException("Duplicate keys exist in key list.");
    }
    String node = conn.findNodeByKey(k).getSocketAddress().toString();
    int cc;
    if (chunkCount.containsKey(node)) {
      cc = chunkCount.get(node);
    } else {
      cc = 0;
      chunkCount.put(node, 0);
    }
    String resultKey = node + cc;

    List<String> arrangedKeyList = null;
    if (result.containsKey(resultKey)) {
      if (result.get(resultKey).size() >= groupSize) {
        arrangedKeyList = new ArrayList<String>();
        cc++;
        result.put(node + cc, arrangedKeyList);
        chunkCount.put(node, cc);
      } else {
        arrangedKeyList = result.get(resultKey);
      }
    } else {
      arrangedKeyList = new ArrayList<String>();
      result.put(resultKey, arrangedKeyList);
    }
    arrangedKeyList.add(k);
  }
  return result;
}

/**
 * Get the sublist of elements from the smget result.
 *
 * @param mergedResult smget result (list of elements)
 * @param offset start index, negative offset indicates "start from the tail"
 * @param count number of elements to get
 * @return list of elements
 */
private <T> List<SMGetElement<T>> getSubList(
    final List<SMGetElement<T>> mergedResult, int offset, int count) {
  if (mergedResult.size() > count) {
    int toIndex = (count + offset > mergedResult.size())
        ? mergedResult.size() : count + offset;
    if (offset > toIndex) {
      return Collections.emptyList();
    }
    return mergedResult.subList(offset, toIndex);
  } else {
    if (offset > 0) {
      int toIndex = (count + offset > mergedResult.size())
          ? mergedResult.size() : count + offset;
      if (offset > toIndex) {
        return Collections.emptyList();
      }
      return mergedResult.subList(offset, toIndex);
    } else {
      return mergedResult;
    }
  }
}

/**
 * Generic smget operation for b+tree items. Public smget methods call this method.
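 * <p>
 * Editor's sketch (not part of the original source; assumes an ArcusClient
 * instance named {@code client}):
 * <pre>{@code
 * SMGetFuture<List<SMGetElement<Object>>> f = client.asyncBopSortMergeGet(
 *     keyList, 0L, 100L, ElementFlagFilter.DO_NOT_FILTER, 0, 10);
 * List<SMGetElement<Object>> elements = f.get(700L, TimeUnit.MILLISECONDS);
 * }</pre>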
* * @param smGetList smget parameters (keys, eflags, and so on) * @param offset start index of the elements * @param count number of elements to fetch * @param reverse forward or backward * @param tc transcoder to serialize and unserialize element value * @return future holding the smget result (elements, return codes, and so on) */ private SMGetFuture>> smget( final List> smGetList, final int offset, final int count, final boolean reverse, final Transcoder tc) { final String END = "END"; final String TRIMMED = "TRIMMED"; final String DUPLICATED = "DUPLICATED"; final String DUPLICATED_TRIMMED = "DUPLICATED_TRIMMED"; final CountDownLatch blatch = new CountDownLatch(smGetList.size()); final ConcurrentLinkedQueue ops = new ConcurrentLinkedQueue(); final Map missedKeys = Collections.synchronizedMap(new HashMap()); final List mergedTrimmedKeys = Collections.synchronizedList(new ArrayList()); final List missedKey = Collections.synchronizedList(new ArrayList()); final int totalResultElementCount = count + offset; final List> mergedResult = Collections.synchronizedList(new ArrayList>(totalResultElementCount)); final ReentrantLock lock = new ReentrantLock(); final List resultOperationStatus = Collections.synchronizedList(new ArrayList(1)); final List failedOperationStatus = Collections.synchronizedList(new ArrayList(1)); final Set totalBkey = new TreeSet(); final AtomicBoolean stopCollect = new AtomicBoolean(false); for (BTreeSMGet smGet : smGetList) { Operation op = opFact.bopsmget(smGet, new BTreeSortMergeGetOperationOld.Callback() { final List> eachResult = new ArrayList>(); private void addTotalBkey(List> smgetresult) { for (SMGetElement each : smgetresult) { if (each.getBkeyByObject() instanceof byte[]) { totalBkey.add(new ByteArrayBKey((byte[]) each.getBkeyByObject())); } else { totalBkey.add(each.getBkeyByObject()); } } } private boolean addTotalBkey(Object bkey) { if (bkey instanceof byte[]) { return totalBkey.add(new ByteArrayBKey((byte[])bkey)); } else { return totalBkey.add(bkey); } } @Override public void receivedStatus(OperationStatus status) { if (status.isSuccess()) { resultOperationStatus.add(status); } else { stopCollect.set(true); mergedResult.clear(); failedOperationStatus.add(status); } if (status.isSuccess()) { lock.lock(); try { // merged result is empty, add all. if (smGetList.size() == 1) { addTotalBkey(eachResult); mergedResult.addAll(eachResult); } else { // merged result is empty, add all. if (mergedResult.size() == 0) { addTotalBkey(eachResult); mergedResult.addAll(eachResult); } else { // remove trimmed area if (TRIMMED.equals(status.getMessage())) { } // do sort merge for (SMGetElement result : eachResult) { boolean added = false; for (int i = 0; i < mergedResult.size(); i++) { if (i > totalResultElementCount) { added = true; break; } if ((reverse) ? (0 < result.compareTo(mergedResult.get(i))) : 0 > result .compareTo(mergedResult.get(i))) { if (!addTotalBkey(result.getBkeyByObject())) { resultOperationStatus.add(new OperationStatus(true, "DUPLICATED")); } mergedResult.add(i, result); added = true; break; } } if (!added) { if (!addTotalBkey(result.getBkeyByObject())) { resultOperationStatus.add(new OperationStatus(true, "DUPLICATED")); } mergedResult.add(result); } } } } } finally { lock.unlock(); } } else { getLogger().warn("SMGetFailed. 
status=%s", status); } } @Override public void complete() { blatch.countDown(); } @Override public void gotData(String key, Object subkey, int flags, byte[] data) { if (stopCollect.get()) return; if (subkey instanceof Long) { eachResult.add(new SMGetElement(key, (Long) subkey, tc.decode(new CachedData(flags, data, tc.getMaxSize())))); } else if (subkey instanceof byte[]) { eachResult.add(new SMGetElement(key, (byte[]) subkey, tc.decode(new CachedData(flags, data, tc.getMaxSize())))); } } @Override public void gotMissedKey(byte[] data) { missedKey.add(new String(data)); } }); ops.add(op); addOp(smGet.getRepresentKey(), op); } return new SMGetFuture>>(ops, operationTimeout) { @Override public List> get(long duration, TimeUnit units) throws InterruptedException, TimeoutException, ExecutionException { if (!blatch.await(duration, units)) { for (Operation op : ops) { MemcachedConnection.opTimedOut(op); } throw new CheckedOperationTimeoutException( "Timed out waiting for operation", ops); } else { // continuous timeout counter will be reset for (Operation op : ops) { MemcachedConnection.opSucceeded(op); } } for (Operation op : ops) { if (op != null && op.hasErrored()) { throw new ExecutionException(op.getException()); } if (op.isCancelled()) { throw new ExecutionException(new RuntimeException( op.getCancelCause())); } } if (smGetList.size() == 1) return mergedResult; return getSubList(mergedResult, offset, count); } @Override public List getMissedKeyList() { return missedKey; } @Override public Map getMissedKeys() { return missedKeys; } @Override public List getTrimmedKeys() { return mergedTrimmedKeys; } @Override public CollectionOperationStatus getOperationStatus() { if (failedOperationStatus.size() > 0) { return new CollectionOperationStatus( failedOperationStatus.get(0)); } OperationStatus end = null; OperationStatus duplicated = null; OperationStatus trimmed = null; OperationStatus duplicatedTrimmed = null; for (OperationStatus status : resultOperationStatus) { if (END.equals(status.getMessage())) end = status; else if (DUPLICATED.equals(status.getMessage())) duplicated = status; else if (TRIMMED.equals(status.getMessage())) trimmed = status; else if (DUPLICATED_TRIMMED.equals(status.getMessage())) duplicatedTrimmed = status; } if (end == null && duplicated == null && trimmed == null && duplicatedTrimmed == null) { getLogger().warn("[sort merge get] invalid result status."); return null; } if (duplicatedTrimmed == null && duplicated != null && trimmed != null) duplicatedTrimmed = new OperationStatus(true, "DUPLICATED_TRIMMED"); if (duplicatedTrimmed != null) return new CollectionOperationStatus(duplicatedTrimmed); else if (duplicated != null) return new CollectionOperationStatus(duplicated); else if (trimmed != null) return new CollectionOperationStatus(trimmed); else return new CollectionOperationStatus(end); } }; } private SMGetFuture>> smget( final List> smGetList, final int count, final boolean reverse, final Transcoder tc, final SMGetMode smgetMode) { final String END = "END"; final String TRIMMED = "TRIMMED"; final String DUPLICATED = "DUPLICATED"; final String DUPLICATED_TRIMMED = "DUPLICATED_TRIMMED"; final CountDownLatch blatch = new CountDownLatch(smGetList.size()); final ConcurrentLinkedQueue ops = new ConcurrentLinkedQueue(); final Map missedKeys = Collections.synchronizedMap(new HashMap()); final List missedKeyList = Collections.synchronizedList(new ArrayList()); final int totalResultElementCount = count; final List> mergedResult = Collections.synchronizedList(new 
ArrayList>(totalResultElementCount)); final List mergedTrimmedKeys = Collections.synchronizedList(new ArrayList()); final ReentrantLock lock = new ReentrantLock(); final List resultOperationStatus = Collections.synchronizedList(new ArrayList(1)); final List failedOperationStatus = Collections.synchronizedList(new ArrayList(1)); final Set totalBkey = new TreeSet(); final AtomicBoolean stopCollect = new AtomicBoolean(false); /* if processedSMGetCount is 0, then all smget is done */ final AtomicInteger processedSMGetCount = new AtomicInteger(smGetList.size()); for (BTreeSMGet smGet : smGetList) { Operation op = opFact.bopsmget(smGet, new BTreeSortMergeGetOperation.Callback() { final List> eachResult = new ArrayList>(); final List eachTrimmedResult = new ArrayList(); private void addTotalBkey(List> smgetresult) { for (SMGetElement each : smgetresult) { if (each.getBkeyByObject() instanceof byte[]) { totalBkey.add(new ByteArrayBKey((byte[]) each.getBkeyByObject())); } else { totalBkey.add(each.getBkeyByObject()); } } } private boolean addTotalBkey(Object bkey) { if (bkey instanceof byte[]) { return totalBkey.add(new ByteArrayBKey((byte[])bkey)); } else { return totalBkey.add(bkey); } } @Override public void receivedStatus(OperationStatus status) { processedSMGetCount.decrementAndGet(); if (status.isSuccess()) { resultOperationStatus.add(status); } else { stopCollect.set(true); mergedResult.clear(); mergedTrimmedKeys.clear(); failedOperationStatus.add(status); } if (status.isSuccess()) { lock.lock(); try { if (mergedResult.size() == 0) { /* merged result is empty, add all */ if (smGetList.size() > 1) { addTotalBkey(eachResult); } mergedResult.addAll(eachResult); } else { /* do sort merge */ boolean duplicated; int idx, pos = 0; for (SMGetElement result : eachResult) { for (idx = pos; idx < mergedResult.size(); idx++) { if ((reverse) ? (0 < result.compareTo(mergedResult.get(idx))) : (0 > result.compareTo(mergedResult.get(idx)))) break; } if (idx >= totalResultElementCount) { /* At this point, following conditions are met. * - mergedResult.size() == totalResultElementCount && * - The current of eachResult is * behind of the last of mergedResult. * Then, all the next elements of eachResult are * definitely behind of the last of mergedResult. * So, stop the current sort-merge. */ break; } if (!addTotalBkey(result.getBkeyByObject())) { resultOperationStatus.add(new OperationStatus(true, "DUPLICATED")); duplicated = true; } else { duplicated = false; } if (smgetMode != SMGetMode.UNIQUE || !duplicated) { mergedResult.add(idx, result); if (mergedResult.size() > totalResultElementCount) mergedResult.remove(totalResultElementCount); pos = idx + 1; } else { pos = idx; } } } if (eachTrimmedResult.size() > 0) { if (mergedTrimmedKeys.size() == 0) { mergedTrimmedKeys.addAll(eachTrimmedResult); } else { /* do sort merge trimmed list */ int idx, pos = 0; for (SMGetTrimKey eTrim : eachTrimmedResult) { for (idx = pos; idx < mergedTrimmedKeys.size(); idx++) { if ((reverse) ? 
(0 < eTrim.compareTo(mergedTrimmedKeys.get(idx)))
                : 0 > eTrim.compareTo(mergedTrimmedKeys.get(idx))) {
              break;
            }
          }
          mergedTrimmedKeys.add(idx, eTrim);
          pos = idx + 1;
        }
      }
    }

    /* remove useless trimmed keys */
    if (mergedTrimmedKeys.size() > 0 && processedSMGetCount.get() == 0
        && count <= mergedResult.size()) {
      SMGetElement<T> lastElement = mergedResult.get(count - 1);
      SMGetTrimKey lastTrimKey = new SMGetTrimKey(
          lastElement.getKey(), lastElement.getBkeyByObject());
      for (int idx = mergedTrimmedKeys.size() - 1; idx >= 0; idx--) {
        SMGetTrimKey me = mergedTrimmedKeys.get(idx);
        if ((reverse) ? (0 >= me.compareTo(lastTrimKey))
                      : 0 <= me.compareTo(lastTrimKey)) {
          mergedTrimmedKeys.remove(idx);
        } else {
          break;
        }
      }
    }
  } finally {
    lock.unlock();
  }
} else {
  getLogger().warn("SMGetFailed. status=%s", status);
}
}

@Override
public void complete() {
  blatch.countDown();
}

@Override
public void gotData(String key, Object subkey, int flags, byte[] data) {
  if (stopCollect.get()) {
    return;
  }
  if (subkey instanceof Long) {
    eachResult.add(new SMGetElement<T>(key, (Long) subkey,
        tc.decode(new CachedData(flags, data, tc.getMaxSize()))));
  } else if (subkey instanceof byte[]) {
    eachResult.add(new SMGetElement<T>(key, (byte[]) subkey,
        tc.decode(new CachedData(flags, data, tc.getMaxSize()))));
  }
}

@Override
public void gotMissedKey(String key, OperationStatus cause) {
  if (cause.getMessage().equals("UNDEFINED")) {
    missedKeyList.add(key);
  } else {
    missedKeys.put(key, new CollectionOperationStatus(cause));
  }
}

@Override
public void gotTrimmedKey(String key, Object subkey) {
  if (stopCollect.get()) {
    return;
  }
  if (subkey instanceof Long) {
    eachTrimmedResult.add(new SMGetTrimKey(key, (Long) subkey));
  } else if (subkey instanceof byte[]) {
    eachTrimmedResult.add(new SMGetTrimKey(key, (byte[]) subkey));
  }
}
});
ops.add(op);
addOp(smGet.getRepresentKey(), op);
}

return new SMGetFuture<List<SMGetElement<T>>>(ops, operationTimeout) {
  @Override
  public List<SMGetElement<T>> get(long duration, TimeUnit units)
      throws InterruptedException, TimeoutException, ExecutionException {
    if (!blatch.await(duration, units)) {
      for (Operation op : ops) {
        MemcachedConnection.opTimedOut(op);
      }
      throw new CheckedOperationTimeoutException(
          "Timed out waiting for operation", ops);
    } else {
      // continuous timeout counter will be reset
      for (Operation op : ops) {
        MemcachedConnection.opSucceeded(op);
      }
    }
    for (Operation op : ops) {
      if (op != null && op.hasErrored()) {
        throw new ExecutionException(op.getException());
      }
      if (op.isCancelled()) {
        throw new ExecutionException(new RuntimeException(op.getCancelCause()));
      }
    }
    if (smGetList.size() == 1) {
      return mergedResult;
    }
    return getSubList(mergedResult, 0, count);
  }

  @Override
  public Map<String, CollectionOperationStatus> getMissedKeys() {
    return missedKeys;
  }

  @Override
  public List<String> getMissedKeyList() {
    return missedKeyList;
  }

  @Override
  public List<SMGetTrimKey> getTrimmedKeys() {
    return mergedTrimmedKeys;
  }

  @Override
  public CollectionOperationStatus getOperationStatus() {
    if (failedOperationStatus.size() > 0) {
      return new CollectionOperationStatus(failedOperationStatus.get(0));
    }
    OperationStatus end = null;
    OperationStatus duplicated = null;
    OperationStatus trimmed = null;
    OperationStatus duplicatedTrimmed = null;
    for (OperationStatus status : resultOperationStatus) {
      if (END.equals(status.getMessage())) {
        end = status;
      } else if (DUPLICATED.equals(status.getMessage())) {
        duplicated = status;
      } else if (TRIMMED.equals(status.getMessage())) {
        trimmed = status;
      } else if (DUPLICATED_TRIMMED.equals(status.getMessage())) {
        duplicatedTrimmed = status;
      }
    }
    if (end == null && duplicated == null
        && trimmed == null && duplicatedTrimmed == null) {
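// Editor's note: no operation reported one of the recognized final statuses
// (END, DUPLICATED, TRIMMED, DUPLICATED_TRIMMED), so the merged result
// cannot be classified and null is returned below.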
getLogger().warn("[sort merge get] invalid result status."); return null; } if (duplicatedTrimmed == null && duplicated != null && trimmed != null) duplicatedTrimmed = new OperationStatus(true, "DUPLICATED_TRIMMED"); if (duplicatedTrimmed != null) return new CollectionOperationStatus(duplicatedTrimmed); else if (duplicated != null) return new CollectionOperationStatus(duplicated); else if (trimmed != null) return new CollectionOperationStatus(trimmed); else return new CollectionOperationStatus(end); } }; } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopUpsert(java.lang.String, long, java.lang.Object, byte[], boolean, net.spy.memcached.collection.CollectionAttributes) */ @Override public CollectionFuture asyncBopUpsert(String key, long bkey, byte[] elementFlag, Object value, CollectionAttributes attributesForCreate) { BTreeUpsert bTreeStore = new BTreeUpsert(value, elementFlag, (attributesForCreate != null), null, attributesForCreate); return asyncCollectionUpsert(key, String.valueOf(bkey), bTreeStore, collectionTranscoder); } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopUpsert(java.lang.String, long, java.lang.Object, byte[], boolean, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder) */ @Override public CollectionFuture asyncBopUpsert(String key, long bkey, byte[] elementFlag, T value, CollectionAttributes attributesForCreate, Transcoder tc) { BTreeUpsert bTreeStore = new BTreeUpsert(value, elementFlag, (attributesForCreate != null), null, attributesForCreate); return asyncCollectionUpsert(key, String.valueOf(bkey), bTreeStore, tc); } private CollectionFuture asyncCollectionUpsert( final String key, final String subkey, final CollectionStore collectionStore, Transcoder tc) { CachedData co = tc.encode(collectionStore.getValue()); collectionStore.setFlags(co.getFlags()); final CountDownLatch latch = new CountDownLatch(1); final CollectionFuture rv = new CollectionFuture( latch, operationTimeout); Operation op = opFact.collectionUpsert(key, subkey, collectionStore, co.getData(), new OperationCallback() { public void receivedStatus(OperationStatus status) { CollectionOperationStatus cstatus; if (status instanceof CollectionOperationStatus) { cstatus = (CollectionOperationStatus) status; } else { getLogger().warn("Unhandled state: " + status); cstatus = new CollectionOperationStatus(status); } rv.set(cstatus.isSuccess(), cstatus); if (!cstatus.isSuccess() && getLogger().isDebugEnabled()) { getLogger().debug( "Insertion to the collection failed : " + cstatus.getMessage() + " (type=" + collectionStore.getClass() .getName() + ", key=" + key + ", subkey=" + subkey + ", value=" + collectionStore.getValue() + ")"); } } public void complete() { latch.countDown(); } }); rv.setOperation(op); addOp(key, op); return rv; } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopUpdate(java.lang.String, long, java.lang.Object, net.spy.memcached.collection.ElementFlagUpdate) */ @Override public CollectionFuture asyncBopUpdate(String key, long bkey, ElementFlagUpdate eFlagUpdate, Object value) { BTreeUpdate collectionUpdate = new BTreeUpdate( value, eFlagUpdate, false); return asyncCollectionUpdate(key, String.valueOf(bkey), collectionUpdate, collectionTranscoder); } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopUpdate(java.lang.String, long, java.lang.Object, net.spy.memcached.collection.ElementFlagUpdate, net.spy.memcached.transcoders.Transcoder) */ @Override public CollectionFuture 
asyncBopUpdate(String key, long bkey, ElementFlagUpdate eFlagUpdate, T value, Transcoder tc) { BTreeUpdate collectionUpdate = new BTreeUpdate(value, eFlagUpdate, false); return asyncCollectionUpdate(key, String.valueOf(bkey), collectionUpdate, tc); } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopUpdate(java.lang.String, byte[], net.spy.memcached.collection.ElementFlagUpdate, java.lang.Object) */ @Override public CollectionFuture asyncBopUpdate(String key, byte[] bkey, ElementFlagUpdate eFlagUpdate, Object value) { BTreeUpdate collectionUpdate = new BTreeUpdate( value, eFlagUpdate, false); return asyncCollectionUpdate(key, BTreeUtil.toHex(bkey), collectionUpdate, collectionTranscoder); } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopUpdate(java.lang.String, byte[], net.spy.memcached.collection.ElementFlagUpdate, java.lang.Object, net.spy.memcached.transcoders.Transcoder) */ @Override public CollectionFuture asyncBopUpdate(String key, byte[] bkey, ElementFlagUpdate eFlagUpdate, T value, Transcoder tc) { BTreeUpdate collectionUpdate = new BTreeUpdate(value, eFlagUpdate, false); return asyncCollectionUpdate(key, BTreeUtil.toHex(bkey), collectionUpdate, tc); } /** * Generic update operation for collection items. Public methods for collection items call this method. * * @param key collection item's key * @param subkey element key (list index, b+tree bkey) * @param collectionUpdate operation parameters (element value and so on) * @param tc transcoder to serialize and unserialize value * @return future holding the success/failure of the operation */ private CollectionFuture asyncCollectionUpdate( final String key, final String subkey, final CollectionUpdate collectionUpdate, Transcoder tc) { CachedData co = null; if (collectionUpdate.getNewValue() != null) { co = tc.encode(collectionUpdate.getNewValue()); collectionUpdate.setFlags(co.getFlags()); } final CountDownLatch latch = new CountDownLatch(1); final CollectionFuture rv = new CollectionFuture( latch, operationTimeout); Operation op = opFact.collectionUpdate(key, subkey, collectionUpdate, ((co == null) ? 
null : co.getData()), new OperationCallback() { public void receivedStatus(OperationStatus status) { CollectionOperationStatus cstatus; if (status instanceof CollectionOperationStatus) { cstatus = (CollectionOperationStatus) status; } else { getLogger().warn("Unhandled state: " + status); cstatus = new CollectionOperationStatus(status); } rv.set(cstatus.isSuccess(), cstatus); if (!cstatus.isSuccess() && getLogger().isDebugEnabled()) { getLogger().debug( "Insertion to the collection failed : " + cstatus.getMessage() + " (type=" + collectionUpdate.getClass() .getName() + ", key=" + key + ", subkey=" + subkey + ", value=" + collectionUpdate.getNewValue() + ")"); } } public void complete() { latch.countDown(); } }); rv.setOperation(op); addOp(key, op); return rv; } /* * (non-Javadoc) * * @see net.spy.memcached.ArcusClientIF#asyncBopUpdate(java.lang.String, * byte[], net.spy.memcached.collection.ElementFlagUpdate, java.lang.Object, * net.spy.memcached.transcoders.Transcoder) */ @Override public CollectionFuture> asyncBopPipedUpdateBulk( String key, List> elements) { return asyncBopPipedUpdateBulk(key, elements, collectionTranscoder); } @Override public CollectionFuture> asyncBopPipedUpdateBulk( String key, List> elements, Transcoder tc) { if (elements.size() <= CollectionPipedUpdate.MAX_PIPED_ITEM_COUNT) { CollectionPipedUpdate collectionPipedUpdate = new BTreePipedUpdate( key, elements, tc); return asyncCollectionPipedUpdate(key, collectionPipedUpdate); } else { PartitionedList> list = new PartitionedList>( elements, CollectionPipedUpdate.MAX_PIPED_ITEM_COUNT); List> collectionPipedUpdateList = new ArrayList>( list.size()); for (int i = 0; i < list.size(); i++) { collectionPipedUpdateList.add(new BTreePipedUpdate(key, list .get(i), tc)); } return asyncCollectionPipedUpdate(key, collectionPipedUpdateList); } } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopInsert(java.lang.String, byte[], java.lang.Object, byte[], boolean, net.spy.memcached.collection.CollectionAttributes) */ @Override public CollectionFuture asyncBopInsert(String key, byte[] bkey, byte[] eFlag, Object value, CollectionAttributes attributesForCreate) { BTreeStore bTreeStore = new BTreeStore(value, eFlag, (attributesForCreate != null), null, attributesForCreate); return asyncCollectionStore(key, BTreeUtil.toHex(bkey), bTreeStore, collectionTranscoder); } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopInsert(java.lang.String, byte[], java.lang.Object, byte[], boolean, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder) */ @Override public CollectionFuture asyncBopInsert(String key, byte[] bkey, byte[] eFlag, T value, CollectionAttributes attributesForCreate, Transcoder tc) { BTreeStore bTreeStore = new BTreeStore(value, eFlag, (attributesForCreate != null), null, attributesForCreate); return asyncCollectionStore(key, BTreeUtil.toHex(bkey), bTreeStore, tc); } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopGet(java.lang.String, byte[], byte[], int, int, boolean, boolean, net.spy.memcached.collection.ElementFlagFilter) */ @Override public CollectionFuture>> asyncBopGet( String key, byte[] bkey, ElementFlagFilter eFlagFilter, boolean withDelete, boolean dropIfEmpty) { ExtendedBTreeGet get = new ExtendedBTreeGet(bkey, bkey, 0, 1, withDelete, dropIfEmpty, eFlagFilter); return asyncBopExtendedGet(key, get, false, collectionTranscoder); } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopGet(java.lang.String, byte[], 
net.spy.memcached.collection.ElementFlagFilter, boolean, boolean, net.spy.memcached.transcoders.Transcoder) */ @Override public CollectionFuture>> asyncBopGet( String key, byte[] bkey, ElementFlagFilter eFlagFilter, boolean withDelete, boolean dropIfEmpty, Transcoder tc) { ExtendedBTreeGet get = new ExtendedBTreeGet(bkey, bkey, 0, 1, withDelete, dropIfEmpty, eFlagFilter); return asyncBopExtendedGet(key, get, false, tc); } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopGet(java.lang.String, byte[], byte[], int, int, boolean, boolean, net.spy.memcached.collection.ElementFlagFilter) */ @Override public CollectionFuture>> asyncBopGet(String key, byte[] from, byte[] to, ElementFlagFilter eFlagFilter, int offset, int count, boolean withDelete, boolean dropIfEmpty) { ExtendedBTreeGet get = new ExtendedBTreeGet(from, to, offset, count, withDelete, dropIfEmpty, eFlagFilter); boolean reverse = BTreeUtil.compareByteArraysInLexOrder(from, to) > 0; return asyncBopExtendedGet(key, get, reverse, collectionTranscoder); } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopGet(java.lang.String, byte[], byte[], net.spy.memcached.collection.ElementFlagFilter, int, int, boolean, boolean, net.spy.memcached.transcoders.Transcoder) */ @Override public CollectionFuture>> asyncBopGet( String key, byte[] from, byte[] to, ElementFlagFilter eFlagFilter, int offset, int count, boolean withDelete, boolean dropIfEmpty, Transcoder tc) { ExtendedBTreeGet get = new ExtendedBTreeGet(from, to, offset, count, withDelete, dropIfEmpty, eFlagFilter); boolean reverse = BTreeUtil.compareByteArraysInLexOrder(from, to) > 0; return asyncBopExtendedGet(key, get, reverse, tc); } /** * Generic get operation for b+tree items using byte-array type bkeys. Public methods for b+tree items call this method. * * @param k b+tree item's key * @param collectionGet operation parameters (element key and so on) * @param reverse forward or backward * @param tc transcoder to serialize and unserialize value * @return future holding the map of the fetched element and its byte-array bkey */ private CollectionFuture>> asyncBopExtendedGet( final String k, final CollectionGet collectionGet, final boolean reverse, final Transcoder tc) { final CountDownLatch latch = new CountDownLatch(1); final CollectionFuture>> rv = new CollectionFuture>>( latch, operationTimeout); Operation op = opFact.collectionGet2(k, collectionGet, new ExtendedBTreeGetOperation.Callback() { TreeMap> map = new ByteArrayTreeMap>( (reverse) ? 
Collections.reverseOrder() : null); public void receivedStatus(OperationStatus status) { CollectionOperationStatus cstatus; if (status instanceof CollectionOperationStatus) { cstatus = (CollectionOperationStatus) status; } else { getLogger().warn("Unhandled state: " + status); cstatus = new CollectionOperationStatus(status); } if (cstatus.isSuccess()) { rv.set(map, cstatus); return; } switch (cstatus.getResponse()) { case NOT_FOUND: rv.set(null, cstatus); if (getLogger().isDebugEnabled()) { getLogger().debug("Key(%s) not found : %s", k, cstatus); } break; case NOT_FOUND_ELEMENT: rv.set(map, cstatus); if (getLogger().isDebugEnabled()) { getLogger().debug("Element(%s) not found : %s", k, cstatus); } break; case UNREADABLE: rv.set(null, cstatus); if (getLogger().isDebugEnabled()) { getLogger().debug("Collection(%s) is not readable : %s", k, cstatus); } break; } } public void complete() { latch.countDown(); } public void gotData(String key, byte[] subkey, byte[] elementFlag, int flags, byte[] data) { assert key.equals(k) : "Wrong key returned"; Element element = new Element(subkey, tc .decode(new CachedData(flags, data, tc .getMaxSize())), elementFlag); map.put(new ByteArrayBKey(subkey), element); } }); rv.setOperation(op); addOp(k, op); return rv; } @Override public CollectionFuture>> asyncBopGetByPosition( String key, BTreeOrder order, int pos) { BTreeGetByPosition get = new BTreeGetByPosition(order, pos); boolean reverse = false; return asyncBopGetByPosition(key, get, reverse, collectionTranscoder); } @Override public CollectionFuture>> asyncBopGetByPosition( String key, BTreeOrder order, int pos, Transcoder tc) { BTreeGetByPosition get = new BTreeGetByPosition(order, pos); boolean reverse = false; return asyncBopGetByPosition(key, get, reverse, tc); } @Override public CollectionFuture>> asyncBopGetByPosition( String key, BTreeOrder order, int from, int to) { BTreeGetByPosition get = new BTreeGetByPosition(order, from, to); boolean reverse = from > to; return asyncBopGetByPosition(key, get, reverse, collectionTranscoder); } @Override public CollectionFuture>> asyncBopGetByPosition( String key, BTreeOrder order, int from, int to, Transcoder tc) { BTreeGetByPosition get = new BTreeGetByPosition(order, from, to); boolean reverse = from > to; return asyncBopGetByPosition(key, get, reverse, tc); } /** * Generic get operation for b+tree items using positions. Public methods for b+tree items call this method. * * @param k b+tree item's key * @param get operation parameters (element position and so on) * @param reverse forward or backward * @param tc transcoder to serialize and unserialize value * @return future holding the map of the fetched element and its position */ private CollectionFuture>> asyncBopGetByPosition( final String k, final BTreeGetByPosition get, final boolean reverse, final Transcoder tc) { // Check for invalid arguments (not to get CLIENT_ERROR) if (get.getOrder() == null) { throw new IllegalArgumentException("BTreeOrder must not be null."); } if (get.getPosFrom() < 0 || get.getPosTo() < 0) { throw new IllegalArgumentException("Position must be 0 or positive integer."); } final CountDownLatch latch = new CountDownLatch(1); final CollectionFuture>> rv = new CollectionFuture>>( latch, operationTimeout); Operation op = opFact.bopGetByPosition(k, get, new BTreeGetByPositionOperation.Callback() { TreeMap> map = new TreeMap>( (reverse) ? 
Collections.reverseOrder() : null); public void receivedStatus(OperationStatus status) { CollectionOperationStatus cstatus; if (status instanceof CollectionOperationStatus) { cstatus = (CollectionOperationStatus) status; } else { getLogger().warn("Unhandled state: " + status); cstatus = new CollectionOperationStatus(status); } if (cstatus.isSuccess()) { rv.set(map, cstatus); return; } switch (cstatus.getResponse()) { case NOT_FOUND: rv.set(null, cstatus); if (getLogger().isDebugEnabled()) { getLogger().debug("Key(%s) not found : %s", k, cstatus); } break; case NOT_FOUND_ELEMENT: rv.set(map, cstatus); if (getLogger().isDebugEnabled()) { getLogger().debug("Element(%s) not found : %s", k, cstatus); } break; case UNREADABLE: rv.set(null, cstatus); if (getLogger().isDebugEnabled()) { getLogger().debug("Collection(%s) is not readable : %s", k, cstatus); } break; case TYPE_MISMATCH: rv.set(null, cstatus); if (getLogger().isDebugEnabled()) { getLogger().debug("Collection(%s) is not a B+Tree : %s", k, cstatus); } break; default: getLogger().warn("Unhandled state: " + status); } } public void complete() { latch.countDown(); } public void gotData(String key, int flags, int pos, BKeyObject bkeyObject, byte[] eflag, byte[] data) { assert key.equals(k) : "Wrong key returned"; Element element = makeBTreeElement(key, flags, bkeyObject, eflag, data, tc); if (element != null) { map.put(pos, element); } } }); rv.setOperation(op); addOp(k, op); return rv; } @Override public CollectionFuture asyncBopFindPosition(String key, long longBKey, BTreeOrder order) { if (order == null) { throw new IllegalArgumentException("BTreeOrder must not be null."); } BTreeFindPosition get = new BTreeFindPosition(longBKey, order); return asyncBopFindPosition(key, get); } @Override public CollectionFuture asyncBopFindPosition(String key, byte[] byteArrayBKey, BTreeOrder order) { if (order == null) { throw new IllegalArgumentException("BTreeOrder must not be null."); } BTreeFindPosition get = new BTreeFindPosition(byteArrayBKey, order); return asyncBopFindPosition(key, get); } /** * Generic find-position operation for b+tree items. Public methods for b+tree items call this method. 
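 * <p>
 * Editor's sketch (not part of the original source; assumes an ArcusClient
 * instance named {@code client}):
 * <pre>{@code
 * CollectionFuture<Integer> f =
 *     client.asyncBopFindPosition("bkey:item", 100L, BTreeOrder.ASC);
 * Integer pos = f.get(700L, TimeUnit.MILLISECONDS);  // null if the key or element is missing
 * }</pre>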
* * @param k b+tree item's key * @param get operation parameters (element key and so on) * @return future holding the element's position */ private CollectionFuture asyncBopFindPosition(final String k, final BTreeFindPosition get) { final CountDownLatch latch = new CountDownLatch(1); final CollectionFuture rv = new CollectionFuture(latch, operationTimeout); Operation op = opFact.bopFindPosition(k, get, new BTreeFindPositionOperation.Callback() { int position = 0; public void receivedStatus(OperationStatus status) { CollectionOperationStatus cstatus; if (status instanceof CollectionOperationStatus) { cstatus = (CollectionOperationStatus) status; } else { getLogger().warn("Unhandled state: " + status); cstatus = new CollectionOperationStatus(status); } if (cstatus.isSuccess()) { rv.set(position, cstatus); return; } switch (cstatus.getResponse()) { case NOT_FOUND: rv.set(null, cstatus); if (getLogger().isDebugEnabled()) { getLogger().debug("Key(%s) not found : %s", k, cstatus); } break; case NOT_FOUND_ELEMENT: rv.set(null, cstatus); if (getLogger().isDebugEnabled()) { getLogger().debug("Element(%s) not found : %s", k, cstatus); } break; case UNREADABLE: rv.set(null, cstatus); if (getLogger().isDebugEnabled()) { getLogger().debug("Collection(%s) is not readable : %s", k, cstatus); } break; case BKEY_MISMATCH: rv.set(null, cstatus); if (getLogger().isDebugEnabled()) { getLogger().debug("Collection(%s) has wrong bkey : %s(%s)", k, cstatus, get.getBkeyObject().getType()); } break; case TYPE_MISMATCH: rv.set(null, cstatus); if (getLogger().isDebugEnabled()) { getLogger().debug("Collection(%s) is not a B+Tree : %s", k, cstatus); } break; default: getLogger().warn("Unhandled state: " + status); } } public void complete() { latch.countDown(); } public void gotData(int position) { this.position = position; } }); rv.setOperation(op); addOp(k, op); return rv; } @Override public CollectionFuture>> asyncBopFindPositionWithGet( String key, long longBKey, BTreeOrder order, int count) { BTreeFindPositionWithGet get = new BTreeFindPositionWithGet(longBKey, order, count); return asyncBopFindPositionWithGet(key, get, collectionTranscoder); } @Override public CollectionFuture>> asyncBopFindPositionWithGet( String key, long longBKey, BTreeOrder order, int count, Transcoder tc) { BTreeFindPositionWithGet get = new BTreeFindPositionWithGet(longBKey, order, count); return asyncBopFindPositionWithGet(key, get, tc); } @Override public CollectionFuture>> asyncBopFindPositionWithGet( String key, byte[] byteArrayBKey, BTreeOrder order, int count) { BTreeFindPositionWithGet get = new BTreeFindPositionWithGet(byteArrayBKey, order, count); return asyncBopFindPositionWithGet(key, get, collectionTranscoder); } @Override public CollectionFuture>> asyncBopFindPositionWithGet( String key, byte[] byteArrayBKey, BTreeOrder order, int count, Transcoder tc) { BTreeFindPositionWithGet get = new BTreeFindPositionWithGet(byteArrayBKey, order, count); return asyncBopFindPositionWithGet(key, get, tc); } /** * Generic find position with get operation for b+tree items. Public methods for b+tree items call this method. 
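 * <p>
 * Editor's sketch (not part of the original source; assumes an ArcusClient
 * instance named {@code client}; per the validation below, {@code count}
 * must be between 0 and 100):
 * <pre>{@code
 * CollectionFuture<Map<Integer, Element<Object>>> f =
 *     client.asyncBopFindPositionWithGet("bkey:item", 100L, BTreeOrder.ASC, 3);
 * Map<Integer, Element<Object>> positions = f.get(700L, TimeUnit.MILLISECONDS);
 * }</pre>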
* * @param k b+tree item's key * @param get operation parameters (element key and so on) * @param tc transcoder to serialize and unserialize value * @return future holding the element's position */ private CollectionFuture>> asyncBopFindPositionWithGet( final String k, final BTreeFindPositionWithGet get, final Transcoder tc) { if (get.getOrder() == null) { throw new IllegalArgumentException("BTreeOrder must not be null."); } if (get.getCount() < 0 || get.getCount() > 100) { throw new IllegalArgumentException("Count must be a value between 0 and 100."); } final CountDownLatch latch = new CountDownLatch(1); final CollectionFuture>> rv = new CollectionFuture>>( latch, operationTimeout); Operation op = opFact.bopFindPositionWithGet(k, get, new BTreeFindPositionWithGetOperation.Callback() { TreeMap> map = new TreeMap>(); public void receivedStatus(OperationStatus status) { CollectionOperationStatus cstatus; if (status instanceof CollectionOperationStatus) { cstatus = (CollectionOperationStatus) status; } else { getLogger().warn("Unhandled state: " + status); cstatus = new CollectionOperationStatus(status); } if (cstatus.isSuccess()) { rv.set(map, cstatus); return; } switch (cstatus.getResponse()) { case NOT_FOUND: rv.set(null, cstatus); if (getLogger().isDebugEnabled()) { getLogger().debug("Key(%s) not found : %s", k, cstatus); } break; case NOT_FOUND_ELEMENT: rv.set(null, cstatus); if (getLogger().isDebugEnabled()) { getLogger().debug("Element(%s) not found : %s", k, cstatus); } break; case UNREADABLE: rv.set(null, cstatus); if (getLogger().isDebugEnabled()) { getLogger().debug("Collection(%s) is not readable : %s", k, cstatus); } break; case BKEY_MISMATCH: rv.set(null, cstatus); if (getLogger().isDebugEnabled()) { getLogger().debug("Collection(%s) has wrong bkey : %s(%s)", k, cstatus, get.getBkeyObject().getType()); } break; case TYPE_MISMATCH: rv.set(null, cstatus); if (getLogger().isDebugEnabled()) { getLogger().debug("Collection(%s) is not a B+Tree : %s", k, cstatus); } break; default: getLogger().warn("Unhandled state: " + status); } } public void complete() { latch.countDown(); } public void gotData(String key, int flags, int pos, BKeyObject bkeyObject, byte[] eflag, byte[] data) { assert key.equals(k) : "Wrong key returned"; Element element = makeBTreeElement(key, flags, bkeyObject, eflag, data, tc); if (element != null) { map.put(pos, element); } } }); rv.setOperation(op); addOp(k, op); return rv; } @Override public BTreeStoreAndGetFuture asyncBopInsertAndGetTrimmed( String key, long bkey, byte[] eFlag, Object value, CollectionAttributes attributesForCreate) { BTreeStoreAndGet get = new BTreeStoreAndGet( BTreeStoreAndGet.Command.INSERT, bkey, eFlag, value, attributesForCreate); return asyncBTreeStoreAndGet(key, get, collectionTranscoder); } @Override public BTreeStoreAndGetFuture asyncBopInsertAndGetTrimmed( String key, long bkey, byte[] eFlag, E value, CollectionAttributes attributesForCreate, Transcoder transcoder) { BTreeStoreAndGet get = new BTreeStoreAndGet( BTreeStoreAndGet.Command.INSERT, bkey, eFlag, value, attributesForCreate); return asyncBTreeStoreAndGet(key, get, transcoder); } @Override public BTreeStoreAndGetFuture asyncBopInsertAndGetTrimmed( String key, byte[] bkey, byte[] eFlag, Object value, CollectionAttributes attributesForCreate) { BTreeStoreAndGet get = new BTreeStoreAndGet( BTreeStoreAndGet.Command.INSERT, bkey, eFlag, value, attributesForCreate); return asyncBTreeStoreAndGet(key, get, collectionTranscoder); } @Override public BTreeStoreAndGetFuture 
asyncBopInsertAndGetTrimmed( String key, byte[] bkey, byte[] eFlag, E value, CollectionAttributes attributesForCreate, Transcoder transcoder) { BTreeStoreAndGet get = new BTreeStoreAndGet( BTreeStoreAndGet.Command.INSERT, bkey, eFlag, value, attributesForCreate); return asyncBTreeStoreAndGet(key, get, transcoder); } @Override public BTreeStoreAndGetFuture asyncBopUpsertAndGetTrimmed( String key, long bkey, byte[] eFlag, Object value, CollectionAttributes attributesForCreate) { BTreeStoreAndGet get = new BTreeStoreAndGet( BTreeStoreAndGet.Command.UPSERT, bkey, eFlag, value, attributesForCreate); return asyncBTreeStoreAndGet(key, get, collectionTranscoder); } @Override public BTreeStoreAndGetFuture asyncBopUpsertAndGetTrimmed( String key, long bkey, byte[] eFlag, E value, CollectionAttributes attributesForCreate, Transcoder transcoder) { BTreeStoreAndGet get = new BTreeStoreAndGet( BTreeStoreAndGet.Command.UPSERT, bkey, eFlag, value, attributesForCreate); return asyncBTreeStoreAndGet(key, get, transcoder); } @Override public BTreeStoreAndGetFuture asyncBopUpsertAndGetTrimmed( String key, byte[] bkey, byte[] eFlag, Object value, CollectionAttributes attributesForCreate) { BTreeStoreAndGet get = new BTreeStoreAndGet( BTreeStoreAndGet.Command.UPSERT, bkey, eFlag, value, attributesForCreate); return asyncBTreeStoreAndGet(key, get, collectionTranscoder); } @Override public BTreeStoreAndGetFuture asyncBopUpsertAndGetTrimmed( String key, byte[] bkey, byte[] eFlag, E value, CollectionAttributes attributesForCreate, Transcoder transcoder) { BTreeStoreAndGet get = new BTreeStoreAndGet( BTreeStoreAndGet.Command.UPSERT, bkey, eFlag, value, attributesForCreate); return asyncBTreeStoreAndGet(key, get, transcoder); } /** * Insert/upsert and get the trimmed element for b+tree items. Public methods call this method. 
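 * <p>
 * Editor's sketch (not part of the original source; assumes an ArcusClient
 * instance named {@code client} and that the returned future exposes the
 * trimmed element via {@code getElement()}):
 * <pre>{@code
 * BTreeStoreAndGetFuture<Boolean, Object> f = client.asyncBopInsertAndGetTrimmed(
 *     "bkey:item", 1000L, null, "value", new CollectionAttributes());
 * boolean inserted = f.get(700L, TimeUnit.MILLISECONDS);
 * Element<Object> trimmed = f.getElement();  // non-null if an element was trimmed out
 * }</pre>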
* * @param k b+tree item's key * @param get operation parameters (element key and so on) * @param tc transcoder to serialize and unserialize value * @return future holding the success/failure of the operation and the trimmed element */ private BTreeStoreAndGetFuture asyncBTreeStoreAndGet( final String k, final BTreeStoreAndGet get, final Transcoder tc) { CachedData co = tc.encode(get.getValue()); get.setFlags(co.getFlags()); final CountDownLatch latch = new CountDownLatch(1); final BTreeStoreAndGetFuture rv = new BTreeStoreAndGetFuture( latch, operationTimeout); Operation op = opFact.bopStoreAndGet(k, get, co.getData(), new BTreeStoreAndGetOperation.Callback() { Element element = null; public void receivedStatus(OperationStatus status) { CollectionOperationStatus cstatus; if (status instanceof CollectionOperationStatus) { cstatus = (CollectionOperationStatus) status; } else { getLogger().warn("Unhandled state: " + status); cstatus = new CollectionOperationStatus(status); } if (cstatus.isSuccess()) { rv.set(true, cstatus); rv.setElement(element); return; } switch (cstatus.getResponse()) { case NOT_FOUND: case ELEMENT_EXISTS: case OVERFLOWED: case OUT_OF_RANGE: case TYPE_MISMATCH: case BKEY_MISMATCH: rv.set(false, cstatus); if (getLogger().isDebugEnabled()) { getLogger().debug("Request for \"%s\" was not successful : %s", k, cstatus); } break; default: getLogger().warn("Unhandled state: " + status); } } public void complete() { latch.countDown(); } public void gotData(String key, int flags, BKeyObject bkeyObject, byte[] eflag, byte[] data) { assert key.equals(k) : "Wrong key returned"; element = makeBTreeElement(key, flags, bkeyObject, eflag, data, tc); } }); rv.setOperation(op); addOp(k, op); return rv; } /** * Utility method to create a b+tree element from individual parameters. 
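 * <p>
 * Editor's note: the element's bkey type mirrors {@code bkey.getType()}:
 * a LONG bkey produces an element keyed by {@code long}, a BYTEARRAY bkey
 * produces one keyed by {@code byte[]}, and any other type logs an error
 * and yields {@code null}.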
* * @param key b+tree item's key * @param flags item flags, used when creating the item (see createKeyIfNotExists) * @param bkey element key * @param eflag element flags * @param value element value * @param tc transcoder to serialize and unserialize value * @return element object containing all the parameters and transcoded value */ private Element makeBTreeElement(String key, int flags, BKeyObject bkey, byte[] eflag, byte[] data, Transcoder tc) { Element element = null; T value = tc.decode(new CachedData(flags, data, tc.getMaxSize())); switch (bkey.getType()) { case LONG: element = new Element(bkey.getLongBKey(), value, eflag); break; case BYTEARRAY: element = new Element(bkey.getByteArrayBKeyRaw(), value, eflag); break; default: getLogger().error( "Unexpected bkey type : (key:" + key + ", bkey:" + bkey.toString() + ")"); } return element; } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopDelete(java.lang.String, byte[], byte[], net.spy.memcached.collection.ElementFlagFilter, int, boolean) */ @Override public CollectionFuture asyncBopDelete(String key, byte[] from, byte[] to, ElementFlagFilter eFlagFilter, int count, boolean dropIfEmpty) { BTreeDelete delete = new BTreeDelete(from, to, count, false, dropIfEmpty, eFlagFilter); return asyncCollectionDelete(key, delete); } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopDelete(java.lang.String, byte[], net.spy.memcached.collection.ElementFlagFilter, boolean) */ @Override public CollectionFuture asyncBopDelete(String key, byte[] bkey, ElementFlagFilter eFlagFilter, boolean dropIfEmpty) { BTreeDelete delete = new BTreeDelete(bkey, false, dropIfEmpty, eFlagFilter); return asyncCollectionDelete(key, delete); } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopUpsert(java.lang.String, byte[], byte[], java.lang.Object, net.spy.memcached.collection.CollectionAttributes) */ @Override public CollectionFuture asyncBopUpsert(String key, byte[] bkey, byte[] elementFlag, Object value, CollectionAttributes attributesForCreate) { BTreeUpsert bTreeStore = new BTreeUpsert(value, elementFlag, (attributesForCreate != null), null, attributesForCreate); return asyncCollectionUpsert(key, BTreeUtil.toHex(bkey), bTreeStore, collectionTranscoder); } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopUpsert(java.lang.String, byte[], byte[], java.lang.Object, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder) */ @Override public CollectionFuture asyncBopUpsert(String key, byte[] bkey, byte[] elementFlag, T value, CollectionAttributes attributesForCreate, Transcoder tc) { BTreeUpsert bTreeStore = new BTreeUpsert(value, elementFlag, (attributesForCreate != null), null, attributesForCreate); return asyncCollectionUpsert(key, BTreeUtil.toHex(bkey), bTreeStore, tc); } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopGetItemCount(java.lang.String, byte[], byte[], net.spy.memcached.collection.ElementFlagFilter) */ @Override public CollectionFuture asyncBopGetItemCount(String key, byte[] from, byte[] to, ElementFlagFilter eFlagFilter) { CollectionCount collectionCount = new BTreeCount(from, to, eFlagFilter); return asyncCollectionCount(key, collectionCount); } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncSopPipedExistBulk(java.lang.String, java.util.List) */ @Override public CollectionFuture> asyncSopPipedExistBulk(String key, List values) { SetPipedExist exist = new SetPipedExist(key, values, collectionTranscoder); return 
asyncSetPipedExist(key, exist); } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncSopPipedExistBulk(java.lang.String, java.util.List, net.spy.memcached.transcoders.Transcoder) */ @Override public CollectionFuture> asyncSopPipedExistBulk(String key, List values, Transcoder tc) { SetPipedExist exist = new SetPipedExist(key, values, tc); return asyncSetPipedExist(key, exist); } /** * Generic pipelined existence operation for set items. Public methods call this method. * * @param key collection item's key * @param exist operation parameters (element values) * @return future holding the map of elements and their existence results */ CollectionFuture> asyncSetPipedExist( final String key, final SetPipedExist exist) { if (exist.getItemCount() == 0) { throw new IllegalArgumentException( "The number of piped operations must be larger than 0."); } if (exist.getItemCount() > CollectionPipedStore.MAX_PIPED_ITEM_COUNT) { throw new IllegalArgumentException( "The number of piped operations must not exceed a maximum of " + CollectionPipedStore.MAX_PIPED_ITEM_COUNT + "."); } final CountDownLatch latch = new CountDownLatch(1); final CollectionFuture> rv = new CollectionFuture>( latch, operationTimeout); Operation op = opFact.collectionPipedExist(key, exist, new CollectionPipedExistOperation.Callback() { Map result = new HashMap(); boolean hasAnError = false; public void receivedStatus(OperationStatus status) { if (hasAnError) return; CollectionOperationStatus cstatus; if (status instanceof CollectionOperationStatus) { cstatus = (CollectionOperationStatus) status; } else { getLogger().warn("Unhandled state: " + status); cstatus = new CollectionOperationStatus(status); } rv.set(result, cstatus); } public void complete() { latch.countDown(); } public void gotStatus(Integer index, OperationStatus status) { CollectionOperationStatus cstatus; if (status instanceof CollectionOperationStatus) { cstatus = (CollectionOperationStatus) status; } else { cstatus = new CollectionOperationStatus(status); } switch (cstatus.getResponse()) { case EXIST: case NOT_EXIST: result.put(exist.getValues().get(index), (CollectionResponse.EXIST.equals(cstatus .getResponse()))); break; case UNREADABLE: case TYPE_MISMATCH: case NOT_FOUND: hasAnError = true; rv.set(new HashMap(0), (CollectionOperationStatus) status); break; default: getLogger().warn("Unhandled state: " + status); } } }); rv.setOperation(op); addOp(key, op); return rv; } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopPipedInsertBulk(java.lang.String, java.util.List, boolean, net.spy.memcached.collection.CollectionAttributes) */ @Override public CollectionFuture> asyncBopPipedInsertBulk( String key, List> elements, CollectionAttributes attributesForCreate) { return asyncBopPipedInsertBulk(key, elements, attributesForCreate, collectionTranscoder); } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopPipedInsertBulk(java.lang.String, java.util.List, boolean, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder) */ @Override public CollectionFuture> asyncBopPipedInsertBulk( String key, List> elements, CollectionAttributes attributesForCreate, Transcoder tc) { if (elements.size() <= CollectionPipedStore.MAX_PIPED_ITEM_COUNT) { CollectionPipedStore store = new ByteArraysBTreePipedStore( key, elements, (attributesForCreate != null), attributesForCreate, tc); return asyncCollectionPipedStore(key, store); } else { PartitionedList> list = new PartitionedList>( elements, 
CollectionPipedStore.MAX_PIPED_ITEM_COUNT); List> storeList = new ArrayList>( list.size()); for (int i = 0; i < list.size(); i++) { storeList.add(new ByteArraysBTreePipedStore(key, list.get(i), (attributesForCreate != null), attributesForCreate, tc)); } return asyncCollectionPipedStore(key, storeList); } } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopSortMergeGet(java.util.List, byte[], byte[], net.spy.memcached.collection.ElementFlagFilter, int, int) */ @Override public SMGetFuture>> asyncBopSortMergeGet( List keyList, byte[] from, byte[] to, ElementFlagFilter eFlagFilter, int offset, int count) { if (keyList == null || keyList.isEmpty()) { throw new IllegalArgumentException("Key list is empty."); } if (count < 1) { throw new IllegalArgumentException("Count must be larger than 0."); } if (offset + count > MAX_SMGET_COUNT) { throw new IllegalArgumentException( "The sum of offset and count must not exceed a maximum of " + MAX_SMGET_COUNT + "."); } Map> arrangedKey = groupingKeys(keyList, smgetKeyChunkSize); List> smGetList = new ArrayList>( arrangedKey.size()); for (List v : arrangedKey.values()) { if (arrangedKey.size() > 1) { smGetList.add(new BTreeSMGetWithByteTypeBkeyOld(v, from, to, eFlagFilter, 0, offset + count)); }else { smGetList.add(new BTreeSMGetWithByteTypeBkeyOld(v, from, to, eFlagFilter, offset, count)); } } return smget(smGetList, offset, count, (BTreeUtil.compareByteArraysInLexOrder(from, to) > 0), collectionTranscoder); } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopSortMergeGet(java.util.List, byte[], byte[], net.spy.memcached.collection.ElementFlagFilter, int, int, boolean) */ @Override public SMGetFuture>> asyncBopSortMergeGet( List keyList, byte[] from, byte[] to, ElementFlagFilter eFlagFilter, int count, SMGetMode smgetMode) { if (keyList == null || keyList.isEmpty()) { throw new IllegalArgumentException("Key list is empty."); } if (count < 1) { throw new IllegalArgumentException("Count must be larger than 0."); } if (count > MAX_SMGET_COUNT) { throw new IllegalArgumentException("The count must not exceed a maximum of " + MAX_SMGET_COUNT + "."); } Map> arrangedKey = groupingKeys(keyList, smgetKeyChunkSize); List> smGetList = new ArrayList>( arrangedKey.size()); for (List v : arrangedKey.values()) { smGetList.add(new BTreeSMGetWithByteTypeBkey(v, from, to, eFlagFilter, count, smgetMode)); } return smget(smGetList, count, (BTreeUtil.compareByteArraysInLexOrder(from, to) > 0), collectionTranscoder, smgetMode); } /** * Generic pipelined store operation for collection items. Public methods for collection items call this method. 
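 * <p>
 * Editor's note: the returned map is keyed by each element's index in the
 * original (unpartitioned) list; each partition's per-element statuses are
 * re-based as {@code index + partitionIndex * MAX_PIPED_ITEM_COUNT}.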
* * @param key collection item's key * @param storeList list of operation parameters (element values and so on) * @return future holding the map of element index and the result of its store operation */ CollectionFuture> asyncCollectionPipedStore( final String key, final List> storeList) { final ConcurrentLinkedQueue ops = new ConcurrentLinkedQueue(); final CountDownLatch latch = new CountDownLatch(storeList.size()); final List mergedOperationStatus = Collections .synchronizedList(new ArrayList(1)); final Map mergedResult = new ConcurrentHashMap(); for (int i = 0; i < storeList.size(); i++) { final CollectionPipedStore store = storeList.get(i); final int idx = i; Operation op = opFact.collectionPipedStore(key, store, new CollectionPipedStoreOperation.Callback() { // each result status public void receivedStatus(OperationStatus status) { CollectionOperationStatus cstatus; if (status instanceof CollectionOperationStatus) { cstatus = (CollectionOperationStatus) status; } else { getLogger().warn("Unhandled state: " + status); cstatus = new CollectionOperationStatus(status); } mergedOperationStatus.add(cstatus); } // complete public void complete() { latch.countDown(); } // got status public void gotStatus(Integer index, OperationStatus status) { if (status instanceof CollectionOperationStatus) { mergedResult .put(index + (idx * CollectionPipedStore.MAX_PIPED_ITEM_COUNT), (CollectionOperationStatus) status); } else { mergedResult .put(index + (idx * CollectionPipedStore.MAX_PIPED_ITEM_COUNT), new CollectionOperationStatus( status)); } } }); addOp(key, op); ops.add(op); } return new CollectionFuture>( latch, operationTimeout) { @Override public boolean cancel(boolean ign) { boolean rv = false; for (Operation op : ops) { op.cancel("by application."); rv |= op.getState() == OperationState.WRITING; } return rv; } @Override public boolean isCancelled() { for (Operation op : ops) { if (op.isCancelled()) return true; } return false; } @Override public Map get(long duration, TimeUnit units) throws InterruptedException, TimeoutException, ExecutionException { if (!latch.await(duration, units)) { for (Operation op : ops) { MemcachedConnection.opTimedOut(op); } throw new CheckedOperationTimeoutException( "Timed out waiting for operation", ops); } else { // continuous timeout counter will be reset for (Operation op : ops) { MemcachedConnection.opSucceeded(op); } } for (Operation op : ops) { if (op != null && op.hasErrored()) { throw new ExecutionException(op.getException()); } if (op.isCancelled()) { throw new ExecutionException(new RuntimeException(op.getCancelCause())); } } return mergedResult; } @Override public CollectionOperationStatus getOperationStatus() { for (OperationStatus status : mergedOperationStatus) { if (!status.isSuccess()) { return new CollectionOperationStatus(status); } } return new CollectionOperationStatus(true, "END", CollectionResponse.END); } }; } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopInsertBulk(java.util.List, long, byte[], java.lang.Object, net.spy.memcached.collection.CollectionAttributes) */ @Override public Future> asyncBopInsertBulk( List keyList, long bkey, byte[] eFlag, Object value, CollectionAttributes attributesForCreate) { return asyncBopInsertBulk(keyList, bkey, eFlag, value, attributesForCreate, collectionTranscoder); } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncBopInsertBulk(java.util.List, long, byte[], java.lang.Object, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder) */ 
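/*
 * Editor's example (not part of the original source): inserting one element
 * into many b+tree items at once and inspecting per-key failures. Assumes an
 * ArcusClient instance named "client".
 *
 *   Future<Map<String, CollectionOperationStatus>> f = client.asyncBopInsertBulk(
 *       keyList, 1L, null, "value", new CollectionAttributes());
 *   Map<String, CollectionOperationStatus> failed = f.get(1000L, TimeUnit.MILLISECONDS);
 *   // an empty map means every key succeeded; otherwise it maps key -> failure status
 */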
@Override public Future> asyncBopInsertBulk( List keyList, long bkey, byte[] eFlag, T value, CollectionAttributes attributesForCreate, Transcoder tc) { Map> arrangedKey = groupingKeys(keyList, NON_PIPED_BULK_INSERT_CHUNK_SIZE); List> storeList = new ArrayList>( arrangedKey.size()); for (List eachKeyList : arrangedKey.values()) { storeList.add(new CollectionBulkStore.BTreeBulkStore( eachKeyList, bkey, eFlag, value, attributesForCreate, tc)); } return asyncCollectionInsertBulk2(storeList); } @Override public Future> asyncBopInsertBulk( List keyList, byte[] bkey, byte[] eFlag, Object value, CollectionAttributes attributesForCreate) { return asyncBopInsertBulk(keyList, bkey, eFlag, value, attributesForCreate, collectionTranscoder); } @Override public Future> asyncBopInsertBulk( List keyList, byte[] bkey, byte[] eFlag, T value, CollectionAttributes attributesForCreate, Transcoder tc) { Map> arrangedKey = groupingKeys(keyList, NON_PIPED_BULK_INSERT_CHUNK_SIZE); List> storeList = new ArrayList>( arrangedKey.size()); for (List eachKeyList : arrangedKey.values()) { storeList.add(new CollectionBulkStore.BTreeBulkStore( eachKeyList, bkey, eFlag, value, attributesForCreate, tc)); } return asyncCollectionInsertBulk2(storeList); } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncSopInsertBulk(java.util.List, java.lang.Object, net.spy.memcached.collection.CollectionAttributes) */ @Override public Future> asyncSopInsertBulk( List keyList, Object value, CollectionAttributes attributesForCreate) { return asyncSopInsertBulk(keyList, value, attributesForCreate, collectionTranscoder); } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncSopInsertBulk(java.util.List, java.lang.Object, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder) */ @Override public Future> asyncSopInsertBulk( List keyList, T value, CollectionAttributes attributesForCreate, Transcoder tc) { Map> arrangedKey = groupingKeys(keyList, NON_PIPED_BULK_INSERT_CHUNK_SIZE); List> storeList = new ArrayList>( arrangedKey.size()); for (List eachKeyList : arrangedKey.values()) { storeList.add(new CollectionBulkStore.SetBulkStore( eachKeyList, value, attributesForCreate, tc)); } return asyncCollectionInsertBulk2(storeList); } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncLopInsertBulk(java.util.List, int, java.lang.Object, net.spy.memcached.collection.CollectionAttributes) */ @Override public Future> asyncLopInsertBulk( List keyList, int index, Object value, CollectionAttributes attributesForCreate) { return asyncLopInsertBulk(keyList, index, value, attributesForCreate, collectionTranscoder); } /* * (non-Javadoc) * @see net.spy.memcached.ArcusClientIF#asyncLopInsertBulk(java.util.List, int, java.lang.Object, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder) */ @Override public Future> asyncLopInsertBulk( List keyList, int index, T value, CollectionAttributes attributesForCreate, Transcoder tc) { Map> arrangedKey = groupingKeys(keyList, NON_PIPED_BULK_INSERT_CHUNK_SIZE); List> storeList = new ArrayList>( arrangedKey.size()); for (List eachKeyList : arrangedKey.values()) { storeList.add(new CollectionBulkStore.ListBulkStore( eachKeyList, index, value, attributesForCreate, tc)); } return asyncCollectionInsertBulk2(storeList); } /** * Generic bulk store operation for collection items. Public methods for collection items call this method. 
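 * <p>
 * Editor's note: the returned map contains only the keys whose store failed;
 * an empty map means every key succeeded. {@code getOperationStatus()} on
 * this future intentionally returns {@code null}.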
  /**
   * Generic bulk store operation for collection items.
   * Public methods for collection items call this method.
   *
   * @param storeList list of operation parameters (item keys, element values, and so on)
   * @return future holding the map of item key and the result of the store operation on that key
   */
  private <T> Future<Map<String, CollectionOperationStatus>> asyncCollectionInsertBulk2(
      List<CollectionBulkStore<T>> storeList) {
    final ConcurrentLinkedQueue<Operation> ops = new ConcurrentLinkedQueue<Operation>();
    final Map<String, CollectionOperationStatus> failedResult =
        new ConcurrentHashMap<String, CollectionOperationStatus>();
    final CountDownLatch latch = new CountDownLatch(storeList.size());

    for (final CollectionBulkStore<T> store : storeList) {
      Operation op = opFact.collectionBulkStore(store.getKeyList(), store,
          new CollectionBulkStoreOperation.Callback() {
            public void receivedStatus(OperationStatus status) {
            }

            public void complete() {
              latch.countDown();
            }

            public void gotStatus(Integer index, OperationStatus status) {
              if (!status.isSuccess()) {
                if (status instanceof CollectionOperationStatus) {
                  failedResult.put(store.getKeyList().get(index),
                      (CollectionOperationStatus) status);
                } else {
                  failedResult.put(store.getKeyList().get(index),
                      new CollectionOperationStatus(status));
                }
              }
            }
          });
      ops.add(op);
      addOp(store.getKeyList().get(0), op);
    }

    // return future
    return new CollectionFuture<Map<String, CollectionOperationStatus>>(
        latch, operationTimeout) {
      @Override
      public boolean cancel(boolean ign) {
        boolean rv = false;
        for (Operation op : ops) {
          op.cancel("by application.");
          rv |= op.getState() == OperationState.WRITING;
        }
        return rv;
      }

      @Override
      public boolean isCancelled() {
        for (Operation op : ops) {
          if (op.isCancelled())
            return true;
        }
        return false;
      }

      @Override
      public Map<String, CollectionOperationStatus> get(long duration,
          TimeUnit units) throws InterruptedException, TimeoutException,
          ExecutionException {
        if (!latch.await(duration, units)) {
          for (Operation op : ops) {
            MemcachedConnection.opTimedOut(op);
          }
          throw new CheckedOperationTimeoutException(
              "Timed out waiting for bulk operation", ops);
        } else {
          // continuous timeout counter will be reset
          for (Operation op : ops) {
            MemcachedConnection.opSucceeded(op);
          }
        }

        for (Operation op : ops) {
          if (op != null && op.hasErrored()) {
            throw new ExecutionException(op.getException());
          }
          if (op.isCancelled()) {
            throw new ExecutionException(new RuntimeException(op.getCancelCause()));
          }
        }

        return failedResult;
      }

      @Override
      public CollectionOperationStatus getOperationStatus() {
        return null;
      }
    };
  }
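  /*
   * Timeout-handling sketch (illustrative only): on latch timeout the future
   * built above marks every pending operation via
   * MemcachedConnection.opTimedOut() and throws
   * CheckedOperationTimeoutException, which the get() signature above treats
   * as a TimeoutException, so callers should bound get() and handle that
   * case explicitly:
   *
   *   try {
   *     Map<String, CollectionOperationStatus> failed =
   *         future.get(700L, TimeUnit.MILLISECONDS);
   *   } catch (TimeoutException e) {
   *     future.cancel(true); // cancel the operations still in flight
   *   }
   */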
  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncBopGetBulk(java.util.List, long, long, net.spy.memcached.collection.ElementFlagFilter, int, int)
   */
  public CollectionGetBulkFuture<Map<String, BTreeGetResult<Long, Object>>> asyncBopGetBulk(
      List<String> keyList, long from, long to,
      ElementFlagFilter eFlagFilter, int offset, int count) {
    return asyncBopGetBulk(keyList, from, to, eFlagFilter, offset, count,
        collectionTranscoder);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncBopGetBulk(java.util.List, long, long, net.spy.memcached.collection.ElementFlagFilter, int, int, net.spy.memcached.transcoders.Transcoder)
   */
  public <T> CollectionGetBulkFuture<Map<String, BTreeGetResult<Long, T>>> asyncBopGetBulk(
      List<String> keyList, long from, long to,
      ElementFlagFilter eFlagFilter, int offset, int count, Transcoder<T> tc) {
    if (keyList == null) {
      throw new IllegalArgumentException("Key list is null.");
    }
    if (keyList.size() > MAX_GETBULK_KEY_COUNT) {
      throw new IllegalArgumentException("Key count must not exceed a maximum of "
          + MAX_GETBULK_KEY_COUNT + ".");
    }
    if (offset < 0) {
      throw new IllegalArgumentException("Offset must be 0 or a positive integer.");
    }
    if (count > MAX_GETBULK_ELEMENT_COUNT) {
      throw new IllegalArgumentException("Count must not exceed a maximum of "
          + MAX_GETBULK_ELEMENT_COUNT + ".");
    }

    Map<String, List<String>> rearrangedKeys = groupingKeys(keyList,
        BOPGET_BULK_CHUNK_SIZE);

    List<BTreeGetBulk<T>> getBulkList = new ArrayList<BTreeGetBulk<T>>(
        rearrangedKeys.size());

    for (Entry<String, List<String>> entry : rearrangedKeys.entrySet()) {
      getBulkList.add(new BTreeGetBulkWithLongTypeBkey<T>(
          entry.getValue(), from, to, eFlagFilter, offset, count));
    }

    return btreeGetBulk(getBulkList, offset, count, (to > from), tc);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncBopGetBulk(java.util.List, byte[], byte[], net.spy.memcached.collection.ElementFlagFilter, int, int)
   */
  public CollectionGetBulkFuture<Map<String, BTreeGetResult<ByteArrayBKey, Object>>> asyncBopGetBulk(
      List<String> keyList, byte[] from, byte[] to,
      ElementFlagFilter eFlagFilter, int offset, int count) {
    return asyncBopGetBulk(keyList, from, to, eFlagFilter, offset, count,
        collectionTranscoder);
  }

  /*
   * (non-Javadoc)
   * @see net.spy.memcached.ArcusClientIF#asyncBopGetBulk(java.util.List, byte[], byte[], net.spy.memcached.collection.ElementFlagFilter, int, int, net.spy.memcached.transcoders.Transcoder)
   */
  public <T> CollectionGetBulkFuture<Map<String, BTreeGetResult<ByteArrayBKey, T>>> asyncBopGetBulk(
      List<String> keyList, byte[] from, byte[] to,
      ElementFlagFilter eFlagFilter, int offset, int count, Transcoder<T> tc) {
    if (keyList == null) {
      throw new IllegalArgumentException("Key list is null.");
    }
    if (keyList.size() > MAX_GETBULK_KEY_COUNT) {
      throw new IllegalArgumentException("Key count must not exceed a maximum of "
          + MAX_GETBULK_KEY_COUNT + ".");
    }
    if (offset < 0) {
      throw new IllegalArgumentException("Offset must be 0 or a positive integer.");
    }
    if (count > MAX_GETBULK_ELEMENT_COUNT) {
      throw new IllegalArgumentException("Count must not exceed a maximum of "
          + MAX_GETBULK_ELEMENT_COUNT + ".");
    }

    Map<String, List<String>> rearrangedKeys = groupingKeys(keyList,
        BOPGET_BULK_CHUNK_SIZE);

    List<BTreeGetBulk<T>> getBulkList = new ArrayList<BTreeGetBulk<T>>(
        rearrangedKeys.size());

    for (Entry<String, List<String>> entry : rearrangedKeys.entrySet()) {
      getBulkList.add(new BTreeGetBulkWithByteTypeBkey<T>(
          entry.getValue(), from, to, eFlagFilter, offset, count));
    }

    boolean reverse = BTreeUtil.compareByteArraysInLexOrder(from, to) > 0;

    return btreeGetBulkByteArrayBKey(getBulkList, offset, count, reverse, tc);
  }
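  /*
   * Usage sketch (illustrative only; the keys, bkey range, and the
   * BTreeGetResult accessor are assumptions, not confirmed by this file):
   * fetching up to 10 elements with bkeys 0..100 from many items in one
   * call, then walking the per-key results.
   *
   *   CollectionGetBulkFuture<Map<String, BTreeGetResult<Long, Object>>> future =
   *       client.asyncBopGetBulk(keys, 0L, 100L, ElementFlagFilter.DO_NOT_FILTER,
   *           0, 10);
   *   Map<String, BTreeGetResult<Long, Object>> results =
   *       future.get(1000L, TimeUnit.MILLISECONDS);
   *   for (Entry<String, BTreeGetResult<Long, Object>> e : results.entrySet()) {
   *     // getElements() is assumed here; the element map is null when the
   *     // item had no matching elements (see gotKey() below)
   *     if (e.getValue().getElements() != null) {
   *       System.out.println(e.getKey() + ": " + e.getValue().getElements().size());
   *     }
   *   }
   */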
  /**
   * Generic bulk get operation for b+tree items. Public methods call this method.
   *
   * @param getBulkList list of operation parameters (item keys, element key range, and so on)
   * @param offset start index of the elements
   * @param count number of elements to fetch
   * @param reverse forward or backward
   * @param tc transcoder to serialize and deserialize value
   * @return future holding the map of item key and the fetched elements from that key
   */
  private <T> CollectionGetBulkFuture<Map<String, BTreeGetResult<Long, T>>> btreeGetBulk(
      final List<BTreeGetBulk<T>> getBulkList, final int offset,
      final int count, final boolean reverse, final Transcoder<T> tc) {
    final CountDownLatch latch = new CountDownLatch(getBulkList.size());
    final ConcurrentLinkedQueue<Operation> ops = new ConcurrentLinkedQueue<Operation>();
    final Map<String, BTreeGetResult<Long, T>> result =
        new ConcurrentHashMap<String, BTreeGetResult<Long, T>>();

    for (BTreeGetBulk<T> getBulk : getBulkList) {
      Operation op = opFact.bopGetBulk(getBulk,
          new BTreeGetBulkOperation.Callback() {
            @Override
            public void receivedStatus(OperationStatus status) {
            }

            @Override
            public void complete() {
              latch.countDown();
            }

            @Override
            public void gotKey(String key, int elementCount, OperationStatus status) {
              // note: 'reverse' is not consulted here; long-type bkey results
              // keep the TreeMap's natural ascending order
              result.put(key, new BTreeGetResult<Long, T>(
                  (elementCount > 0)
                      ? new TreeMap<Long, BTreeElement<Long, T>>() : null,
                  new CollectionOperationStatus(status)));
            }

            @Override
            public void gotElement(String key, Object subkey, int flags,
                byte[] eflag, byte[] data) {
              result.get(key).addElement(
                  new BTreeElement<Long, T>((Long) subkey, eflag,
                      tc.decode(new CachedData(flags, data, tc.getMaxSize()))));
            }
          });
      ops.add(op);
      addOp(getBulk.getRepresentKey(), op);
    }

    return new CollectionGetBulkFuture<Map<String, BTreeGetResult<Long, T>>>(
        latch, ops, result, operationTimeout);
  }

  /**
   * Generic bulk get operation for b+tree items using byte-array type bkeys.
   * Public methods call this method.
   *
   * @param getBulkList list of operation parameters (item keys, element key range, and so on)
   * @param offset start index of the elements
   * @param count number of elements to fetch
   * @param reverse forward or backward
   * @param tc transcoder to serialize and deserialize value
   * @return future holding the map of item key and the fetched elements from that key
   */
  private <T> CollectionGetBulkFuture<Map<String, BTreeGetResult<ByteArrayBKey, T>>> btreeGetBulkByteArrayBKey(
      final List<BTreeGetBulk<T>> getBulkList, final int offset,
      final int count, final boolean reverse, final Transcoder<T> tc) {
    final CountDownLatch latch = new CountDownLatch(getBulkList.size());
    final ConcurrentLinkedQueue<Operation> ops = new ConcurrentLinkedQueue<Operation>();
    final Map<String, BTreeGetResult<ByteArrayBKey, T>> result =
        new ConcurrentHashMap<String, BTreeGetResult<ByteArrayBKey, T>>();

    for (BTreeGetBulk<T> getBulk : getBulkList) {
      Operation op = opFact.bopGetBulk(getBulk,
          new BTreeGetBulkOperation.Callback() {
            @Override
            public void receivedStatus(OperationStatus status) {
            }

            @Override
            public void complete() {
              latch.countDown();
            }

            @Override
            public void gotKey(String key, int elementCount, OperationStatus status) {
              TreeMap<ByteArrayBKey, BTreeElement<ByteArrayBKey, T>> tree = null;
              if (elementCount > 0) {
                tree = new ByteArrayTreeMap<ByteArrayBKey, BTreeElement<ByteArrayBKey, T>>(
                    (reverse) ? Collections.reverseOrder() : null);
              }
              result.put(key, new BTreeGetResult<ByteArrayBKey, T>(tree,
                  new CollectionOperationStatus(status)));
            }

            @Override
            public void gotElement(String key, Object subkey, int flags,
                byte[] eflag, byte[] data) {
              result.get(key).addElement(
                  new BTreeElement<ByteArrayBKey, T>(
                      new ByteArrayBKey((byte[]) subkey), eflag,
                      tc.decode(new CachedData(flags, data, tc.getMaxSize()))));
            }
          });
      ops.add(op);
      addOp(getBulk.getRepresentKey(), op);
    }

    return new CollectionGetBulkFuture<Map<String, BTreeGetResult<ByteArrayBKey, T>>>(
        latch, ops, result, operationTimeout);
  }

  @Override
  public CollectionFuture<Long> asyncBopIncr(String key, long subkey, int by) {
    CollectionMutate collectionMutate = new BTreeMutate(Mutator.incr, by);
    return asyncCollectionMutate(key, String.valueOf(subkey), collectionMutate);
  }

  @Override
  public CollectionFuture<Long> asyncBopIncr(String key, byte[] subkey, int by) {
    CollectionMutate collectionMutate = new BTreeMutate(Mutator.incr, by);
    return asyncCollectionMutate(key, BTreeUtil.toHex(subkey), collectionMutate);
  }

  @Override
  public CollectionFuture<Long> asyncBopDecr(String key, long subkey, int by) {
    CollectionMutate collectionMutate = new BTreeMutate(Mutator.decr, by);
    return asyncCollectionMutate(key, String.valueOf(subkey), collectionMutate);
  }

  @Override
  public CollectionFuture<Long> asyncBopDecr(String key, byte[] subkey, int by) {
    CollectionMutate collectionMutate = new BTreeMutate(Mutator.decr, by);
    return asyncCollectionMutate(key, BTreeUtil.toHex(subkey), collectionMutate);
  }
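  /*
   * Usage sketch (illustrative only; key, bkey, and delta are assumptions):
   * asyncCollectionMutate below parses the server's reply as the element's
   * new numeric value, so the element being mutated must hold a
   * decimal-string value.
   *
   *   CollectionFuture<Long> future = client.asyncBopIncr("counter", 1L, 5);
   *   Long newValue = future.get(1000L, TimeUnit.MILLISECONDS);
   *   // newValue is null when the reply was not numeric or the operation failed
   */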
  /**
   * Generic increment/decrement operation for b+tree items.
   * Public methods call this method.
   *
   * @param k b+tree item's key
   * @param subkey element key
   * @param collectionMutate operation parameters (increment amount and so on)
   * @return future holding the incremented or decremented value
   */
  private CollectionFuture<Long> asyncCollectionMutate(final String k,
      final String subkey, final CollectionMutate collectionMutate) {
    final CountDownLatch latch = new CountDownLatch(1);

    final CollectionFuture<Long> rv = new CollectionFuture<Long>(latch,
        operationTimeout);

    Operation op = opFact.collectionMutate(k, subkey, collectionMutate,
        new OperationCallback() {
          @Override
          public void receivedStatus(OperationStatus status) {
            if (status.isSuccess()) {
              try {
                // the server returns the new value as a decimal string
                rv.set(Long.valueOf(status.getMessage()),
                    new CollectionOperationStatus(
                        new OperationStatus(true, "END")));
              } catch (NumberFormatException e) {
                rv.set(null, new CollectionOperationStatus(
                    new OperationStatus(false, status.getMessage())));

                if (getLogger().isDebugEnabled()) {
                  getLogger().debug("Key(%s), Bkey(%s) Unknown response : %s",
                      k, subkey, status);
                }
              }
              return;
            }

            rv.set(null, new CollectionOperationStatus(status));

            if (getLogger().isDebugEnabled()) {
              getLogger().debug("Key(%s), Bkey(%s) Unknown response : %s",
                  k, subkey, status);
            }
          }

          @Override
          public void complete() {
            latch.countDown();
          }
        });

    rv.setOperation(op);
    addOp(k, op);
    return rv;
  }

  /**
   * Get the client version.
   *
   * @return version string, or "NONE" if the version cannot be determined
   */
  private static String getVersion() {
    Enumeration<URL> resEnum;
    try {
      resEnum = Thread.currentThread().getContextClassLoader()
          .getResources(JarFile.MANIFEST_NAME);
      while (resEnum.hasMoreElements()) {
        try {
          URL url = resEnum.nextElement();
          InputStream is = url.openStream();
          if (is != null) {
            try {
              Manifest manifest = new Manifest(is);
              java.util.jar.Attributes mainAttribs = manifest.getMainAttributes();
              String version = mainAttribs.getValue("Arcusclient-Version");
              if (version != null) {
                return version;
              }
            } finally {
              is.close();
            }
          }
        } catch (Exception e) {
          // ignore this manifest and keep scanning the remaining ones
        }
      }
    } catch (IOException e1) {
      return "NONE";
    }
    return "NONE";
  }
}