/*
* Copyright (c) 2008-2015, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.client.cache.impl;
import com.hazelcast.cache.CacheStatistics;
import com.hazelcast.cache.impl.ICacheInternal;
import com.hazelcast.cache.impl.client.CacheGetAllRequest;
import com.hazelcast.cache.impl.client.CacheGetRequest;
import com.hazelcast.cache.impl.client.CachePutAllRequest;
import com.hazelcast.cache.impl.client.CacheSizeRequest;
import com.hazelcast.cache.impl.nearcache.NearCache;
import com.hazelcast.client.impl.HazelcastClientInstanceImpl;
import com.hazelcast.client.spi.ClientContext;
import com.hazelcast.client.spi.ClientPartitionService;
import com.hazelcast.client.spi.impl.ClientInvocation;
import com.hazelcast.client.spi.impl.ClientInvocationFuture;
import com.hazelcast.config.CacheConfig;
import com.hazelcast.core.ExecutionCallback;
import com.hazelcast.core.ICompletableFuture;
import com.hazelcast.internal.serialization.SerializationService;
import com.hazelcast.map.impl.MapEntries;
import com.hazelcast.nio.serialization.Data;
import com.hazelcast.util.ExceptionUtil;
import com.hazelcast.util.executor.DelegatingFuture;
import javax.cache.CacheException;
import javax.cache.expiry.ExpiryPolicy;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import static com.hazelcast.cache.impl.CacheProxyUtil.validateNotNull;
/**
* Hazelcast provides extension functionality on top of the default spec interface {@link javax.cache.Cache};
* {@link com.hazelcast.cache.ICache} is the designated extension interface.
* {@code AbstractClientCacheProxy} provides the implementation of various {@link com.hazelcast.cache.ICache}
* methods. Note: this partial implementation is used by the client.
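* <p>
* A minimal usage sketch (an editor's illustration, not part of the original source; the cache
* name {@code "myCache"} and the configuration are assumptions):
* <pre>{@code
* CachingProvider provider = Caching.getCachingProvider(
*         HazelcastClientCachingProvider.class.getName());
* CacheManager manager = provider.getCacheManager();
* ICache<String, Integer> cache = manager
*         .createCache("myCache", new CacheConfig<String, Integer>())
*         .unwrap(ICache.class);
* cache.put("answer", 42);                                       // synchronous path
* ICompletableFuture<Integer> future = cache.getAsync("answer"); // asynchronous path
* }</pre>
*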
* @param <K> the type of key
* @param <V> the type of value
*/
abstract class AbstractClientCacheProxy<K, V>
extends AbstractClientInternalCacheProxy<K, V>
implements ICacheInternal<K, V> {
protected AbstractClientCacheProxy(CacheConfig<K, V> cacheConfig, ClientContext clientContext,
HazelcastClientCacheManager cacheManager) {
super(cacheConfig, clientContext, cacheManager);
}
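/**
 * Tries to serve a get from the client-side Near Cache, if one is configured.
 * Returns the cached value (wrapped in a completed future for async callers),
 * or {@code null} when there is no usable Near Cache entry for the key.
 */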
protected Object getFromNearCache(Data keyData, boolean async) {
Object cached = nearCache != null ? nearCache.get(keyData) : null;
if (cached != null && NearCache.NULL_OBJECT != cached) {
return !async ? cached : createCompletedFuture(cached);
}
return null;
}
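/**
 * Shared implementation behind {@code get} and {@code getAsync}: consults the Near Cache first,
 * then sends a partition-routed {@link CacheGetRequest}. In async mode it returns a
 * {@link DelegatingFuture} and populates the Near Cache from a callback; in sync mode it blocks
 * on the invocation, updates the Near Cache and statistics, and returns the deserialized value.
 */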
protected Object getInternal(K key, ExpiryPolicy expiryPolicy, boolean async) {
final long start = System.nanoTime();
ensureOpen();
validateNotNull(key);
final Data keyData = toData(key);
Object cached = getFromNearCache(keyData, async);
if (cached != null) {
return cached;
}
CacheGetRequest request = new CacheGetRequest(nameWithPrefix, keyData, expiryPolicy,
cacheConfig.getInMemoryFormat());
ClientInvocationFuture future;
try {
final int partitionId = clientContext.getPartitionService().getPartitionId(key);
final HazelcastClientInstanceImpl client = (HazelcastClientInstanceImpl) clientContext.getHazelcastInstance();
final ClientInvocation clientInvocation = new ClientInvocation(client, request, partitionId);
future = clientInvocation.invoke();
} catch (Exception e) {
throw ExceptionUtil.rethrow(e);
}
if (async) {
if (nearCache != null) {
future.andThenInternal(new ExecutionCallback<Data>() {
public void onResponse(Data valueData) {
storeInNearCache(keyData, valueData, null);
if (statisticsEnabled) {
handleStatisticsOnGet(start, valueData);
}
}
public void onFailure(Throwable t) {
}
});
}
return new DelegatingFuture<V>(future, clientContext.getSerializationService());
} else {
try {
Object value = future.get();
Object result = toObject(value);
if (nearCache != null) {
storeInNearCache(keyData, toData(value), (V) result);
}
if (statisticsEnabled) {
handleStatisticsOnGet(start, result);
}
return result;
} catch (Throwable e) {
throw ExceptionUtil.rethrowAllowedTypeFirst(e, CacheException.class);
}
}
}
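/** Records a cache hit or miss, plus the elapsed get time, based on the response value. */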
protected void handleStatisticsOnGet(long start, Object response) {
if (response == null) {
statistics.increaseCacheMisses();
} else {
statistics.increaseCacheHits();
}
statistics.addGetTimeNanos(System.nanoTime() - start);
}
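// The async variants below delegate to the *Internal helpers inherited from
// AbstractClientInternalCacheProxy; the trailing boolean arguments select the variant
// behavior (for example get-and-X semantics and asynchronous completion).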
@Override
public ICompletableFuture<V> getAsync(K key) {
return getAsync(key, null);
}
@Override
public ICompletableFuture<V> getAsync(K key, ExpiryPolicy expiryPolicy) {
return (ICompletableFuture<V>) getInternal(key, expiryPolicy, true);
}
@Override
public ICompletableFuture<Void> putAsync(K key, V value) {
return putAsync(key, value, null);
}
@Override
public ICompletableFuture<Void> putAsync(K key, V value, ExpiryPolicy expiryPolicy) {
return (ICompletableFuture<Void>) putInternal(key, value, expiryPolicy, false, true, true);
}
@Override
public ICompletableFuture<Boolean> putIfAbsentAsync(K key, V value) {
return (ICompletableFuture<Boolean>) putIfAbsentInternal(key, value, null, false, true);
}
@Override
public ICompletableFuture<Boolean> putIfAbsentAsync(K key, V value, ExpiryPolicy expiryPolicy) {
return (ICompletableFuture<Boolean>) putIfAbsentInternal(key, value, expiryPolicy, false, true);
}
@Override
public ICompletableFuture<V> getAndPutAsync(K key, V value) {
return getAndPutAsync(key, value, null);
}
@Override
public ICompletableFuture<V> getAndPutAsync(K key, V value, ExpiryPolicy expiryPolicy) {
return (ICompletableFuture<V>) putInternal(key, value, expiryPolicy, true, false, true);
}
@Override
public ICompletableFuture<Boolean> removeAsync(K key) {
return removeAsyncInternal(key, null, false, false, false, true);
}
@Override
public ICompletableFuture<Boolean> removeAsync(K key, V oldValue) {
return removeAsyncInternal(key, oldValue, true, false, false, true);
}
@Override
public ICompletableFuture<V> getAndRemoveAsync(K key) {
return removeAsyncInternal(key, null, false, true, false, true);
}
@Override
public ICompletableFuture<Boolean> replaceAsync(K key, V value) {
return replaceAsyncInternal(key, null, value, null, false, false, false, true);
}
@Override
public ICompletableFuture<Boolean> replaceAsync(K key, V value, ExpiryPolicy expiryPolicy) {
return replaceAsyncInternal(key, null, value, expiryPolicy, false, false, false, true);
}
@Override
public ICompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue) {
return replaceAsyncInternal(key, oldValue, newValue, null, true, false, false, true);
}
@Override
public ICompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue, ExpiryPolicy expiryPolicy) {
return replaceAsyncInternal(key, oldValue, newValue, expiryPolicy, true, false, false, true);
}
@Override
public ICompletableFuture<V> getAndReplaceAsync(K key, V value) {
return replaceAsyncInternal(key, null, value, null, false, true, false, true);
}
@Override
public ICompletableFuture<V> getAndReplaceAsync(K key, V value, ExpiryPolicy expiryPolicy) {
return replaceAsyncInternal(key, null, value, expiryPolicy, false, true, false, true);
}
@Override
public V get(K key, ExpiryPolicy expiryPolicy) {
return (V) getInternal(key, expiryPolicy, false);
}
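/**
 * Bulk get: serves whatever it can from the Near Cache, fetches the remaining keys with a
 * single {@link CacheGetAllRequest}, merges both into the result map, and populates the
 * Near Cache with the fetched entries.
 */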
@Override
public Map<K, V> getAll(Set<? extends K> keys, ExpiryPolicy expiryPolicy) {
final long start = System.nanoTime();
ensureOpen();
validateNotNull(keys);
if (keys.isEmpty()) {
return Collections.emptyMap();
}
final Set<Data> keySet = new HashSet<Data>(keys.size());
for (K key : keys) {
final Data k = toData(key);
keySet.add(k);
}
Map<K, V> result = getAllFromNearCache(keySet);
final CacheGetAllRequest request = new CacheGetAllRequest(nameWithPrefix, keySet, expiryPolicy);
final MapEntries mapEntries = invoke(request);
for (Map.Entry<Data, Data> dataEntry : mapEntries) {
final Data keyData = dataEntry.getKey();
final Data valueData = dataEntry.getValue();
final K key = toObject(keyData);
final V value = toObject(valueData);
result.put(key, value);
storeInNearCache(keyData, valueData, value);
}
if (statisticsEnabled) {
statistics.increaseCacheHits(mapEntries.size());
statistics.addGetTimeNanos(System.nanoTime() - start);
}
return result;
}
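/**
 * Collects all Near Cache hits for the given serialized keys into a new result map,
 * removing each satisfied key from {@code keySet} so that only misses are fetched remotely.
 */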
private Map<K, V> getAllFromNearCache(Set<Data> keySet) {
Map<K, V> result = new HashMap<K, V>();
if (nearCache != null) {
final Iterator<Data> iterator = keySet.iterator();
while (iterator.hasNext()) {
Data key = iterator.next();
Object cached = nearCache.get(key);
if (cached != null && !NearCache.NULL_OBJECT.equals(cached)) {
result.put((K) toObject(key), (V) cached);
iterator.remove();
}
}
}
return result;
}
@Override
public void put(K key, V value, ExpiryPolicy expiryPolicy) {
putInternal(key, value, expiryPolicy, false, true, false);
}
@Override
public V getAndPut(K key, V value, ExpiryPolicy expiryPolicy) {
return (V) putInternal(key, value, expiryPolicy, true, true, false);
}
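/**
 * Bulk put: groups the entries by owning partition, sends one {@link CachePutAllRequest}
 * per non-empty partition, and waits for all invocations to complete.
 */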
@Override
public void putAll(Map<? extends K, ? extends V> map, ExpiryPolicy expiryPolicy) {
final long start = System.nanoTime();
ensureOpen();
validateNotNull(map);
ClientPartitionService partitionService = clientContext.getPartitionService();
int partitionCount = partitionService.getPartitionCount();
try {
// First we fill entry set per partition
List<Map.Entry<Data, Data>>[] entriesPerPartition =
groupDataToPartitions(map, partitionService, partitionCount);
// Then we invoke the operations and sync on completion of these operations
putToAllPartitionsAndWaitForCompletion(entriesPerPartition, expiryPolicy, start);
} catch (Exception e) {
throw ExceptionUtil.rethrow(e);
}
}
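/**
 * Serializes each entry and buckets it into a per-partition list, indexed by the id of the
 * partition that owns the entry's key.
 */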
private List<Map.Entry<Data, Data>>[] groupDataToPartitions(Map<? extends K, ? extends V> map,
ClientPartitionService partitionService,
int partitionCount) {
List<Map.Entry<Data, Data>>[] entriesPerPartition = new List[partitionCount];
SerializationService serializationService = clientContext.getSerializationService();
for (Map.Entry<? extends K, ? extends V> entry : map.entrySet()) {
K key = entry.getKey();
V value = entry.getValue();
validateNotNull(key, value);
Data keyData = serializationService.toData(key);
Data valueData = serializationService.toData(value);
int partitionId = partitionService.getPartitionId(keyData);
List<Map.Entry<Data, Data>> entries = entriesPerPartition[partitionId];
if (entries == null) {
entries = new ArrayList<Map.Entry<Data, Data>>();
entriesPerPartition[partitionId] = entries;
}
entries.add(new AbstractMap.SimpleImmutableEntry<Data, Data>(keyData, valueData));
}
return entriesPerPartition;
}
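/** Pairs a per-partition putAll invocation future with the entries that invocation carries. */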
private static final class FutureEntriesTuple {
private final Future future;
private final List<Map.Entry<Data, Data>> entries;
private FutureEntriesTuple(Future future, List<Map.Entry<Data, Data>> entries) {
this.future = future;
this.entries = entries;
}
}
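/**
 * Sends a {@link CachePutAllRequest} for every partition that has entries and then blocks
 * until all of the resulting futures have completed.
 */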
private void putToAllPartitionsAndWaitForCompletion(List<Map.Entry<Data, Data>>[] entriesPerPartition,
ExpiryPolicy expiryPolicy, long start)
throws ExecutionException, InterruptedException {
List<FutureEntriesTuple> futureEntriesTuples =
new ArrayList<FutureEntriesTuple>(entriesPerPartition.length);
for (int partitionId = 0; partitionId < entriesPerPartition.length; partitionId++) {
List<Map.Entry<Data, Data>> entries = entriesPerPartition[partitionId];
if (entries != null) {
// TODO If there is a single entry, we could make use of a put operation since that is a bit cheaper
CachePutAllRequest request = new CachePutAllRequest(nameWithPrefix, cacheConfig.getInMemoryFormat(),
entries, expiryPolicy, partitionId);
Future f = invoke(request, partitionId, true);
futureEntriesTuples.add(new FutureEntriesTuple(f, entries));
}
}
waitResponseFromAllPartitionsForPutAll(futureEntriesTuples, start);
}
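/**
 * Waits on each per-partition future: on success it updates (or invalidates) the Near Cache
 * and the put statistics; on failure it invalidates the Near Cache for that batch, remembers
 * the first error, and rethrows it once all futures have been handled.
 */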
private void waitResponseFromAllPartitionsForPutAll(List<FutureEntriesTuple> futureEntriesTuples,
long start) {
Throwable error = null;
for (FutureEntriesTuple tuple : futureEntriesTuples) {
Future future = tuple.future;
List<Map.Entry<Data, Data>> entries = tuple.entries;
try {
future.get();
if (nearCache != null) {
handleNearCacheOnPutAll(entries, !cacheOnUpdate);
}
// Note that we count the batch put only if there is no exception while putting to target partition.
// In case of error, some of the entries might have been put and others might fail.
// But we simply ignore the actual put count here if there is an error.
if (statisticsEnabled) {
statistics.increaseCachePuts(entries.size());
}
} catch (Throwable t) {
if (nearCache != null) {
handleNearCacheOnPutAll(entries, true);
}
logger.finest("Error occurred while putting entries as batch!", t);
if (error == null) {
error = t;
}
}
}
if (statisticsEnabled) {
statistics.addPutTimeNanos(System.nanoTime() - start);
}
if (error != null) {
/*
 * There may be multiple exceptions, but we throw only the first one.
 * There are some ideas for propagating all exceptions to the caller, but each has drawbacks:
 * - `Throwable::addSuppressed` can be used to attach the other exceptions to the first one,
 *   but it is only available since JDK 7.
 * - `Throwable::initCause` can be used, but this is semantically wrong,
 *   since the other exceptions are not the cause of the first one.
 * - We may wrap all exceptions in a custom exception (such as `MultipleCacheException`),
 *   but then the caller may expect a different exception type, which causes problems.
 *   For example, see this TCK test:
 *   `org.jsr107.tck.integration.CacheWriterTest::shouldWriteThoughUsingPutAll_partialSuccess`
 *   In this test the exception is thrown by the `CacheWriter` and the caller side expects that exception.
 * As a result, we only throw the first exception; the others are suppressed by logging only.
 */
ExceptionUtil.rethrow(error);
}
}
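/** Either invalidates or populates the Near Cache for the given batch of entries. */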
private void handleNearCacheOnPutAll(List<Map.Entry<Data, Data>> entries, boolean invalidate) {
if (invalidate) {
for (Map.Entry<Data, Data> entry : entries) {
nearCache.remove(entry.getKey());
}
} else {
for (Map.Entry<Data, Data> entry : entries) {
nearCache.put(entry.getKey(), entry.getValue());
}
}
}
@Override
public boolean putIfAbsent(K key, V value, ExpiryPolicy expiryPolicy) {
return (Boolean) putIfAbsentInternal(key, value, expiryPolicy, true, false);
}
@Override
public boolean replace(K key, V oldValue, V newValue, ExpiryPolicy expiryPolicy) {
final long start = System.nanoTime();
final Future<Boolean> f = replaceAsyncInternal(key, oldValue, newValue, expiryPolicy, true, false, true, false);
try {
boolean replaced = f.get();
if (statisticsEnabled) {
handleStatisticsOnReplace(false, start, replaced);
}
return replaced;
} catch (Throwable e) {
throw ExceptionUtil.rethrowAllowedTypeFirst(e, CacheException.class);
}
}
@Override
public boolean replace(K key, V value, ExpiryPolicy expiryPolicy) {
final long start = System.nanoTime();
final Future<Boolean> f = replaceAsyncInternal(key, null, value, expiryPolicy, false, false, true, false);
try {
boolean replaced = f.get();
if (statisticsEnabled) {
handleStatisticsOnReplace(false, start, replaced);
}
return replaced;
} catch (Throwable e) {
throw ExceptionUtil.rethrowAllowedTypeFirst(e, CacheException.class);
}
}
@Override
public V getAndReplace(K key, V value, ExpiryPolicy expiryPolicy) {
final long start = System.nanoTime();
final Future<V> f = replaceAsyncInternal(key, null, value, expiryPolicy, false, true, true, false);
try {
V oldValue = f.get();
if (statisticsEnabled) {
handleStatisticsOnReplace(true, start, oldValue);
}
return oldValue;
} catch (Throwable e) {
throw ExceptionUtil.rethrowAllowedTypeFirst(e, CacheException.class);
}
}
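/**
 * Returns the cluster-wide entry count via a {@link CacheSizeRequest}, treating a
 * {@code null} response as {@code 0}.
 */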
@Override
public int size() {
ensureOpen();
try {
CacheSizeRequest request = new CacheSizeRequest(nameWithPrefix);
Integer result = invoke(request);
if (result == null) {
return 0;
}
return result;
} catch (Throwable t) {
throw ExceptionUtil.rethrowAllowedTypeFirst(t, CacheException.class);
}
}
@Override
public CacheStatistics getLocalCacheStatistics() {
return statistics;
}
}