All downloads are free. Search and download functionalities use the official Maven repository.

com.hazelcast.cache.impl.AbstractCacheProxy Maven / Gradle / Ivy

There is a newer version: 5.4.0
Show newest version
/*
 * Copyright (c) 2008-2016, Hazelcast, Inc. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.hazelcast.cache.impl;

import com.hazelcast.config.CacheConfig;
import com.hazelcast.core.ICompletableFuture;
import com.hazelcast.map.impl.MapEntries;
import com.hazelcast.nio.serialization.Data;
import com.hazelcast.internal.serialization.SerializationService;
import com.hazelcast.partition.InternalPartitionService;
import com.hazelcast.spi.InternalCompletableFuture;
import com.hazelcast.spi.NodeEngine;
import com.hazelcast.spi.Operation;
import com.hazelcast.spi.OperationFactory;
import com.hazelcast.spi.OperationService;
import com.hazelcast.util.ExceptionUtil;

import javax.cache.CacheException;
import javax.cache.expiry.ExpiryPolicy;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

import static com.hazelcast.cache.impl.CacheProxyUtil.validateNotNull;

/**
 * 

Hazelcast provides extension functionality to default spec interface {@link javax.cache.Cache}. * {@link com.hazelcast.cache.ICache} is the designated interface.

*

AbstractCacheProxyExtension provides implementation of various {@link com.hazelcast.cache.ICache} methods.

*

Note: this partial implementation is used by server or embedded mode cache.

* * @param the type of key. * @param the type of value. * @see com.hazelcast.cache.impl.CacheProxy * @see com.hazelcast.cache.ICache */ abstract class AbstractCacheProxy extends AbstractInternalCacheProxy { protected AbstractCacheProxy(CacheConfig cacheConfig, NodeEngine nodeEngine, ICacheService cacheService) { super(cacheConfig, nodeEngine, cacheService); } @Override public InternalCompletableFuture getAsync(K key) { return getAsync(key, null); } @Override public InternalCompletableFuture getAsync(K key, ExpiryPolicy expiryPolicy) { ensureOpen(); validateNotNull(key); final Data keyData = serializationService.toData(key); final Operation op = operationProvider.createGetOperation(keyData, expiryPolicy); return invoke(op, keyData, false); } @Override public InternalCompletableFuture putAsync(K key, V value) { return putAsync(key, value, null); } @Override public InternalCompletableFuture putAsync(K key, V value, ExpiryPolicy expiryPolicy) { return putAsyncInternal(key, value, expiryPolicy, false, false); } @Override public InternalCompletableFuture putIfAbsentAsync(K key, V value) { return putIfAbsentAsyncInternal(key, value, null, false); } @Override public InternalCompletableFuture putIfAbsentAsync(K key, V value, ExpiryPolicy expiryPolicy) { return putIfAbsentAsyncInternal(key, value, expiryPolicy, false); } @Override public ICompletableFuture getAndPutAsync(K key, V value) { return getAndPutAsync(key, value, null); } @Override public ICompletableFuture getAndPutAsync(K key, V value, ExpiryPolicy expiryPolicy) { return putAsyncInternal(key, value, expiryPolicy, true, false); } @Override public InternalCompletableFuture removeAsync(K key) { return removeAsyncInternal(key, null, false, false, false); } @Override public InternalCompletableFuture removeAsync(K key, V oldValue) { return removeAsyncInternal(key, oldValue, true, false, false); } @Override public ICompletableFuture getAndRemoveAsync(K key) { return removeAsyncInternal(key, null, false, true, false); } 
@Override public ICompletableFuture replaceAsync(K key, V value) { return replaceAsyncInternal(key, null, value, null, false, false, false); } @Override public ICompletableFuture replaceAsync(K key, V value, ExpiryPolicy expiryPolicy) { return replaceAsyncInternal(key, null, value, expiryPolicy, false, false, false); } @Override public ICompletableFuture replaceAsync(K key, V oldValue, V newValue) { return replaceAsyncInternal(key, oldValue, newValue, null, true, false, false); } @Override public ICompletableFuture replaceAsync(K key, V oldValue, V newValue, ExpiryPolicy expiryPolicy) { return replaceAsyncInternal(key, oldValue, newValue, expiryPolicy, true, false, false); } @Override public ICompletableFuture getAndReplaceAsync(K key, V value) { return replaceAsyncInternal(key, null, value, null, false, true, false); } @Override public ICompletableFuture getAndReplaceAsync(K key, V value, ExpiryPolicy expiryPolicy) { return replaceAsyncInternal(key, null, value, expiryPolicy, false, true, false); } @Override public V get(K key, ExpiryPolicy expiryPolicy) { final Future f = getAsync(key, expiryPolicy); try { return f.get(); } catch (Throwable e) { throw ExceptionUtil.rethrowAllowedTypeFirst(e, CacheException.class); } } @Override public Map getAll(Set keys, ExpiryPolicy expiryPolicy) { ensureOpen(); validateNotNull(keys); if (keys.isEmpty()) { return Collections.EMPTY_MAP; } final Set ks = new HashSet(keys.size()); for (K key : keys) { final Data k = serializationService.toData(key); ks.add(k); } final Map result = new HashMap(); final Collection partitions = getPartitionsForKeys(ks); try { OperationFactory factory = operationProvider.createGetAllOperationFactory(ks, expiryPolicy); OperationService operationService = getNodeEngine().getOperationService(); Map responses = operationService.invokeOnPartitions(getServiceName(), factory, partitions); for (Object response : responses.values()) { MapEntries mapEntries = serializationService.toObject(response); for 
(Map.Entry entry : mapEntries) { final V value = serializationService.toObject(entry.getValue()); final K key = serializationService.toObject(entry.getKey()); result.put(key, value); } } } catch (Throwable e) { throw ExceptionUtil.rethrowAllowedTypeFirst(e, CacheException.class); } return result; } @Override public void put(K key, V value, ExpiryPolicy expiryPolicy) { final InternalCompletableFuture f = putAsyncInternal(key, value, expiryPolicy, false, true); try { f.get(); } catch (Throwable e) { throw ExceptionUtil.rethrowAllowedTypeFirst(e, CacheException.class); } } @Override public V getAndPut(K key, V value, ExpiryPolicy expiryPolicy) { final InternalCompletableFuture f = putAsyncInternal(key, value, expiryPolicy, true, true); try { return f.get(); } catch (Throwable e) { throw ExceptionUtil.rethrowAllowedTypeFirst(e, CacheException.class); } } @Override public void putAll(Map map, ExpiryPolicy expiryPolicy) { ensureOpen(); validateNotNull(map); int partitionCount = partitionService.getPartitionCount(); try { // First we fill entry set per partition List>[] entriesPerPartition = groupDataToPartitions(map, partitionCount); // Then we invoke the operations and sync on completion of these operations putToAllPartitionsAndWaitForCompletion(entriesPerPartition, expiryPolicy); } catch (Exception e) { throw ExceptionUtil.rethrow(e); } } private List>[] groupDataToPartitions(Map map, int partitionCount) { List>[] entriesPerPartition = new List[partitionCount]; for (Map.Entry entry : map.entrySet()) { K key = entry.getKey(); V value = entry.getValue(); validateNotNull(key, value); Data keyData = serializationService.toData(key); Data valueData = serializationService.toData(value); int partitionId = partitionService.getPartitionId(keyData); List> entries = entriesPerPartition[partitionId]; if (entries == null) { entries = new ArrayList>(); entriesPerPartition[partitionId] = entries; } entries.add(new AbstractMap.SimpleImmutableEntry(keyData, valueData)); } return 
entriesPerPartition; } private void putToAllPartitionsAndWaitForCompletion(List>[] entriesPerPartition, ExpiryPolicy expiryPolicy) throws ExecutionException, InterruptedException { List futures = new ArrayList(entriesPerPartition.length); for (int partitionId = 0; partitionId < entriesPerPartition.length; partitionId++) { List> entries = entriesPerPartition[partitionId]; if (entries != null) { // TODO If there is a single entry, we could make use of a put operation since that is a bit cheaper Operation operation = operationProvider.createPutAllOperation(entries, expiryPolicy, partitionId); Future f = invoke(operation, partitionId, true); futures.add(f); } } Throwable error = null; for (Future future : futures) { try { future.get(); } catch (Throwable t) { logger.finest("Error occurred while putting entries as batch!", t); if (error == null) { error = t; } } } if (error != null) { /* * There maybe multiple exceptions but we throw only the first one. * There are some ideas to throw all exceptions to caller but all of them have drawbacks: * - `Thread::addSuppressed` can be used to add other exceptions to the first one * but it is available since JDK 7. * - `Thread::initCause` can be used but this is wrong as semantic * since the other exceptions are not cause of the first one. * - We may wrap all exceptions in our custom exception (such as `MultipleCacheException`) * but in this case caller may wait different exception type and this idea causes problem. * For example see this TCK test: * `org.jsr107.tck.integration.CacheWriterTest::shouldWriteThoughUsingPutAll_partialSuccess` * In this test exception is thrown at `CacheWriter` and caller side expects this exception. * So as a result, we only throw the first exception and others are suppressed by only logging. 
*/ ExceptionUtil.rethrow(error); } } @Override public boolean putIfAbsent(K key, V value, ExpiryPolicy expiryPolicy) { final Future f = putIfAbsentAsyncInternal(key, value, expiryPolicy, true); try { return f.get(); } catch (Throwable e) { throw ExceptionUtil.rethrowAllowedTypeFirst(e, CacheException.class); } } @Override public boolean replace(K key, V oldValue, V newValue, ExpiryPolicy expiryPolicy) { final Future f = replaceAsyncInternal(key, oldValue, newValue, expiryPolicy, true, false, true); try { return f.get(); } catch (Throwable e) { throw ExceptionUtil.rethrowAllowedTypeFirst(e, CacheException.class); } } @Override public boolean replace(K key, V value, ExpiryPolicy expiryPolicy) { final Future f = replaceAsyncInternal(key, null, value, expiryPolicy, false, false, true); try { return f.get(); } catch (Throwable e) { throw ExceptionUtil.rethrowAllowedTypeFirst(e, CacheException.class); } } @Override public V getAndReplace(K key, V value, ExpiryPolicy expiryPolicy) { final Future f = replaceAsyncInternal(key, null, value, expiryPolicy, false, true, true); try { return f.get(); } catch (Throwable e) { throw ExceptionUtil.rethrowAllowedTypeFirst(e, CacheException.class); } } @Override public int size() { ensureOpen(); try { final SerializationService serializationService = getNodeEngine().getSerializationService(); OperationFactory operationFactory = operationProvider.createSizeOperationFactory(); final Map results = getNodeEngine().getOperationService() .invokeOnAllPartitions(getServiceName(), operationFactory); int total = 0; for (Object result : results.values()) { total += (Integer) serializationService.toObject(result); } return total; } catch (Throwable t) { throw ExceptionUtil.rethrowAllowedTypeFirst(t, CacheException.class); } } private Set getPartitionsForKeys(Set keys) { final InternalPartitionService partitionService = getNodeEngine().getPartitionService(); final int partitions = partitionService.getPartitionCount(); final int capacity = 
Math.min(partitions, keys.size()); final Set partitionIds = new HashSet(capacity); final Iterator iterator = keys.iterator(); while (iterator.hasNext() && partitionIds.size() < partitions) { final Data key = iterator.next(); partitionIds.add(partitionService.getPartitionId(key)); } return partitionIds; } }