// com.hazelcast.map.impl.proxy.MapProxySupport (retrieved from a Maven/Gradle/Ivy artifact browser)
/*
* Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.map.impl.proxy;
import com.hazelcast.aggregation.Aggregator;
import com.hazelcast.cluster.Address;
import com.hazelcast.config.EntryListenerConfig;
import com.hazelcast.config.InMemoryFormat;
import com.hazelcast.config.IndexConfig;
import com.hazelcast.config.IndexType;
import com.hazelcast.config.ListenerConfig;
import com.hazelcast.config.MapConfig;
import com.hazelcast.config.MapPartitionLostListenerConfig;
import com.hazelcast.config.MapStoreConfig;
import com.hazelcast.core.EntryEventType;
import com.hazelcast.core.EntryView;
import com.hazelcast.core.HazelcastInstanceAware;
import com.hazelcast.core.ReadOnly;
import com.hazelcast.internal.locksupport.LockProxySupport;
import com.hazelcast.internal.locksupport.LockSupportServiceImpl;
import com.hazelcast.internal.monitor.impl.LocalMapStatsImpl;
import com.hazelcast.internal.nio.ClassLoaderUtil;
import com.hazelcast.internal.partition.IPartition;
import com.hazelcast.internal.partition.IPartitionService;
import com.hazelcast.internal.serialization.Data;
import com.hazelcast.internal.serialization.SerializationService;
import com.hazelcast.internal.util.ExceptionUtil;
import com.hazelcast.internal.util.IterableUtil;
import com.hazelcast.internal.util.IterationType;
import com.hazelcast.internal.util.MutableLong;
import com.hazelcast.internal.util.Timer;
import com.hazelcast.internal.util.collection.PartitionIdSet;
import com.hazelcast.map.EntryProcessor;
import com.hazelcast.map.IMap;
import com.hazelcast.map.LocalMapStats;
import com.hazelcast.map.MapInterceptor;
import com.hazelcast.map.impl.EntryEventFilter;
import com.hazelcast.map.impl.MapEntries;
import com.hazelcast.map.impl.MapService;
import com.hazelcast.map.impl.MapServiceContext;
import com.hazelcast.map.impl.PartitionContainer;
import com.hazelcast.map.impl.event.MapEventPublisher;
import com.hazelcast.map.impl.operation.AddIndexOperation;
import com.hazelcast.map.impl.operation.AddInterceptorOperationSupplier;
import com.hazelcast.map.impl.operation.AwaitMapFlushOperation;
import com.hazelcast.map.impl.operation.IsEmptyOperationFactory;
import com.hazelcast.map.impl.operation.IsKeyLoadFinishedOperation;
import com.hazelcast.map.impl.operation.IsPartitionLoadedOperationFactory;
import com.hazelcast.map.impl.operation.MapOperation;
import com.hazelcast.map.impl.operation.MapOperationProvider;
import com.hazelcast.map.impl.operation.RemoveInterceptorOperationSupplier;
import com.hazelcast.map.impl.query.Query;
import com.hazelcast.map.impl.query.QueryEngine;
import com.hazelcast.map.impl.query.QueryEventFilter;
import com.hazelcast.map.impl.query.Result;
import com.hazelcast.map.impl.query.Target;
import com.hazelcast.map.impl.query.Target.TargetMode;
import com.hazelcast.map.impl.querycache.QueryCacheContext;
import com.hazelcast.map.impl.querycache.subscriber.QueryCacheEndToEndProvider;
import com.hazelcast.map.impl.querycache.subscriber.SubscriberContext;
import com.hazelcast.map.impl.recordstore.RecordStore;
import com.hazelcast.map.listener.MapListener;
import com.hazelcast.map.listener.MapPartitionLostListener;
import com.hazelcast.partition.PartitioningStrategy;
import com.hazelcast.projection.Projection;
import com.hazelcast.query.PartitionPredicate;
import com.hazelcast.query.Predicate;
import com.hazelcast.query.impl.IndexUtils;
import com.hazelcast.query.impl.predicates.TruePredicate;
import com.hazelcast.spi.impl.AbstractDistributedObject;
import com.hazelcast.spi.impl.InitializingObject;
import com.hazelcast.spi.impl.InternalCompletableFuture;
import com.hazelcast.spi.impl.NodeEngine;
import com.hazelcast.spi.impl.eventservice.EventFilter;
import com.hazelcast.spi.impl.operationservice.BinaryOperationFactory;
import com.hazelcast.spi.impl.operationservice.Operation;
import com.hazelcast.spi.impl.operationservice.OperationFactory;
import com.hazelcast.spi.impl.operationservice.OperationService;
import com.hazelcast.spi.impl.operationservice.impl.InvocationFuture;
import com.hazelcast.spi.properties.HazelcastProperties;
import com.hazelcast.spi.properties.HazelcastProperty;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.EventListener;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiConsumer;
import java.util.function.Function;
import java.util.function.Supplier;
import static com.hazelcast.core.EntryEventType.CLEAR_ALL;
import static com.hazelcast.internal.util.CollectionUtil.asIntegerList;
import static com.hazelcast.internal.util.ConcurrencyUtil.CALLER_RUNS;
import static com.hazelcast.internal.util.ExceptionUtil.rethrow;
import static com.hazelcast.internal.util.InvocationUtil.invokeOnStableClusterSerial;
import static com.hazelcast.internal.util.IterableUtil.nullToEmpty;
import static com.hazelcast.internal.util.MapUtil.createHashMap;
import static com.hazelcast.internal.util.MapUtil.toIntSize;
import static com.hazelcast.internal.util.Preconditions.checkFalse;
import static com.hazelcast.internal.util.Preconditions.checkNotNull;
import static com.hazelcast.internal.util.SetUtil.createHashSet;
import static com.hazelcast.internal.util.ThreadUtil.getThreadId;
import static com.hazelcast.internal.util.TimeUtil.timeInMsOrOneIfResultIsZero;
import static com.hazelcast.map.impl.EntryRemovingProcessor.ENTRY_REMOVING_PROCESSOR;
import static com.hazelcast.map.impl.MapService.SERVICE_NAME;
import static com.hazelcast.map.impl.query.Target.createPartitionTarget;
import static com.hazelcast.query.Predicates.alwaysFalse;
import static com.hazelcast.spi.impl.InternalCompletableFuture.newCompletedFuture;
import static java.lang.Math.ceil;
import static java.lang.Math.log10;
import static java.lang.Math.min;
import static java.util.Collections.singletonList;
import static java.util.Collections.singletonMap;
abstract class MapProxySupport
extends AbstractDistributedObject
implements IMap, InitializingObject {
// Shared precondition messages used by the concrete proxy subclasses.
protected static final String NULL_KEY_IS_NOT_ALLOWED = "Null key is not allowed!";
protected static final String NULL_KEYS_ARE_NOT_ALLOWED = "Null keys collection is not allowed!";
protected static final String NULL_VALUE_IS_NOT_ALLOWED = "Null value is not allowed!";
protected static final String NULL_PREDICATE_IS_NOT_ALLOWED = "Predicate should not be null!";
protected static final String NULL_LISTENER_IS_NOT_ALLOWED = "Null listener is not allowed!";
protected static final String NULL_AGGREGATOR_IS_NOT_ALLOWED = "Aggregator should not be null!";
protected static final String NULL_PROJECTION_IS_NOT_ALLOWED = "Projection should not be null!";
protected static final String NULL_TTL_UNIT_IS_NOT_ALLOWED = "Null ttlUnit is not allowed!";
protected static final String NULL_MAX_IDLE_UNIT_IS_NOT_ALLOWED = "Null maxIdleUnit is not allowed!";
protected static final String NULL_TIMEUNIT_IS_NOT_ALLOWED = "Null timeunit is not allowed!";
protected static final String NULL_BIFUNCTION_IS_NOT_ALLOWED = "Null BiFunction is not allowed!";
protected static final String NULL_FUNCTION_IS_NOT_ALLOWED = "Null Function is not allowed!";
protected static final String NULL_CONSUMER_IS_NOT_ALLOWED = "Null Consumer is not allowed!";
// Exponential back-off bounds (milliseconds) for the waitUntilLoaded() polling loop.
private static final int INITIAL_WAIT_LOAD_SLEEP_MILLIS = 10;
private static final int MAXIMAL_WAIT_LOAD_SLEEP_MILLIS = 1000;
/**
 * Retry count when an interceptor registration/de-registration operation fails.
 */
private static final int MAX_RETRIES = 100;
/**
 * Defines the batch size for operations of {@link IMap#putAll(Map)} and {@link IMap#setAll(Map)} calls.
 *
 * A value of {@code 0} disables the batching and will send a single operation per member with all map entries.
 *
 * If you set this value too high, you may ran into OOME or blocked network pipelines due to huge operations.
 * If you set this value too low, you will lower the performance of the putAll() operation.
 */
private static final HazelcastProperty MAP_PUT_ALL_BATCH_SIZE
= new HazelcastProperty("hazelcast.map.put.all.batch.size", 0);
/**
 * Defines the initial size of entry arrays per partition for {@link IMap#putAll(Map)} and {@link IMap#setAll(Map)} calls.
 *
 * {@link IMap#putAll(Map)} / {@link IMap#setAll(Map)} splits up the entries of the user input map per partition,
 * to eventually send the entries the correct target nodes.
 * So the method creates multiple arrays with map entries per partition.
 * This value determines how the initial size of these arrays is calculated.
 *
 * The default value of {@code 0} uses an educated guess, depending on the map size, which is a good overall strategy.
 * If you insert entries which don't match a normal partition distribution you should configure this factor.
 * The initial size is calculated by this formula:
 * {@code initialSize = ceil(MAP_PUT_ALL_INITIAL_SIZE_FACTOR * map.size() / PARTITION_COUNT)}
 *
 * As a rule of thumb you can try the following values:
 *
 * - {@code 10.0} for map sizes up to 500 entries
 * - {@code 5.0} for map sizes between 500 and 5000 entries
 * - {@code 1.5} for map sizes between up to 50000 entries
 * - {@code 1.0} for map sizes beyond 50000 entries
 *
 *
 * If you set this value too high, you will waste memory.
 * If you set this value too low, you will suffer from expensive {@link java.util.Arrays#copyOf} calls.
 */
private static final HazelcastProperty MAP_PUT_ALL_INITIAL_SIZE_FACTOR
= new HazelcastProperty("hazelcast.map.put.all.initial.size.factor", 0);
// Per-map collaborators, resolved once in the constructor and reused on every hot path.
protected final String name;
protected final LocalMapStatsImpl localMapStats;
protected final LockProxySupport lockSupport;
protected final PartitioningStrategy partitionStrategy;
protected final MapServiceContext mapServiceContext;
protected final IPartitionService partitionService;
protected final Address thisAddress;
protected final OperationService operationService;
protected final SerializationService serializationService;
// Cached from mapConfig.isStatisticsEnabled(); guards all stat-recording branches below.
protected final boolean statisticsEnabled;
protected final MapConfig mapConfig;
// not final for testing purposes
protected MapOperationProvider operationProvider;
private final int putAllBatchSize;
private final float putAllInitialSizeFactor;
/**
 * Creates the proxy support for one named map, resolving every per-map
 * collaborator (operation provider, local stats, lock support, partitioning
 * strategy, put-all tuning properties) from the service context up front.
 *
 * @param name       name of the map this proxy serves
 * @param service    the map service backing the proxy
 * @param nodeEngine node engine of the local member
 * @param mapConfig  configuration of the map
 */
protected MapProxySupport(String name, MapService service, NodeEngine nodeEngine, MapConfig mapConfig) {
super(nodeEngine, service);
this.name = name;
HazelcastProperties properties = nodeEngine.getProperties();
this.mapServiceContext = service.getMapServiceContext();
this.mapConfig = mapConfig;
this.partitionStrategy = mapServiceContext.getPartitioningStrategy(mapConfig.getName(),
mapConfig.getPartitioningStrategyConfig());
this.localMapStats = mapServiceContext.getLocalMapStatsProvider().getLocalMapStatsImpl(name);
this.partitionService = getNodeEngine().getPartitionService();
this.lockSupport = new LockProxySupport(MapService.getObjectNamespace(name),
LockSupportServiceImpl.getMaxLeaseTimeInMillis(properties));
this.operationProvider = mapServiceContext.getMapOperationProvider(name);
this.operationService = nodeEngine.getOperationService();
this.serializationService = nodeEngine.getSerializationService();
this.thisAddress = nodeEngine.getClusterService().getThisAddress();
this.statisticsEnabled = mapConfig.isStatisticsEnabled();
this.putAllBatchSize = properties.getInteger(MAP_PUT_ALL_BATCH_SIZE);
this.putAllInitialSizeFactor = properties.getFloat(MAP_PUT_ALL_INITIAL_SIZE_FACTOR);
}
/**
 * Returns the name of this map.
 */
@Override
public final String getName() {
    return this.name;
}
/**
 * Returns the name of the map service backing this proxy.
 */
@Override
public final String getServiceName() {
    return MapService.SERVICE_NAME;
}
/**
 * Returns the {@link MapConfig} this proxy was created with.
 */
public MapConfig getMapConfig() {
    return this.mapConfig;
}
/**
 * Initializes the proxy: registers configured listeners, creates configured
 * indexes, and — for an EAGER map store — blocks in {@code waitUntilLoaded()}
 * until initial loading completes (see initializeMapStoreLoad below).
 */
@Override
public void initialize() {
initializeListeners();
initializeIndexes();
initializeMapStoreLoad();
}
/**
 * Registers all entry listeners and partition-lost listeners declared in the
 * map configuration. Listener configs that yield no implementation (no
 * instance and no class name) are silently skipped.
 */
private void initializeListeners() {
for (EntryListenerConfig listenerConfig : mapConfig.getEntryListenerConfigs()) {
MapListener listener = initializeListener(listenerConfig);
if (listener != null) {
if (listenerConfig.isLocal()) {
addLocalEntryListenerInternal(listener);
} else {
// null predicate: listener receives events for all entries
addEntryListenerInternal(listener, null, listenerConfig.isIncludeValue());
}
}
}
for (MapPartitionLostListenerConfig listenerConfig : mapConfig.getPartitionLostListenerConfigs()) {
MapPartitionLostListener listener = initializeListener(listenerConfig);
if (listener != null) {
addPartitionLostListenerInternal(listener);
}
}
}
/**
 * Materializes the listener described by the given config and, when the
 * listener is {@link HazelcastInstanceAware}, injects the local instance.
 * <p>
 * Fix: the generic type parameter declaration was missing ({@code T} was
 * referenced without being declared), which does not compile; restored as
 * {@code <T extends EventListener>}.
 *
 * @param listenerConfig the listener configuration
 * @return the listener instance, or {@code null} if the config defines none
 */
private <T extends EventListener> T initializeListener(ListenerConfig listenerConfig) {
    T listener = getListenerImplOrNull(listenerConfig);
    if (listener instanceof HazelcastInstanceAware) {
        ((HazelcastInstanceAware) listener).setHazelcastInstance(getNodeEngine().getHazelcastInstance());
    }
    return listener;
}
/**
 * Resolves the listener from the config: a pre-built implementation wins;
 * otherwise the configured class name is instantiated via the config class
 * loader; otherwise {@code null}.
 * <p>
 * Fix: the generic type parameter declaration was missing ({@code T} was
 * referenced without being declared), which does not compile; restored as
 * {@code <T extends EventListener>}.
 *
 * @param listenerConfig the listener configuration
 * @return the listener, or {@code null} when neither an implementation nor a
 *         class name is configured
 */
@SuppressWarnings("unchecked")
private <T extends EventListener> T getListenerImplOrNull(ListenerConfig listenerConfig) {
    EventListener implementation = listenerConfig.getImplementation();
    if (implementation != null) {
        return (T) implementation;
    }
    String className = listenerConfig.getClassName();
    if (className != null) {
        try {
            ClassLoader configClassLoader = getNodeEngine().getConfigClassLoader();
            return ClassLoaderUtil.newInstance(configClassLoader, className);
        } catch (Exception e) {
            throw rethrow(e);
        }
    }
    // returning null to preserve previous behavior
    return null;
}
/**
 * Creates every index declared in the map configuration.
 */
private void initializeIndexes() {
    mapConfig.getIndexConfigs().forEach(this::addIndex);
}
/**
 * For an enabled map store configured with EAGER initial load, blocks until
 * the initial load has finished; otherwise does nothing.
 */
private void initializeMapStoreLoad() {
    MapStoreConfig storeConfig = mapConfig.getMapStoreConfig();
    if (storeConfig == null || !storeConfig.isEnabled()) {
        return;
    }
    if (MapStoreConfig.InitialLoadMode.EAGER.equals(storeConfig.getInitialLoadMode())) {
        waitUntilLoaded();
    }
}
/**
 * Returns the partitioning strategy used to route this map's keys.
 */
public PartitioningStrategy getPartitionStrategy() {
    return this.partitionStrategy;
}
/**
 * Returns the operation provider that builds map operations for this proxy.
 */
public MapOperationProvider getOperationProvider() {
    return this.operationProvider;
}
/**
 * Replaces the operation provider; exists for testing purposes (the field is
 * deliberately non-final).
 */
public void setOperationProvider(MapOperationProvider operationProvider) {
    this.operationProvider = operationProvider;
}
/**
 * Returns the total number of configured backups: synchronous plus
 * asynchronous.
 */
public int getTotalBackupCount() {
    int syncBackups = mapConfig.getBackupCount();
    int asyncBackups = mapConfig.getAsyncBackupCount();
    return syncBackups + asyncBackups;
}
/**
 * Returns the query engine responsible for this map.
 */
protected QueryEngine getMapQueryEngine() {
    return mapServiceContext.getQueryEngine(this.name);
}
/**
 * Returns whether this map has an enabled map store configured.
 */
protected boolean isMapStoreEnabled() {
    MapStoreConfig storeConfig = mapConfig.getMapStoreConfig();
    if (storeConfig == null) {
        return false;
    }
    return storeConfig.isEnabled();
}
/**
 * Fetches the value for {@code key}: when read-backup-data is enabled a
 * locally held replica is consulted first; otherwise (or on a local miss) a
 * get operation is invoked on the key's partition.
 *
 * @param key the user-supplied key
 * @return the value (in the form invokeOperation yields, i.e. serialized),
 *         or {@code null} if absent
 */
protected Object getInternal(Object key) {
// TODO: action for read-backup true is not well tested
Data keyData = toDataWithStrategy(key);
if (mapConfig.isReadBackupData()) {
Object fromBackup = readBackupDataOrNull(keyData);
if (fromBackup != null) {
return fromBackup;
}
}
MapOperation operation = operationProvider.createGetOperation(name, keyData);
// thread ID propagated so lock-aware operations see the caller's thread
operation.setThreadId(getThreadId());
return invokeOperation(keyData, operation);
}
/**
 * Reads the value for {@code key} from a locally held replica of the key's
 * partition.
 *
 * @param key the serialized key
 * @return the backup value, or {@code null} when this member is neither
 *         owner nor backup of the partition, or when no record store exists
 *         yet for this map on that partition
 */
private Data readBackupDataOrNull(Data key) {
int partitionId = partitionService.getPartitionId(key);
IPartition partition = partitionService.getPartition(partitionId, false);
if (!partition.isOwnerOrBackup(thisAddress)) {
return null;
}
PartitionContainer partitionContainer = mapServiceContext.getPartitionContainer(partitionId);
// getExistingRecordStore: do not create a record store just for a read
RecordStore recordStore = partitionContainer.getExistingRecordStore(name);
if (recordStore == null) {
return null;
}
return recordStore.readBackupData(key);
}
/**
 * Asynchronous get: invokes a get operation on the key's partition and
 * returns the invocation future with the result left in serialized form
 * (setResultDeserialized(false)). When statistics are enabled, a completion
 * callback records latency; CALLER_RUNS executes it on the completing thread.
 *
 * @param key the user-supplied key
 * @return future completing with the (serialized) value or {@code null}
 */
protected InternalCompletableFuture getAsyncInternal(Object key) {
Data keyData = toDataWithStrategy(key);
int partitionId = partitionService.getPartitionId(keyData);
MapOperation operation = operationProvider.createGetOperation(name, keyData);
try {
long startTimeNanos = Timer.nanos();
InvocationFuture future = operationService
.createInvocationBuilder(SERVICE_NAME, operation, partitionId)
.setResultDeserialized(false)
.invoke();
if (statisticsEnabled) {
future.whenCompleteAsync(new IncrementStatsExecutionCallback<>(operation, startTimeNanos), CALLER_RUNS);
}
return future;
} catch (Throwable t) {
throw rethrow(t);
}
}
/**
 * Synchronous put with TTL and max-idle; returns the previous value in
 * serialized form (or {@code null} if there was none).
 */
protected Data putInternal(Object key, Data valueData,
                           long ttl, TimeUnit ttlUnit,
                           long maxIdle, TimeUnit maxIdleUnit) {
    Data keyData = toDataWithStrategy(key);
    return (Data) invokeOperation(keyData,
            newPutOperation(keyData, valueData, ttl, ttlUnit, maxIdle, maxIdleUnit));
}
/**
 * Builds a put operation, normalizing TTL and max-idle to milliseconds
 * (a result of zero is bumped to one millisecond).
 */
private MapOperation newPutOperation(Data keyData, Data valueData,
                                     long ttl, TimeUnit timeunit,
                                     long maxIdle, TimeUnit maxIdleUnit) {
    long ttlMillis = timeInMsOrOneIfResultIsZero(ttl, timeunit);
    long maxIdleMillis = timeInMsOrOneIfResultIsZero(maxIdle, maxIdleUnit);
    return operationProvider.createPutOperation(name, keyData, valueData, ttlMillis, maxIdleMillis);
}
/**
 * Attempts a put, giving up after the given timeout.
 *
 * @return {@code true} if the put succeeded within the timeout
 */
protected boolean tryPutInternal(Object key, Data value, long timeout, TimeUnit timeunit) {
    Data keyData = toDataWithStrategy(key);
    long timeoutMillis = timeInMsOrOneIfResultIsZero(timeout, timeunit);
    MapOperation tryPut = operationProvider.createTryPutOperation(name, keyData, value, timeoutMillis);
    return (Boolean) invokeOperation(keyData, tryPut);
}
/**
 * Synchronous putIfAbsent with TTL and max-idle; returns the existing value
 * in serialized form, or {@code null} when the put took place.
 */
protected Data putIfAbsentInternal(Object key, Data value,
                                   long ttl, TimeUnit ttlUnit,
                                   long maxIdle, TimeUnit maxIdleUnit) {
    Data keyData = toDataWithStrategy(key);
    return (Data) invokeOperation(keyData,
            newPutIfAbsentOperation(keyData, value, ttl, ttlUnit, maxIdle, maxIdleUnit));
}
/**
 * Builds a putIfAbsent operation, normalizing TTL and max-idle to
 * milliseconds (a result of zero is bumped to one millisecond).
 */
private MapOperation newPutIfAbsentOperation(Data keyData, Data valueData,
                                             long ttl, TimeUnit timeunit,
                                             long maxIdle, TimeUnit maxIdleUnit) {
    long ttlMillis = timeInMsOrOneIfResultIsZero(ttl, timeunit);
    long maxIdleMillis = timeInMsOrOneIfResultIsZero(maxIdle, maxIdleUnit);
    return operationProvider.createPutIfAbsentOperation(name, keyData, valueData, ttlMillis, maxIdleMillis);
}
/**
 * Synchronous putTransient (value is not written through to the map store)
 * with TTL and max-idle; discards the operation result.
 */
protected void putTransientInternal(Object key, Data value,
                                    long ttl, TimeUnit ttlUnit,
                                    long maxIdle, TimeUnit maxIdleUnit) {
    Data keyData = toDataWithStrategy(key);
    invokeOperation(keyData,
            newPutTransientOperation(keyData, value, ttl, ttlUnit, maxIdle, maxIdleUnit));
}
/**
 * Builds a putTransient operation, normalizing TTL and max-idle to
 * milliseconds (a result of zero is bumped to one millisecond).
 */
private MapOperation newPutTransientOperation(Data keyData, Data valueData,
                                              long ttl, TimeUnit timeunit,
                                              long maxIdle, TimeUnit maxIdleUnit) {
    long ttlMillis = timeInMsOrOneIfResultIsZero(ttl, timeunit);
    long maxIdleMillis = timeInMsOrOneIfResultIsZero(maxIdle, maxIdleUnit);
    return operationProvider.createPutTransientOperation(name, keyData, valueData, ttlMillis, maxIdleMillis);
}
/**
 * Synchronously invokes a key-based operation on the key's partition and
 * returns its (still serialized) result.
 * <p>
 * Fix: the statistics and non-statistics branches duplicated the identical
 * invocation chain; the invocation is now hoisted and only the stat
 * recording stays conditional. Timer.nanos() is called unconditionally,
 * which is side-effect free and keeps the behavior identical.
 *
 * @param key       the serialized key (used for partition routing)
 * @param operation the operation to invoke; its thread ID is set here
 * @return the operation result, left in serialized form
 */
private Object invokeOperation(Data key, MapOperation operation) {
    int partitionId = partitionService.getPartitionId(key);
    operation.setThreadId(getThreadId());
    try {
        long startTimeNanos = Timer.nanos();
        Future future = operationService
                .createInvocationBuilder(SERVICE_NAME, operation, partitionId)
                .setResultDeserialized(false)
                .invoke();
        Object result = future.get();
        if (statisticsEnabled) {
            mapServiceContext.incrementOperationStats(startTimeNanos, localMapStats, name, operation);
        }
        return result;
    } catch (Throwable t) {
        throw rethrow(t);
    }
}
/**
 * Asynchronous put with TTL and max-idle: invokes a put operation on the
 * key's partition and returns the invocation future. When statistics are
 * enabled a completion callback records latency (CALLER_RUNS executes it on
 * the completing thread).
 *
 * @return future completing with the previous (serialized) value or {@code null}
 */
protected InternalCompletableFuture putAsyncInternal(Object key, Data valueData,
long ttl, TimeUnit ttlUnit,
long maxIdle, TimeUnit maxIdleUnit) {
Data keyData = toDataWithStrategy(key);
int partitionId = partitionService.getPartitionId(keyData);
MapOperation operation = newPutOperation(keyData, valueData, ttl, ttlUnit, maxIdle, maxIdleUnit);
operation.setThreadId(getThreadId());
try {
long startTimeNanos = Timer.nanos();
InvocationFuture future = operationService.invokeOnPartition(SERVICE_NAME, operation, partitionId);
if (statisticsEnabled) {
future.whenCompleteAsync(new IncrementStatsExecutionCallback<>(operation, startTimeNanos), CALLER_RUNS);
}
return future;
} catch (Throwable t) {
throw rethrow(t);
}
}
/**
 * Asynchronous putIfAbsent with TTL and max-idle: invokes the operation on
 * the key's partition and returns the invocation future. When statistics
 * are enabled a completion callback records latency.
 * <p>
 * Fix: the partition ID was computed from the raw {@code key} instead of
 * {@code keyData}. Every sibling async method here routes by the serialized
 * key, which carries the map's partitioning strategy; routing by the raw
 * key bypasses that strategy (and serializes the key a second time), so the
 * operation could be sent to the wrong partition for maps with a custom
 * {@link PartitioningStrategy}.
 *
 * @return future completing with the existing (serialized) value or {@code null}
 */
protected InternalCompletableFuture putIfAbsentAsyncInternal(Object key, Data value,
                                                             long ttl, TimeUnit ttlUnit,
                                                             long maxIdle, TimeUnit maxIdleUnit) {
    Data keyData = toDataWithStrategy(key);
    int partitionId = partitionService.getPartitionId(keyData);
    MapOperation operation = newPutIfAbsentOperation(keyData, value, ttl, ttlUnit, maxIdle, maxIdleUnit);
    operation.setThreadId(getThreadId());
    try {
        long startTimeNanos = Timer.nanos();
        InvocationFuture future = operationService.invokeOnPartition(SERVICE_NAME, operation, partitionId);
        if (statisticsEnabled) {
            future.whenCompleteAsync(new IncrementStatsExecutionCallback<>(operation, startTimeNanos), CALLER_RUNS);
        }
        return future;
    } catch (Throwable t) {
        throw rethrow(t);
    }
}
/**
 * Asynchronous set with TTL and max-idle: invokes a set operation on the
 * key's partition and returns the invocation future. Note that the start
 * timestamp is only captured when statistics are enabled, unlike the other
 * async methods above, hence the explicit two-branch shape.
 *
 * @return future completing when the set has been applied
 */
protected InternalCompletableFuture setAsyncInternal(Object key, Data valueData, long ttl, TimeUnit timeunit,
long maxIdle, TimeUnit maxIdleUnit) {
Data keyData = toDataWithStrategy(key);
int partitionId = partitionService.getPartitionId(keyData);
MapOperation operation = newSetOperation(keyData, valueData, ttl, timeunit, maxIdle, maxIdleUnit);
operation.setThreadId(getThreadId());
try {
final InvocationFuture result;
if (statisticsEnabled) {
long startTimeNanos = Timer.nanos();
result = operationService
.invokeOnPartition(SERVICE_NAME, operation, partitionId);
result.whenCompleteAsync(new IncrementStatsExecutionCallback<>(operation, startTimeNanos), CALLER_RUNS);
} else {
result = operationService
.invokeOnPartition(SERVICE_NAME, operation, partitionId);
}
return result;
} catch (Throwable t) {
throw rethrow(t);
}
}
/**
 * Compare-and-replace: replaces the value only when the current value equals
 * {@code expect}.
 *
 * @return {@code true} if the replace took place
 */
protected boolean replaceInternal(Object key, Data expect, Data update) {
    Data keyData = toDataWithStrategy(key);
    MapOperation replaceIfSame = operationProvider.createReplaceIfSameOperation(name, keyData, expect, update);
    return (Boolean) invokeOperation(keyData, replaceIfSame);
}
/**
 * Replaces the value for an existing key; returns the previous value in
 * serialized form, or {@code null} when the key was absent.
 */
protected Data replaceInternal(Object key, Data value) {
    Data keyData = toDataWithStrategy(key);
    return (Data) invokeOperation(keyData, operationProvider.createReplaceOperation(name, keyData, value));
}
// WARNING: when UpdateEvent is fired it does *NOT* contain the oldValue
// see this: https://github.com/hazelcast/hazelcast/pull/6088#issuecomment-136025968
/**
 * Synchronous set with TTL and max-idle; unlike put, the previous value is
 * not returned (the operation result is discarded).
 */
protected void setInternal(Object key, Data valueData, long ttl, TimeUnit timeunit, long maxIdle, TimeUnit maxIdleUnit) {
Data keyData = toDataWithStrategy(key);
MapOperation operation = newSetOperation(keyData, valueData, ttl, timeunit, maxIdle, maxIdleUnit);
invokeOperation(keyData, operation);
}
/**
 * Builds a set operation, normalizing TTL and max-idle to milliseconds
 * (a result of zero is bumped to one millisecond).
 */
private MapOperation newSetOperation(Data keyData, Data valueData,
                                     long ttl, TimeUnit timeunit,
                                     long maxIdle, TimeUnit maxIdleUnit) {
    long ttlMillis = timeInMsOrOneIfResultIsZero(ttl, timeunit);
    long maxIdleMillis = timeInMsOrOneIfResultIsZero(maxIdle, maxIdleUnit);
    return operationProvider.createSetOperation(name, keyData, valueData, ttlMillis, maxIdleMillis);
}
/**
 * Evicts a key from a map.
 *
 * @param key the key to evict
 * @return {@code true} if eviction was successful, {@code false} otherwise
 */
protected boolean evictInternal(Object key) {
    Data keyData = toDataWithStrategy(key);
    MapOperation evict = operationProvider.createEvictOperation(name, keyData, false);
    return (Boolean) invokeOperation(keyData, evict);
}
/**
 * Evicts every entry from all partitions of this map and, when at least one
 * entry was evicted, publishes a single EVICT_ALL map event carrying the
 * total count.
 */
protected void evictAllInternal() {
try {
Operation operation = operationProvider.createEvictAllOperation(name);
BinaryOperationFactory factory = new BinaryOperationFactory(operation, getNodeEngine());
Map resultMap = operationService.invokeOnAllPartitions(SERVICE_NAME, factory);
// each partition returns the number of entries it evicted
int evictedCount = 0;
for (Object object : resultMap.values()) {
evictedCount += (Integer) object;
}
if (evictedCount > 0) {
publishMapEvent(evictedCount, EntryEventType.EVICT_ALL);
}
} catch (Throwable t) {
throw rethrow(t);
}
}
/**
 * Triggers a full map-store load by invoking a load operation on the
 * partition that owns the map's name (the key-loader coordinator), then
 * blocks until loading has finished on all partitions.
 *
 * @param replaceExistingValues whether already-present entries are overwritten
 */
protected void loadAllInternal(boolean replaceExistingValues) {
int mapNamePartition = partitionService.getPartitionId(name);
Operation operation = operationProvider.createLoadMapOperation(name, replaceExistingValues);
Future loadMapFuture = operationService.invokeOnPartition(SERVICE_NAME, operation, mapNamePartition);
try {
loadMapFuture.get();
waitUntilLoaded();
} catch (Throwable t) {
throw rethrow(t);
}
}
/**
* Maps keys to corresponding partitions and sends operations to them.
*/
protected void loadInternal(Set keys, Iterable dataKeys, boolean replaceExistingValues) {
if (dataKeys == null) {
dataKeys = convertToData(keys);
}
Map> partitionIdToKeys = getPartitionIdToKeysMap(dataKeys);
Iterable>> entries = partitionIdToKeys.entrySet();
for (Entry> entry : entries) {
Integer partitionId = entry.getKey();
List correspondingKeys = entry.getValue();
Operation operation = createLoadAllOperation(correspondingKeys, replaceExistingValues);
operationService.invokeOnPartition(SERVICE_NAME, operation, partitionId);
}
waitUntilLoaded();
}
/**
 * Lazily maps each user key to its serialized form via {@code KeyToData};
 * a {@code null} input iterable is treated as empty.
 */
protected Iterable convertToData(Iterable keys) {
return IterableUtil.map(nullToEmpty(keys), new KeyToData());
}
/**
 * Builds a load-all operation for the given (serialized) keys of a single
 * partition.
 */
private Operation createLoadAllOperation(List keys, boolean replaceExistingValues) {
return operationProvider.createLoadAllOperation(name, keys, replaceExistingValues);
}
/**
 * Synchronous remove; returns the removed value in serialized form, or
 * {@code null} when the key was absent.
 */
protected Data removeInternal(Object key) {
    Data keyData = toDataWithStrategy(key);
    return (Data) invokeOperation(keyData, operationProvider.createRemoveOperation(name, keyData));
}
/**
 * Synchronous delete: removes the entry without returning the old value
 * (the operation result is discarded).
 */
protected void deleteInternal(Object key) {
    Data keyData = toDataWithStrategy(key);
    MapOperation delete = operationProvider.createDeleteOperation(name, keyData, false);
    invokeOperation(keyData, delete);
}
/**
 * Conditional remove: removes the entry only when the current value equals
 * the given value.
 *
 * @return {@code true} if the entry was removed
 */
protected boolean removeInternal(Object key, Data value) {
    Data keyData = toDataWithStrategy(key);
    MapOperation removeIfSame = operationProvider.createRemoveIfSameOperation(name, keyData, value);
    return (Boolean) invokeOperation(keyData, removeIfSame);
}
/**
 * Attempts a remove, giving up after the given timeout.
 *
 * @return {@code true} if the remove succeeded within the timeout
 */
protected boolean tryRemoveInternal(Object key, long timeout, TimeUnit timeunit) {
    Data keyData = toDataWithStrategy(key);
    long timeoutMillis = timeInMsOrOneIfResultIsZero(timeout, timeunit);
    MapOperation tryRemove = operationProvider.createTryRemoveOperation(name, keyData, timeoutMillis);
    return (Boolean) invokeOperation(keyData, tryRemove);
}
/**
 * Removes all entries matching the predicate by running the entry-removing
 * processor across partitions. A {@link PartitionPredicate} narrows the
 * invocation to the single partition derived from its partition key; any
 * other predicate is evaluated on all partitions.
 *
 * @param predicate selects the entries to remove
 */
protected void removeAllInternal(Predicate predicate) {
try {
if (predicate instanceof PartitionPredicate) {
PartitionPredicate partitionPredicate = (PartitionPredicate) predicate;
OperationFactory operation = operationProvider
.createPartitionWideEntryWithPredicateOperationFactory(name, ENTRY_REMOVING_PROCESSOR,
partitionPredicate.getTarget());
Data partitionKey = toDataWithStrategy(partitionPredicate.getPartitionKey());
int partitionId = partitionService.getPartitionId(partitionKey);
// invokeOnPartitions is used intentionally here, instead of invokeOnPartition, since
// the later one doesn't support PartitionAwareOperationFactory, which we need to use
// to speed up the removal operation using global indexes
// (see PartitionWideEntryWithPredicateOperationFactory.createFactoryOnRunner).
operationService.invokeOnPartitions(SERVICE_NAME, operation, singletonList(partitionId));
} else {
OperationFactory operation = operationProvider
.createPartitionWideEntryWithPredicateOperationFactory(name, ENTRY_REMOVING_PROCESSOR, predicate);
operationService.invokeOnAllPartitions(SERVICE_NAME, operation);
}
} catch (Throwable t) {
throw rethrow(t);
}
}
/**
 * Updates the TTL of an existing entry.
 * <p>
 * Fix: the key was serialized with {@code serializationService.toData(key)},
 * bypassing the map's partitioning strategy. Every other key-based method in
 * this class uses {@code toDataWithStrategy(key)}; without it, a map with a
 * custom {@link PartitioningStrategy} would route this operation to a
 * partition that does not own the entry.
 *
 * @return {@code true} if the entry existed and its TTL was updated
 */
protected boolean setTtlInternal(Object key, long ttl, TimeUnit timeUnit) {
    long ttlInMillis = timeUnit.toMillis(ttl);
    Data keyData = toDataWithStrategy(key);
    MapOperation operation = operationProvider.createSetTtlOperation(name, keyData, ttlInMillis);
    return (Boolean) invokeOperation(keyData, operation);
}
/**
 * Asynchronous remove: invokes a remove operation on the key's partition and
 * returns the invocation future. When statistics are enabled a completion
 * callback records latency (CALLER_RUNS executes it on the completing thread).
 *
 * @return future completing with the removed (serialized) value or {@code null}
 */
protected InternalCompletableFuture removeAsyncInternal(Object key) {
Data keyData = toDataWithStrategy(key);
int partitionId = partitionService.getPartitionId(keyData);
MapOperation operation = operationProvider.createRemoveOperation(name, keyData);
operation.setThreadId(getThreadId());
try {
long startTimeNanos = Timer.nanos();
InvocationFuture future = operationService.invokeOnPartition(SERVICE_NAME, operation, partitionId);
if (statisticsEnabled) {
future.whenCompleteAsync(new IncrementStatsExecutionCallback<>(operation, startTimeNanos), CALLER_RUNS);
}
return future;
} catch (Throwable t) {
throw rethrow(t);
}
}
/**
 * Synchronously checks whether the map contains the given key by invoking a
 * containsKey operation on the key's partition. Counted under "other
 * operations" in the local statistics.
 *
 * @return {@code true} if an entry for the key exists
 */
protected boolean containsKeyInternal(Object key) {
Data keyData = toDataWithStrategy(key);
int partitionId = partitionService.getPartitionId(keyData);
MapOperation containsKeyOperation = operationProvider.createContainsKeyOperation(name, keyData);
containsKeyOperation.setThreadId(getThreadId());
containsKeyOperation.setServiceName(SERVICE_NAME);
try {
Future future = operationService.invokeOnPartition(SERVICE_NAME, containsKeyOperation, partitionId);
Object object = future.get();
incrementOtherOperationsStat();
return (Boolean) toObject(object);
} catch (Throwable t) {
throw rethrow(t);
}
}
/**
 * Blocks until the map store's initial load has completed on every
 * partition. First polls the key-load coordinator (the partition owning the
 * map name) with exponential back-off, then waits until every partition
 * reports itself loaded; there is no overall timeout.
 */
public void waitUntilLoaded() {
try {
int mapNamesPartitionId = partitionService.getPartitionId(name);
// first we have to check if key-load finished - otherwise
// the loading on other partitions might not have started.
// In this case we can't invoke IsPartitionLoadedOperation
// -> they will return "true", but it won't be correct
int sleepDurationMillis = INITIAL_WAIT_LOAD_SLEEP_MILLIS;
while (true) {
Operation op = new IsKeyLoadFinishedOperation(name);
Future loadingFuture = operationService.invokeOnPartition(SERVICE_NAME, op, mapNamesPartitionId);
if (loadingFuture.get()) {
break;
}
// sleep with some back-off
TimeUnit.MILLISECONDS.sleep(sleepDurationMillis);
// double the sleep, capped at MAXIMAL_WAIT_LOAD_SLEEP_MILLIS
sleepDurationMillis = (sleepDurationMillis * 2 < MAXIMAL_WAIT_LOAD_SLEEP_MILLIS)
? sleepDurationMillis * 2 : MAXIMAL_WAIT_LOAD_SLEEP_MILLIS;
}
OperationFactory opFactory = new IsPartitionLoadedOperationFactory(name);
Map results = operationService.invokeOnAllPartitions(SERVICE_NAME, opFactory);
// wait for all the data to be loaded on all partitions - wait forever
waitAllTrue(results, opFactory);
} catch (Throwable t) {
throw rethrow(t);
}
}
/**
 * Blocks until every partition in {@code results} reports
 * {@code Boolean.TRUE}: partitions that answered {@code true} are dropped,
 * the rest are re-queried after a one-second pause, indefinitely.
 * <p>
 * Fix: the generic type arguments of the iterator/entry declarations were
 * lost ({@code Iterator>} and a bare {@code Entry} — invalid syntax);
 * restored as {@code Iterator<Map.Entry<Integer, Object>>}. The raw
 * {@code Map} parameter type is kept, hence the unchecked suppression.
 *
 * @param results          per-partition results, {@code Boolean.TRUE} meaning loaded
 * @param operationFactory factory used to re-query the not-yet-loaded partitions
 * @throws InterruptedException if interrupted while sleeping between retries
 */
@SuppressWarnings("unchecked")
private void waitAllTrue(Map results, OperationFactory operationFactory) throws InterruptedException {
    Iterator<Map.Entry<Integer, Object>> iterator = results.entrySet().iterator();
    boolean isFinished = false;
    PartitionIdSet retrySet = new PartitionIdSet(partitionService.getPartitionCount());
    while (!isFinished) {
        while (iterator.hasNext()) {
            Map.Entry<Integer, Object> entry = iterator.next();
            if (Boolean.TRUE.equals(entry.getValue())) {
                iterator.remove();
            } else {
                retrySet.add(entry.getKey());
            }
        }
        if (!retrySet.isEmpty()) {
            results = retryPartitions(retrySet, operationFactory);
            iterator = results.entrySet().iterator();
            TimeUnit.SECONDS.sleep(1);
            retrySet.clear();
        } else {
            isFinished = true;
        }
    }
}
/**
 * Re-invokes the given operation factory on the specified partitions and
 * returns the fresh per-partition results; any failure is rethrown unchecked.
 */
private Map retryPartitions(Collection partitions, OperationFactory operationFactory) {
try {
return operationService.invokeOnPartitions(SERVICE_NAME, operationFactory, partitions);
} catch (Throwable t) {
throw rethrow(t);
}
}
/**
 * Returns the total entry count of the map, summed over all partitions.
 * The per-partition counts are accumulated in a {@code long} and clamped
 * to {@code int} range by {@code toIntSize}.
 */
@Override
public int size() {
    try {
        OperationFactory sizeFactory = operationProvider.createMapSizeOperationFactory(name);
        Map results = operationService.invokeOnAllPartitions(SERVICE_NAME, sizeFactory);
        incrementOtherOperationsStat();
        long total = 0;
        for (Object partitionResult : results.values()) {
            total += (Integer) toObject(partitionResult);
        }
        return toIntSize(total);
    } catch (Throwable t) {
        throw rethrow(t);
    }
}
/**
 * Checks whether any partition holds the given (serialized) value; returns
 * as soon as one partition result is {@code true}.
 */
public boolean containsValueInternal(Data dataValue) {
    try {
        OperationFactory factory = operationProvider.createContainsValueOperationFactory(name, dataValue);
        Map results = operationService.invokeOnAllPartitions(SERVICE_NAME, factory);
        incrementOtherOperationsStat();
        for (Object partitionResult : results.values()) {
            if ((Boolean) toObject(partitionResult)) {
                return true;
            }
        }
        return false;
    } catch (Throwable t) {
        throw rethrow(t);
    }
}
/**
 * Returns {@code true} only when every partition reports itself empty.
 */
@Override
public boolean isEmpty() {
    try {
        // TODO: we don't need to wait for all futures to complete, we can stop on the first returned false
        // also there is no need to make use of IsEmptyOperation, just use size to reduce the amount of code
        IsEmptyOperationFactory factory = new IsEmptyOperationFactory(name);
        Map results = operationService.invokeOnAllPartitions(SERVICE_NAME, factory);
        incrementOtherOperationsStat();
        for (Object partitionResult : results.values()) {
            Boolean partitionIsEmpty = toObject(partitionResult);
            if (!partitionIsEmpty) {
                return false;
            }
        }
        return true;
    } catch (Throwable t) {
        throw rethrow(t);
    }
}
/**
 * Records one "other operation" in the local map statistics; no-op when
 * statistics are disabled.
 */
protected void incrementOtherOperationsStat() {
    if (!statisticsEnabled) {
        return;
    }
    localMapStats.incrementOtherOperations();
}
protected void getAllInternal(Set keys, List dataKeys, List