/*
 * Copyright (c) 2008-2018, Hazelcast, Inc. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.hazelcast.cache.impl;

import com.hazelcast.cache.impl.event.CachePartitionLostEventFilter;
import com.hazelcast.cache.impl.event.CachePartitionLostListener;
import com.hazelcast.cache.impl.event.InternalCachePartitionLostListenerAdapter;
import com.hazelcast.cache.impl.journal.CacheEventJournalReadOperation;
import com.hazelcast.cache.impl.journal.CacheEventJournalSubscribeOperation;
import com.hazelcast.cache.impl.operation.CacheListenerRegistrationOperation;
import com.hazelcast.cache.journal.EventJournalCacheEvent;
import com.hazelcast.config.CacheConfig;
import com.hazelcast.core.ICompletableFuture;
import com.hazelcast.core.Member;
import com.hazelcast.internal.journal.EventJournalInitialSubscriberState;
import com.hazelcast.internal.journal.EventJournalReader;
import com.hazelcast.logging.ILogger;
import com.hazelcast.nio.serialization.Data;
import com.hazelcast.projection.Projection;
import com.hazelcast.ringbuffer.ReadResultSet;
import com.hazelcast.spi.EventFilter;
import com.hazelcast.spi.EventRegistration;
import com.hazelcast.spi.InternalCompletableFuture;
import com.hazelcast.spi.NodeEngine;
import com.hazelcast.spi.Operation;
import com.hazelcast.spi.OperationService;
import com.hazelcast.util.function.Predicate;

import javax.cache.Cache;
import javax.cache.CacheException;
import javax.cache.configuration.CacheEntryListenerConfiguration;
import javax.cache.configuration.Configuration;
import javax.cache.expiry.ExpiryPolicy;
import javax.cache.integration.CompletionListener;
import javax.cache.processor.EntryProcessor;
import javax.cache.processor.EntryProcessorException;
import javax.cache.processor.EntryProcessorResult;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;

import static com.hazelcast.cache.impl.CacheProxyUtil.validateNotNull;
import static com.hazelcast.util.ExceptionUtil.rethrowAllowedTypeFirst;
import static com.hazelcast.util.MapUtil.createHashMap;
import static com.hazelcast.util.Preconditions.checkNotNull;
import static com.hazelcast.util.SetUtil.createHashSet;

/**
 * ICache implementation.
 * <p>
 * This proxy is the implementation of ICache and javax.cache.Cache which is returned by
 * HazelcastServerCacheManager. It represents a cache for server or embedded mode.
 * <p>
 * Each cache method is actually an operation which is sent to the related partition(s) or node(s).
 * Operations are executed on the partition's or node's executor pools and the results are delivered to the user.
 * <p>
 * In order to access a {@linkplain CacheProxy} by name, a cacheManager should be used. It's advised to use
 * {@link com.hazelcast.cache.ICache} instead.
 *
 * @param <K> the type of key.
 * @param <V> the type of value.
 */
@SuppressWarnings({"checkstyle:methodcount", "checkstyle:classfanoutcomplexity"})
public class CacheProxy<K, V> extends AbstractCacheProxy<K, V>
        implements EventJournalReader<EventJournalCacheEvent<K, V>> {

    protected final ILogger logger;

    CacheProxy(CacheConfig<K, V> cacheConfig, NodeEngine nodeEngine, ICacheService cacheService) {
        super(cacheConfig, nodeEngine, cacheService);
        logger = getNodeEngine().getLogger(getClass());
    }

    @Override
    public V get(K key) {
        return get(key, null);
    }

    @Override
    public Map<K, V> getAll(Set<? extends K> keys) {
        return getAll(keys, null);
    }

    @Override
    public boolean containsKey(K key) {
        ensureOpen();
        validateNotNull(key);
        Data dataKey = serializationService.toData(key);
        Operation operation = operationProvider.createContainsKeyOperation(dataKey);
        OperationService operationService = getNodeEngine().getOperationService();
        int partitionId = getPartitionId(dataKey);
        InternalCompletableFuture<Boolean> future =
                operationService.invokeOnPartition(getServiceName(), operation, partitionId);
        return future.join();
    }

    @Override
    public void loadAll(Set<? extends K> keys, boolean replaceExistingValues, CompletionListener completionListener) {
        ensureOpen();
        validateNotNull(keys);
        for (K key : keys) {
            CacheProxyUtil.validateConfiguredTypes(cacheConfig, key);
        }
        Set<Data> keysData = createHashSet(keys.size());
        for (K key : keys) {
            validateNotNull(key);
            keysData.add(serializationService.toData(key));
        }
        LoadAllTask loadAllTask = new LoadAllTask(operationProvider, keysData, replaceExistingValues, completionListener);
        try {
            submitLoadAllTask(loadAllTask);
        } catch (Exception e) {
            if (completionListener != null) {
                completionListener.onException(e);
            }
            throw new CacheException(e);
        }
    }

    @Override
    public void put(K key, V value) {
        put(key, value, null);
    }

    @Override
    public V getAndPut(K key, V value) {
        return getAndPut(key, value, null);
    }

    @Override
    public void putAll(Map<? extends K, ? extends V> map) {
        putAll(map, null);
    }

    @Override
    public boolean putIfAbsent(K key, V value) {
        return putIfAbsent(key, value, null);
    }

    @Override
    public boolean remove(K key) {
        try {
            InternalCompletableFuture<Boolean> future = removeAsyncInternal(key, null, false, false, true);
            return future.get();
        } catch (Throwable e) {
            throw rethrowAllowedTypeFirst(e, CacheException.class);
        }
    }

    @Override
    public boolean remove(K key, V oldValue) {
        try {
            InternalCompletableFuture<Boolean> future = removeAsyncInternal(key, oldValue, true, false, true);
            return future.get();
        } catch (Throwable e) {
            throw rethrowAllowedTypeFirst(e, CacheException.class);
        }
    }

    @Override
    public V getAndRemove(K key) {
        try {
            InternalCompletableFuture<V> future = removeAsyncInternal(key, null, false, true, true);
            return future.get();
        } catch (Throwable e) {
            throw rethrowAllowedTypeFirst(e, CacheException.class);
        }
    }

    @Override
    public boolean replace(K key, V oldValue, V newValue) {
        return replace(key, oldValue, newValue, null);
    }

    @Override
    public boolean replace(K key, V value) {
        return replace(key, value, (ExpiryPolicy) null);
    }

    @Override
    public V getAndReplace(K key, V value) {
        return getAndReplace(key, value, null);
    }

    @Override
    public void removeAll(Set<? extends K> keys) {
        ensureOpen();
        validateNotNull(keys);
        if (keys.isEmpty()) {
            return;
        }
        removeAllInternal(keys);
    }

    @Override
    public void removeAll() {
        ensureOpen();
        removeAllInternal(null);
    }

    @Override
    public void clear() {
        ensureOpen();
        clearInternal();
    }

    @Override
    public <C extends Configuration<K, V>> C getConfiguration(Class<C> clazz) {
        if (clazz.isInstance(cacheConfig)) {
            return clazz.cast(cacheConfig.getAsReadOnly());
        }
        throw new IllegalArgumentException("The configuration class " + clazz + " is not supported by this implementation");
    }

    @Override
    public <T> T invoke(K key, EntryProcessor<K, V, T> entryProcessor, Object... arguments) throws EntryProcessorException {
        ensureOpen();
        validateNotNull(key);
        checkNotNull(entryProcessor, "Entry Processor is null");
        Data keyData = serializationService.toData(key);
        Integer completionId = registerCompletionLatch(1);
        Operation op = operationProvider.createEntryProcessorOperation(keyData, completionId, entryProcessor, arguments);
        try {
            OperationService operationService = getNodeEngine().getOperationService();
            int partitionId = getPartitionId(keyData);
            InternalCompletableFuture<T> future = operationService.invokeOnPartition(getServiceName(), op, partitionId);
            T safely = future.join();
            waitCompletionLatch(completionId);
            return safely;
        } catch (CacheException ce) {
            deregisterCompletionLatch(completionId);
            throw ce;
        } catch (Exception e) {
            deregisterCompletionLatch(completionId);
            throw new EntryProcessorException(e);
        }
    }

    @Override
    public <T> Map<K, EntryProcessorResult<T>> invokeAll(Set<? extends K> keys, EntryProcessor<K, V, T> entryProcessor,
                                                         Object... arguments) {
        // TODO: implement a multiple (batch) invoke operation and its factory
        ensureOpen();
        validateNotNull(keys);
        checkNotNull(entryProcessor, "Entry Processor is null");
        Map<K, EntryProcessorResult<T>> allResult = createHashMap(keys.size());
        for (K key : keys) {
            validateNotNull(key);
            CacheEntryProcessorResult<T> ceResult;
            try {
                T result = invoke(key, entryProcessor, arguments);
                ceResult = result != null ? new CacheEntryProcessorResult<T>(result) : null;
            } catch (Exception e) {
                ceResult = new CacheEntryProcessorResult<T>(e);
            }
            if (ceResult != null) {
                allResult.put(key, ceResult);
            }
        }
        return allResult;
    }

    @Override
    public <T> T unwrap(Class<T> clazz) {
        if (clazz.isAssignableFrom(((Object) this).getClass())) {
            return clazz.cast(this);
        }
        throw new IllegalArgumentException("Unwrapping to " + clazz + " is not supported by this implementation");
    }

    @Override
    public void registerCacheEntryListener(CacheEntryListenerConfiguration<K, V> cacheEntryListenerConfiguration) {
        registerCacheEntryListener(cacheEntryListenerConfiguration, true);
    }

    @Override
    public void registerCacheEntryListener(CacheEntryListenerConfiguration<K, V> cacheEntryListenerConfiguration,
                                           boolean addToConfig) {
        ensureOpen();
        checkNotNull(cacheEntryListenerConfiguration, "CacheEntryListenerConfiguration can't be null");
        CacheEventListenerAdaptor<K, V> entryListener = new CacheEventListenerAdaptor<K, V>(this,
                cacheEntryListenerConfiguration, getNodeEngine().getSerializationService());
        String regId = getService().registerListener(getDistributedObjectName(), entryListener, entryListener, false);
        if (regId != null) {
            if (addToConfig) {
                cacheConfig.addCacheEntryListenerConfiguration(cacheEntryListenerConfiguration);
            }
            addListenerLocally(regId, cacheEntryListenerConfiguration);
            if (addToConfig) {
                updateCacheListenerConfigOnOtherNodes(cacheEntryListenerConfiguration, true);
            }
        }
    }

    @Override
    public void deregisterCacheEntryListener(CacheEntryListenerConfiguration<K, V> cacheEntryListenerConfiguration) {
        checkNotNull(cacheEntryListenerConfiguration, "CacheEntryListenerConfiguration can't be null");
        String regId = getListenerIdLocal(cacheEntryListenerConfiguration);
        if (regId != null) {
            if (getService().deregisterListener(getDistributedObjectName(), regId)) {
                removeListenerLocally(cacheEntryListenerConfiguration);
                cacheConfig.removeCacheEntryListenerConfiguration(cacheEntryListenerConfiguration);
                updateCacheListenerConfigOnOtherNodes(cacheEntryListenerConfiguration, false);
            }
        }
    }

    private void updateCacheListenerConfigOnOtherNodes(CacheEntryListenerConfiguration<K, V> cacheEntryListenerConfiguration,
                                                       boolean isRegister) {
        OperationService operationService = getNodeEngine().getOperationService();
        Collection<Member> members = getNodeEngine().getClusterService().getMembers();
        for (Member member : members) {
            if (!member.localMember()) {
                Operation op = new CacheListenerRegistrationOperation(getDistributedObjectName(),
                        cacheEntryListenerConfiguration, isRegister);
                operationService.invokeOnTarget(CacheService.SERVICE_NAME, op, member.getAddress());
            }
        }
    }

    @Override
    public Iterator<Cache.Entry<K, V>> iterator() {
        ensureOpen();
        return new ClusterWideIterator<K, V>(this, false);
    }

    @Override
    public Iterator<Cache.Entry<K, V>> iterator(int fetchSize) {
        ensureOpen();
        return new ClusterWideIterator<K, V>(this, fetchSize, false);
    }

    @Override
    public Iterator<Cache.Entry<K, V>> iterator(int fetchSize, int partitionId, boolean prefetchValues) {
        ensureOpen();
        return new CachePartitionIterator<K, V>(this, fetchSize, partitionId, prefetchValues);
    }

    @Override
    public String addPartitionLostListener(CachePartitionLostListener listener) {
        checkNotNull(listener, "CachePartitionLostListener can't be null");
        EventFilter filter = new CachePartitionLostEventFilter();
        InternalCachePartitionLostListenerAdapter listenerAdapter = new InternalCachePartitionLostListenerAdapter(listener);
        injectDependencies(listener);
        EventRegistration registration = getService().getNodeEngine().getEventService()
                .registerListener(AbstractCacheService.SERVICE_NAME, name, filter, listenerAdapter);
        return registration.getId();
    }

    @Override
    public boolean removePartitionLostListener(String id) {
        checkNotNull(id, "Listener ID should not be null!");
        return getService().getNodeEngine().getEventService()
                .deregisterListener(AbstractCacheService.SERVICE_NAME, name, id);
    }

    @Override
    public ICompletableFuture<EventJournalInitialSubscriberState> subscribeToEventJournal(int partitionId) {
        final CacheEventJournalSubscribeOperation op = new CacheEventJournalSubscribeOperation(nameWithPrefix);
        op.setPartitionId(partitionId);
        return getNodeEngine().getOperationService().invokeOnPartition(op);
    }

    @Override
    public <T> ICompletableFuture<ReadResultSet<T>> readFromEventJournal(
            long startSequence,
            int minSize,
            int maxSize,
            int partitionId,
            Predicate<? super EventJournalCacheEvent<K, V>> predicate,
            Projection<? super EventJournalCacheEvent<K, V>, ? extends T> projection) {
        if (maxSize < minSize) {
            throw new IllegalArgumentException("maxSize " + maxSize
                    + " must be greater or equal to minSize " + minSize);
        }
        final CacheEventJournalReadOperation<K, V, T> op = new CacheEventJournalReadOperation<K, V, T>(
                nameWithPrefix, startSequence, minSize, maxSize, predicate, projection);
        op.setPartitionId(partitionId);
        return getNodeEngine().getOperationService().invokeOnPartition(op);
    }
}
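As the class comment notes, a CacheProxy is not constructed directly: it is obtained by name through a cache manager and is best consumed through the com.hazelcast.cache.ICache interface. The snippet below is a minimal caller-side sketch of that flow, not part of the file above. It assumes Hazelcast 3.x is the JCache provider on the classpath; the cache name "orders", the key/value types, and the class name CacheProxyUsageSketch are illustrative only.

// Caller-side sketch (hypothetical class, illustrative names).
import javax.cache.Cache;
import javax.cache.CacheManager;
import javax.cache.Caching;
import javax.cache.spi.CachingProvider;

import com.hazelcast.cache.ICache;
import com.hazelcast.config.CacheConfig;

public class CacheProxyUsageSketch {
    public static void main(String[] args) {
        // Resolves to Hazelcast's CachingProvider when it is the only JCache provider present (assumption).
        CachingProvider provider = Caching.getCachingProvider();
        CacheManager cacheManager = provider.getCacheManager();

        // Creating the cache by name yields the server/embedded-mode proxy defined above.
        Cache<String, Integer> cache =
                cacheManager.createCache("orders", new CacheConfig<String, Integer>());

        // As advised in the class comment, work against ICache rather than the proxy class itself.
        ICache<String, Integer> icache = cache.unwrap(ICache.class);
        icache.put("order-1", 42);

        cacheManager.close();
    }
}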




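The class comment also describes every cache call as an operation dispatched to the owning partition, with the result delivered back to the caller. From the caller's perspective this is most visible in ICache's asynchronous variants. The sketch below illustrates that under the same assumptions as the previous example; getAsync, putAsync, ICompletableFuture.andThen and ExecutionCallback are Hazelcast 3.x APIs, while the class name, method name and the key "order-1" are illustrative only.

// Caller-side sketch of the asynchronous ICache methods backed by the partition operations above.
import com.hazelcast.cache.ICache;
import com.hazelcast.core.ExecutionCallback;
import com.hazelcast.core.ICompletableFuture;

public class CacheProxyAsyncSketch {
    static void readWithoutBlocking(ICache<String, Integer> icache) {
        // putAsync/getAsync enqueue a partition operation and return immediately.
        icache.putAsync("order-1", 42);

        ICompletableFuture<Integer> future = icache.getAsync("order-1");
        // The result is delivered to the callback once the partition operation completes.
        future.andThen(new ExecutionCallback<Integer>() {
            @Override
            public void onResponse(Integer value) {
                System.out.println("value = " + value);
            }

            @Override
            public void onFailure(Throwable t) {
                t.printStackTrace();
            }
        });
    }
}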