com.hazelcast.cache.impl.AbstractCacheProxyBase

/*
 * Copyright (c) 2008-2018, Hazelcast, Inc. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.hazelcast.cache.impl;

import com.hazelcast.config.CacheConfig;
import com.hazelcast.core.ExecutionCallback;
import com.hazelcast.core.ManagedContext;
import com.hazelcast.logging.ILogger;
import com.hazelcast.nio.Address;
import com.hazelcast.nio.serialization.Data;
import com.hazelcast.spi.AbstractDistributedObject;
import com.hazelcast.spi.ExecutionService;
import com.hazelcast.spi.NodeEngine;
import com.hazelcast.spi.OperationFactory;
import com.hazelcast.spi.OperationService;
import com.hazelcast.spi.partition.IPartitionService;
import com.hazelcast.spi.serialization.SerializationService;
import com.hazelcast.util.executor.CompletableFutureTask;

import javax.cache.CacheException;
import javax.cache.integration.CompletionListener;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

import static com.hazelcast.cache.impl.CacheProxyUtil.validateResults;
import static com.hazelcast.util.ExceptionUtil.rethrow;
import static com.hazelcast.util.MapUtil.createHashMap;
import static com.hazelcast.util.SetUtil.createHashSet;

/**
 * Abstract class providing cache open/close operations and {@link NodeEngine}, {@link CacheService} and
 * {@link SerializationService} accessors to be used by implementations of {@link com.hazelcast.cache.ICache}
 * in server or embedded mode.
 *
 * @param <K> the type of key
 * @param <V> the type of value
 * @see com.hazelcast.cache.impl.CacheProxy
 */
abstract class AbstractCacheProxyBase<K, V>
        extends AbstractDistributedObject<ICacheService>
        implements ICacheInternal<K, V> {

    private static final int TIMEOUT = 10;

    /**
     * Speculative factor to be used when initialising collections
     * of an approximate final size.
     */
    private static final double SIZING_FUDGE_FACTOR = 1.3;

    protected final ILogger logger;
    protected final CacheConfig<K, V> cacheConfig;
    protected final String name;
    protected final String nameWithPrefix;
    protected final ICacheService cacheService;
    protected final SerializationService serializationService;
    protected final CacheOperationProvider operationProvider;
    protected final IPartitionService partitionService;

    private final NodeEngine nodeEngine;
    private final CopyOnWriteArrayList<Future> loadAllTasks = new CopyOnWriteArrayList<Future>();

    private final AtomicBoolean isClosed = new AtomicBoolean(false);
    private final AtomicBoolean isDestroyed = new AtomicBoolean(false);

    AbstractCacheProxyBase(CacheConfig<K, V> cacheConfig, NodeEngine nodeEngine, ICacheService cacheService) {
        super(nodeEngine, cacheService);
        this.name = cacheConfig.getName();
        this.nameWithPrefix = cacheConfig.getNameWithPrefix();
        this.cacheConfig = cacheConfig;
        this.nodeEngine = nodeEngine;
        this.logger = nodeEngine.getLogger(getClass());
        this.partitionService = nodeEngine.getPartitionService();
        this.cacheService = cacheService;
        this.serializationService = nodeEngine.getSerializationService();
        this.operationProvider =
                cacheService.getCacheOperationProvider(nameWithPrefix, cacheConfig.getInMemoryFormat());
    }

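    /**
     * Initializes {@code obj} with the serialization service's {@link ManagedContext},
     * giving user-supplied objects (such as a {@link CompletionListener}) a chance to
     * have their dependencies injected before they are used.
     */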
    void injectDependencies(Object obj) {
        ManagedContext managedContext = serializationService.getManagedContext();
        managedContext.initialize(obj);
    }

    @Override
    public String getName() {
        return name;
    }

    @Override
    protected String getDistributedObjectName() {
        return nameWithPrefix;
    }

    @Override
    public String getPrefixedName() {
        return nameWithPrefix;
    }

    @Override
    public String getServiceName() {
        return ICacheService.SERVICE_NAME;
    }

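    /**
     * Reopens this proxy after a {@link #close()}; a destroyed cache cannot be reopened.
     */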
    @Override
    public void open() {
        if (isDestroyed.get()) {
            throw new IllegalStateException("Cache is already destroyed! Cannot be reopened");
        }
        isClosed.compareAndSet(true, false);
    }

    @Override
    public void close() {
        close0(false);
    }

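    /**
     * Closes the proxy at most once: waits up to {@link #TIMEOUT} seconds for each pending
     * loadAll task, closes the registered listeners and, unless the cache is being destroyed,
     * resets the CacheManager. The first exception caught while waiting is rethrown as a
     * {@link CacheException} after cleanup completes.
     */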
    private void close0(boolean destroy) {
        if (!isClosed.compareAndSet(false, true)) {
            return;
        }
        Exception caughtException = null;
        for (Future f : loadAllTasks) {
            try {
                f.get(TIMEOUT, TimeUnit.SECONDS);
            } catch (Exception e) {
                if (caughtException == null) {
                    caughtException = e;
                }
                getNodeEngine().getLogger(getClass()).warning("Problem while waiting for loadAll tasks to complete", e);
            }
        }
        loadAllTasks.clear();

        closeListeners();
        if (!destroy) {
            // when cache is being destroyed, the CacheManager is still required for cleanup and reset in postDestroy
            // when cache is being closed, the CacheManager is reset now
            resetCacheManager();
        }
        if (caughtException != null) {
            throw new CacheException("Problem while waiting for loadAll tasks to complete", caughtException);
        }
    }

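    /**
     * Runs the close-then-destroy sequence; returns {@code false} if this cache
     * was already destroyed, so the destroy steps are not executed twice.
     */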
    @Override
    protected boolean preDestroy() {
        close0(true);
        if (!isDestroyed.compareAndSet(false, true)) {
            return false;
        }
        isClosed.set(true);
        return true;
    }

    @Override
    public boolean isClosed() {
        return isClosed.get();
    }

    @Override
    public boolean isDestroyed() {
        return isDestroyed.get();
    }

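    /**
     * Closes the listeners registered on this cache; implemented by subclasses.
     */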
    abstract void closeListeners();

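    /**
     * Throws an {@link IllegalStateException} if this proxy has been closed; intended
     * to be called before performing any cache operation.
     */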
    void ensureOpen() {
        if (isClosed()) {
            throw new IllegalStateException("Cache operations can not be performed. The cache closed");
        }
    }

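    /**
     * Submits the given {@link LoadAllTask} to the execution service and tracks its future
     * in {@code loadAllTasks}, so {@link #close()} can wait for pending loads; the callback
     * removes the future from the list once the task completes or fails.
     */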
    @SuppressWarnings("unchecked")
    void submitLoadAllTask(LoadAllTask loadAllTask) {
        ExecutionService executionService = nodeEngine.getExecutionService();
        final CompletableFutureTask<Object> future =
                (CompletableFutureTask<Object>) executionService.submit("loadAll-" + nameWithPrefix, loadAllTask);
        loadAllTasks.add(future);
        future.andThen(new ExecutionCallback<Object>() {
            @Override
            public void onResponse(Object response) {
                loadAllTasks.remove(future);
            }

            @Override
            public void onFailure(Throwable t) {
                loadAllTasks.remove(future);
                getNodeEngine().getLogger(getClass()).warning("Problem in loadAll task", t);
            }
        });
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }

        AbstractCacheProxyBase<?, ?> that = (AbstractCacheProxyBase<?, ?>) o;
        if (nameWithPrefix != null ? !nameWithPrefix.equals(that.nameWithPrefix) : that.nameWithPrefix != null) {
            return false;
        }
        return true;
    }

    @Override
    public int hashCode() {
        return nameWithPrefix != null ? nameWithPrefix.hashCode() : 0;
    }

    @Override
    public String toString() {
        return getClass().getName() + '{' + "name=" + name + ", nameWithPrefix=" + nameWithPrefix + '}';
    }

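    /**
     * Runnable backing {@link javax.cache.Cache#loadAll}: for each member's owned partitions
     * it narrows the requested keys to those partitions, invokes a load-all operation on them,
     * validates the per-partition results and finally notifies the {@link CompletionListener}.
     */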
    final class LoadAllTask implements Runnable {

        private final CompletionListener completionListener;
        private final CacheOperationProvider operationProvider;
        private final Set<Data> keysData;
        private final boolean replaceExistingValues;

        LoadAllTask(CacheOperationProvider operationProvider, Set<Data> keysData,
                    boolean replaceExistingValues, CompletionListener completionListener) {
            this.operationProvider = operationProvider;
            this.keysData = keysData;
            this.replaceExistingValues = replaceExistingValues;
            this.completionListener = completionListener;
        }

        @Override
        public void run() {
            try {
                injectDependencies(completionListener);

                OperationService operationService = getNodeEngine().getOperationService();
                OperationFactory operationFactory;

                IPartitionService partitionService = getNodeEngine().getPartitionService();
                Map<Address, List<Integer>> memberPartitionsMap = partitionService.getMemberPartitionsMap();
                Map<Integer, Object> results = createHashMap(partitionService.getPartitionCount());

                for (Map.Entry<Address, List<Integer>> memberPartitions : memberPartitionsMap.entrySet()) {
                    Set<Integer> partitions = new HashSet<Integer>(memberPartitions.getValue());
                    Set<Data> ownerKeys = filterOwnerKeys(partitionService, partitions);
                    operationFactory = operationProvider.createLoadAllOperationFactory(ownerKeys, replaceExistingValues);
                    Map<Integer, Object> memberResults;
                    memberResults = operationService.invokeOnPartitions(getServiceName(), operationFactory, partitions);
                    results.putAll(memberResults);
                }

                validateResults(results);
                if (completionListener != null) {
                    completionListener.onCompletion();
                }
            } catch (Exception e) {
                if (completionListener != null) {
                    completionListener.onException(e);
                }
            } catch (Throwable t) {
                if (t instanceof OutOfMemoryError) {
                    throw rethrow(t);
                } else {
                    if (completionListener != null) {
                        completionListener.onException(new CacheException(t));
                    }
                }
            }
        }

        private Set<Data> filterOwnerKeys(IPartitionService partitionService, Set<Integer> partitions) {
            // assume the keys are spread evenly across all partitions, so this member's share is
            // proportional to its partition count; scale by SIZING_FUDGE_FACTOR to avoid rehashing
            final int roughSize = (int) (keysData.size() * partitions.size()
                    / (double) partitionService.getPartitionCount() * SIZING_FUDGE_FACTOR);
            Set<Data> ownerKeys = createHashSet(roughSize);
            for (Data key : keysData) {
                int keyPartitionId = partitionService.getPartitionId(key);
                if (partitions.contains(keyPartitionId)) {
                    ownerKeys.add(key);
                }
            }
            return ownerKeys;
        }
    }
}
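
For context, here is a minimal usage sketch (not part of the file above). It assumes the JCache API (javax.cache) is on the classpath; the cache name "example-cache" and the Integer/String key and value types are illustrative, and a CacheLoader factory would normally be configured for loadAll() to load anything. In embedded mode the ICache obtained below is backed by CacheProxy, which extends AbstractCacheProxyBase, so the loadAll() call runs through submitLoadAllTask() and LoadAllTask shown above.

import com.hazelcast.cache.ICache;
import com.hazelcast.config.CacheSimpleConfig;
import com.hazelcast.config.Config;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;

import javax.cache.integration.CompletionListener;
import java.util.HashSet;
import java.util.Set;

public class CacheLoadAllExample {

    public static void main(String[] args) {
        // Hypothetical cache name; a CacheLoader factory would also be set on this
        // config in a real setup so that loadAll() has something to load from.
        Config config = new Config();
        CacheSimpleConfig cacheConfig = new CacheSimpleConfig();
        cacheConfig.setName("example-cache");
        config.addCacheConfig(cacheConfig);

        HazelcastInstance instance = Hazelcast.newHazelcastInstance(config);
        ICache<Integer, String> cache = instance.getCacheManager().getCache("example-cache");

        Set<Integer> keys = new HashSet<Integer>();
        keys.add(1);
        keys.add(2);

        // loadAll() is asynchronous; completion (or failure) is reported through the
        // CompletionListener, which LoadAllTask invokes after all partitions respond.
        cache.loadAll(keys, true, new CompletionListener() {
            @Override
            public void onCompletion() {
                System.out.println("loadAll completed");
            }

            @Override
            public void onException(Exception e) {
                e.printStackTrace();
            }
        });

        instance.shutdown();
    }
}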