package org.infinispan.persistence.async;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import org.infinispan.Cache;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.util.CollectionFactory;
import org.infinispan.configuration.cache.AsyncStoreConfiguration;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.factories.threads.DefaultThreadFactory;
import org.infinispan.marshall.core.MarshalledEntry;
import org.infinispan.persistence.modifications.Modification;
import org.infinispan.persistence.modifications.ModificationsList;
import org.infinispan.persistence.modifications.Remove;
import org.infinispan.persistence.modifications.Store;
import org.infinispan.persistence.spi.CacheWriter;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.persistence.support.DelegatingCacheWriter;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import net.jcip.annotations.GuardedBy;
/**
* The AsyncCacheWriter is a delegating CacheWriter that buffers changes and writes them asynchronously to
* the underlying store.
*
* Read operations are done synchronously, taking into account the current state of buffered changes.
*
* There is no provision for exception handling for problems encountered with the underlying store
* during a write operation, and the exception is just logged.
*
* When configuring the loader, use the following element:
*
* <async enabled="true" />
*
* to define whether cache loader operations are to be asynchronous. If not specified, a cache loader operation is
* assumed synchronous and this decorator is not applied.
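*
* The same behaviour can also be enabled programmatically. The snippet below is only a rough sketch using
* the org.infinispan.configuration.cache.ConfigurationBuilder API (the single file store and the concrete
* values are illustrative, and builder method names may differ between versions); the modificationQueueSize
* and threadPoolSize attributes correspond to the AsyncStoreConfiguration values consumed in start():
*
* ConfigurationBuilder builder = new ConfigurationBuilder();
* builder.persistence()
*    .addSingleFileStore()
*       .async()
*          .enable()
*          .modificationQueueSize(1024)
*          .threadPoolSize(4);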
*
* Write operations affecting the same key are coalesced so that only the final state is actually stored.
*
*
* @author Manik Surtani
* @author Galder Zamarreño
* @author Sanne Grinovero
* @author Karsten Blees
* @author Mircea Markus
* @since 4.0
*/
public class AsyncCacheWriter extends DelegatingCacheWriter {
private static final Log log = LogFactory.getLog(AsyncCacheWriter.class);
private static final boolean trace = log.isTraceEnabled();
private ExecutorService executor;
private Thread coordinator;
private int concurrencyLevel;
private String cacheName;
private String nodeName;
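// Flow-control lock, sized in start() with the configured modificationQueueSize; it bounds how many
// modifications may be buffered before writing threads block.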
protected BufferLock stateLock;
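// Current buffer of coalesced modifications awaiting an asynchronous flush to the underlying store.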
@GuardedBy("stateLock")
protected final AtomicReference<State> state = new AtomicReference<>();
@GuardedBy("stateLock")
private boolean stopped;
protected AsyncStoreConfiguration asyncConfiguration;
public AsyncCacheWriter(CacheWriter delegate) {
super(delegate);
}
@Override
public void init(InitializationContext ctx) {
super.init(ctx);
this.asyncConfiguration = ctx.getConfiguration().async();
Cache cache = ctx.getCache();
Configuration cacheCfg = cache != null ? cache.getCacheConfiguration() : null;
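// Concurrency hint for the internal maps of buffered modifications, taken from the cache's locking
// configuration (16 when no cache configuration is available).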
concurrencyLevel = cacheCfg != null ? cacheCfg.locking().concurrencyLevel() : 16;
cacheName = cache != null ? cache.getName() : null;
nodeName = cache != null ? cache.getCacheManager().getCacheManagerConfiguration().transport().nodeName() : null;
}
@Override
public void start() {
log.debugf("Async cache loader starting %s", this);
state.set(newState(false, null));
stopped = false;
stateLock = new BufferLock(asyncConfiguration.modificationQueueSize());
// Create a thread pool with unbounded work queue, so that all work is accepted and eventually
// executed. A bounded queue could throw RejectedExecutionException and thus lose data.
int poolSize = asyncConfiguration.threadPoolSize();
DefaultThreadFactory processorThreadFactory =
new DefaultThreadFactory(null, Thread.NORM_PRIORITY, DefaultThreadFactory.DEFAULT_PATTERN, nodeName,
"AsyncStoreProcessor");
executor = new ThreadPoolExecutor(poolSize, poolSize, 120L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
processorThreadFactory);
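// With allowCoreThreadTimeOut(true), even the core AsyncStoreProcessor threads are released after the
// 120-second keep-alive above instead of lingering while the store is idle.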
((ThreadPoolExecutor) executor).allowCoreThreadTimeOut(true);
DefaultThreadFactory coordinatorThreadFactory =
new DefaultThreadFactory(null, Thread.NORM_PRIORITY, DefaultThreadFactory.DEFAULT_PATTERN, nodeName,
"AsyncStoreCoordinator");
coordinator = coordinatorThreadFactory.newThread(new AsyncStoreCoordinator());
coordinator.start();
}
@Override
public void stop() {
if (trace) log.tracef("Stop async store %s", this);
stateLock.writeLock(0);
stopped = true;
stateLock.writeUnlock();
try {
// It is safe to wait without timeout because the thread pool uses an unbounded work queue (i.e.
// all work handed to the pool will be accepted and eventually executed) and AsyncStoreProcessors
// decrement the workerThreads latch in a finally block (i.e. even if the back-end store throws
// java.lang.Error). The coordinator thread can only block forever if the back-end's write() /
// remove() methods block, but this is no different from PassivationManager.stop() being blocked
// in a synchronous call to write() / remove().
coordinator.join();
// The coordinator thread waits for AsyncStoreProcessor threads to count down their latch (nearly
// at the end). Thus the threads should have terminated or terminate instantly.
executor.shutdown();
if (!executor.awaitTermination(1, TimeUnit.SECONDS))
log.errorAsyncStoreNotStopped();
} catch (InterruptedException e) {
log.interruptedWaitingAsyncStorePush(e);
Thread.currentThread().interrupt();
}
}
@Override
public void write(MarshalledEntry entry) {
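// Buffer the update as a Store modification; it is written to the underlying store asynchronously.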
put(new Store(entry.getKey(), entry), 1);
}
@Override
public void writeBatch(Iterable entries) {
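// Map each entry to a Store modification and hand the whole batch to putAll so it is buffered together.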
putAll(
StreamSupport.stream((Spliterator<MarshalledEntry>) entries.spliterator(), false)
.map(me -> new Store(me.getKey(), me))
.collect(Collectors.toList())
);
}
@Override
public void deleteBatch(Iterable keys) {
putAll(
StreamSupport.stream((Spliterator<Object>) keys.spliterator(), false)
.map(Remove::new)
.collect(Collectors.toList())
);
}