/**
 * An {@code ExecutorService} and {@code DistributedExecutorService} implementation that executes
 * tasks across an Infinispan cluster, initiating execution from a given master cache node.
*
*
* Note that due to potential task migration to other nodes every {@link Callable},
* {@link Runnable} and/or {@link DistributedCallable} submitted must be either {@link Serializable}
* or {@link Externalizable}. Also the value returned from a callable must be {@link Serializable}
* or {@link Externalizable}. Unfortunately if the value returned is not serializable then a
* {@link NotSerializableException} will be thrown.
*
* @author Vladimir Blagojevic
* @since 5.0
*
*/
public class DefaultExecutorService extends AbstractExecutorService implements DistributedExecutorService {
// Accepts only cluster members co-located on the same physical machine as this node.
private static final NodeFilter SAME_MACHINE_FILTER = new NodeFilter() {
   @Override
   public boolean include(TopologyAwareAddress localAddress, TopologyAwareAddress candidate) {
      return localAddress.isSameMachine(candidate);
   }
};
// Accepts only cluster members located in the same rack as this node.
private static final NodeFilter SAME_RACK_FILTER = new NodeFilter() {
   @Override
   public boolean include(TopologyAwareAddress localAddress, TopologyAwareAddress candidate) {
      return localAddress.isSameRack(candidate);
   }
};
// Accepts only cluster members located in the same site as this node.
private static final NodeFilter SAME_SITE_FILTER = new NodeFilter() {
   @Override
   public boolean include(TopologyAwareAddress localAddress, TopologyAwareAddress candidate) {
      return localAddress.isSameSite(candidate);
   }
};
// Accepts every cluster member regardless of topology.
private static final NodeFilter ALL_FILTER = new NodeFilter() {
   @Override
   public boolean include(TopologyAwareAddress localAddress, TopologyAwareAddress candidate) {
      return true;
   }
};
// Built-in failover policies users can attach to a distributed task.
public static final DistributedTaskFailoverPolicy NO_FAILOVER = new NoTaskFailoverPolicy();
public static final DistributedTaskFailoverPolicy RANDOM_NODE_FAILOVER = new RandomNodeTaskFailoverPolicy();
private static final Log log = LogFactory.getLog(DefaultExecutorService.class);
private static final boolean trace = log.isTraceEnabled();
// Flipped once by realShutdown(); checked before accepting new work.
protected final AtomicBoolean isShutdown = new AtomicBoolean(false);
// Components resolved from the master cache node's ComponentRegistry in the constructor.
protected final AdvancedCache cache;
protected final RpcManager rpc;
protected final InterceptorChain invoker;
protected final CommandsFactory factory;
protected final Marshaller marshaller;
// Executor for tasks that run on this (local) node.
protected final ExecutorService localExecutorService;
protected final CancellationService cancellationService;
protected final ClusteringDependentLogic clusterDependentLogic;
// When true, shutdown()/shutdownNow() also shut down localExecutorService.
protected final boolean takeExecutorOwnership;
private final TimeService timeService;
/**
 * Creates a new DefaultExecutorService given a master cache node for local task execution. All
 * distributed task executions will be initiated from this Infinispan cache node.
 *
 * @param masterCacheNode
 *           Cache node initiating distributed task
 */
public DefaultExecutorService(Cache<?, ?> masterCacheNode) {
   // Local tasks run on a dedicated single-thread executor; ownership is taken so that
   // shutdown()/shutdownNow() also stop that executor.
   this(masterCacheNode, Executors.newSingleThreadExecutor(), true);
}
/**
 * Creates a new DefaultExecutorService given a master cache node and an ExecutorService for
 * parallel execution of tasks ran on this node. All distributed task executions will be
 * initiated from this Infinispan cache node.
 *
 * Note that DefaultExecutorService will not shutdown client supplied localExecutorService once
 * this DefaultExecutorService is shutdown. Lifecycle management of a supplied ExecutorService is
 * left to the client
 *
 * Also note that client supplied ExecutorService should not execute tasks in the caller's thread
 * ( i.e rejectionHandler of {@link ThreadPoolExecutor} configured with {link
 * {@link ThreadPoolExecutor.CallerRunsPolicy})
 *
 * @param masterCacheNode
 *           Cache node initiating distributed task
 * @param localExecutorService
 *           ExecutorService to run local tasks
 */
public DefaultExecutorService(Cache<?, ?> masterCacheNode, ExecutorService localExecutorService) {
   // Ownership is NOT taken: the caller remains responsible for shutting the executor down.
   this(masterCacheNode, localExecutorService, false);
}
/**
 * Creates a new DefaultExecutorService given a master cache node and an ExecutorService for
 * parallel execution of task ran on this node. All distributed task executions will be initiated
 * from this Infinispan cache node.
 *
 * @param masterCacheNode
 *           Cache node initiating distributed task
 * @param localExecutorService
 *           ExecutorService to run local tasks
 * @param takeExecutorOwnership
 *           if true {@link DistributedExecutorService#shutdown()} and
 *           {@link DistributedExecutorService#shutdownNow()} method will shutdown
 *           localExecutorService as well
 * @throws IllegalArgumentException
 *           if the cache is null, the executor is null, or the executor is already shut down
 */
public DefaultExecutorService(Cache<?, ?> masterCacheNode, ExecutorService localExecutorService,
         boolean takeExecutorOwnership) {
   super();
   // Fail fast on unusable arguments before touching any cache internals.
   if (masterCacheNode == null)
      throw new IllegalArgumentException("Can not use null cache for DefaultExecutorService");
   else if (localExecutorService == null)
      throw new IllegalArgumentException("Can not use null instance of ExecutorService");
   else if (localExecutorService.isShutdown())
      throw new IllegalArgumentException("Can not use an instance of ExecutorService which is shutdown");
   ensureAccessPermissions(masterCacheNode.getAdvancedCache());
   ensureProperCacheState(masterCacheNode.getAdvancedCache());
   this.cache = masterCacheNode.getAdvancedCache();
   // Resolve the cluster-facing components from the cache's component registry.
   ComponentRegistry registry = SecurityActions.getCacheComponentRegistry(cache);
   this.rpc = SecurityActions.getCacheRpcManager(cache);
   this.invoker = registry.getComponent(InterceptorChain.class);
   this.factory = registry.getComponent(CommandsFactory.class);
   this.marshaller = registry.getComponent(StreamingMarshaller.class, CACHE_MARSHALLER);
   this.cancellationService = registry.getComponent(CancellationService.class);
   this.localExecutorService = localExecutorService;
   this.takeExecutorOwnership = takeExecutorOwnership;
   this.timeService = registry.getTimeService();
   this.clusterDependentLogic = registry.getComponent(ClusteringDependentLogic.class);
}
/**
 * Creates a {@link DistributedTaskBuilder} for the given callable, pre-configured with this
 * cache's synchronous replication timeout as the default task timeout.
 *
 * @param callable the unit of work the built task will execute
 * @return a builder wrapping {@code callable}
 */
@Override
public <T> DistributedTaskBuilder<T> createDistributedTaskBuilder(Callable<T> callable) {
   Configuration cacheConfiguration = SecurityActions.getCacheConfiguration(cache);
   // Default task timeout mirrors the cache's sync replication timeout.
   long to = cacheConfiguration.clustering().sync().replTimeout();
   DistributedTaskBuilder<T> dtb = new DefaultDistributedTaskBuilder<T>(to);
   dtb.callable(callable);
   return dtb;
}
/**
 * Submits a runnable for execution, returning a {@link NotifyingFuture} that completes with
 * {@code result} once the task finishes.
 */
@Override
public <T> NotifyingFuture<T> submit(Runnable task, T result) {
   // super.submit() wraps the task via newTaskFor(), which this class arranges to
   // produce NotifyingFuture instances, so the cast is safe.
   return (NotifyingFuture<T>) super.submit(task, result);
}
/**
 * Submits a value-returning task for execution, returning a {@link NotifyingFuture} for its
 * result.
 */
@Override
public <T> NotifyingFuture<T> submit(Callable<T> task) {
   // super.submit() wraps the task via newTaskFor(), which this class arranges to
   // produce NotifyingFuture instances, so the cast is safe.
   return (NotifyingFuture<T>) super.submit(task);
}
@Override
public void shutdown() {
   // Graceful shutdown: running tasks are not interrupted.
   realShutdown(false);
}
/**
 * Returns the current cluster members, or a singleton list containing only this node's address
 * when the cache is not clustered (no {@code RpcManager} present).
 */
protected List<Address> getMembers() {
   if (rpc != null) {
      return rpc.getMembers();
   } else {
      // Local-only mode: this node is the sole "member".
      return Collections.singletonList(getAddress());
   }
}
/**
 * Returns the cluster members eligible to execute the given task, filtered by the task's
 * execution policy (same machine / rack / site / all).
 */
protected <T> List<Address> executionCandidates(DistributedTask<T> task) {
   return filterMembers(task.getTaskExecutionPolicy(), getMembers());
}
// This node's cluster address, obtained from the clustering-dependent logic component.
private Address getAddress() {
   return clusterDependentLogic.getAddress();
}
/**
 * Common shutdown path for {@link #shutdown()} and {@link #shutdownNow()}.
 *
 * @param interrupt if true, interrupt running tasks (shutdownNow semantics)
 * @return the tasks that never commenced execution; currently always empty
 */
private List<Runnable> realShutdown(boolean interrupt) {
   isShutdown.set(true);
   // TODO cancel all tasks
   // Only shut the local executor down if this service owns its lifecycle.
   if (takeExecutorOwnership) {
      if (interrupt)
         localExecutorService.shutdownNow();
      else
         localExecutorService.shutdown();
   }
   return InfinispanCollections.emptyList();
}
/**
 * Attempts to stop all executing tasks and halts processing of waiting tasks.
 *
 * @return the tasks that never commenced execution; currently always empty
 */
@Override
public List<Runnable> shutdownNow() {
   return realShutdown(true);
}
@Override
public boolean isShutdown() {
   return isShutdown.get();
}
/**
 * Reports termination. Currently equivalent to {@link #isShutdown()} because outstanding
 * tasks are not yet tracked.
 */
@Override
public boolean isTerminated() {
   // TODO account for all tasks
   return isShutdown.get();
}
// NOTE(review): stub — returns immediately regardless of timeout because task tracking
// is not implemented yet (see TODO below); confirm callers tolerate this.
@Override
public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException {
   //long nanoTimeWait = unit.toNanos(timeout);
   // TODO wait for all tasks to finish
   return true;
}
@Override
public T invokeAny(Collection extends Callable> tasks) throws InterruptedException,
ExecutionException {
try {
return doInvokeAny(tasks, false, 0);
} catch (TimeoutException cannotHappen) {
assert false;
return null;
}
}
@Override
public T invokeAny(Collection extends Callable> tasks, long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
return doInvokeAny(tasks, true, unit.toNanos(timeout));
}
/**
* the main mechanics of invokeAny. This was essentially copied from
* {@link AbstractExecutorService} doInvokeAny except that we replaced the
* {@link ExecutorCompletionService} with our {@link DistributedExecutionCompletionService}.
*/
private T doInvokeAny(Collection extends Callable> tasks, boolean timed, long nanos)
throws InterruptedException, ExecutionException, TimeoutException {
if (tasks == null)
throw new NullPointerException();
int ntasks = tasks.size();
if (ntasks == 0)
throw new IllegalArgumentException();
List> futures = new ArrayList>(ntasks);
CompletionService ecs = new DistributedExecutionCompletionService(this);
// For efficiency, especially in executors with limited
// parallelism, check to see if previously submitted tasks are
// done before submitting more of them. This interleaving
// plus the exception mechanics account for messiness of main
// loop.
try {
// Record exceptions so that if we fail to obtain any
// result, we can throw the last exception we got.
ExecutionException ee = null;
long lastTime = (timed) ? timeService.time() : 0;
Iterator extends Callable> it = tasks.iterator();
// Start one task for sure; the rest incrementally
futures.add(ecs.submit(it.next()));
--ntasks;
int active = 1;
for (;;) {
Future f = ecs.poll();
if (f == null) {
if (ntasks > 0) {
--ntasks;
futures.add(ecs.submit(it.next()));
++active;
} else if (active == 0)
break;
else if (timed) {
f = ecs.poll(nanos, TimeUnit.NANOSECONDS);
if (f == null)
throw new TimeoutException();
long now = timeService.time();
nanos -= timeService.timeDuration(lastTime, now, TimeUnit.NANOSECONDS);
lastTime = now;
} else
f = ecs.take();
}
if (f != null) {
--active;
try {
return f.get();
} catch (InterruptedException ie) {
throw ie;
} catch (ExecutionException eex) {
ee = eex;
} catch (RuntimeException rex) {
ee = new ExecutionException(rex);
}
}
}
if (ee == null)
ee = new ExecutionException() {
private static final long serialVersionUID = 200818694545553992L;
};
throw ee;
} finally {
for (Future f : futures)
f.cancel(true);
}
}
@Override
public void execute(Runnable command) {
if (!isShutdown.get()) {
DistributedTaskPart