// © 2024 SAP SE or an SAP affiliate company. All rights reserved.
package com.sap.hana.datalake.files.utils.threads;
import com.sap.hana.datalake.files.HdlfsConstants;
import com.sap.hana.datalake.files.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.SemaphoredDelegatingExecutor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
@InterfaceAudience.Private
public class ThreadUtils {
private static final Logger LOG = LoggerFactory.getLogger(ThreadUtils.class);
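/**
 * Builds a daemon {@link ThreadFactory} whose threads are named with the given prefix.
 * The factory implementation is resolved from the configuration and defaults to
 * {@link SimpleThreadFactoryProvider}.
 *
 * <p>Illustrative sketch only; {@code MyThreadFactoryProvider} is a hypothetical custom provider:
 * <pre>{@code
 * Configuration conf = new Configuration();
 * // Optional: plug in a custom provider instead of the default SimpleThreadFactoryProvider
 * conf.setClass(HdlfsConstants.FS_HDLFS_THREADS_FACTORY_PROVIDER_CLASS_KEY,
 *     MyThreadFactoryProvider.class, ThreadFactoryProvider.class);
 * ThreadFactory factory = ThreadUtils.newDaemonThreadFactory("my-pool", conf);
 * }</pre>
 */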
public static ThreadFactory newDaemonThreadFactory(final String namePrefix, final Configuration conf) {
final Class<? extends ThreadFactoryProvider> threadFactoryProviderClass = conf.getClass(HdlfsConstants.FS_HDLFS_THREADS_FACTORY_PROVIDER_CLASS_KEY,
SimpleThreadFactoryProvider.class,
ThreadFactoryProvider.class);
final ThreadFactoryProvider threadFactoryProvider = ReflectionUtils.newInstance(threadFactoryProviderClass, conf);
return threadFactoryProvider.createDaemonThreadFactory(namePrefix);
}
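/**
 * Creates a single-threaded {@link ExecutorService} backed by a daemon thread.
 *
 * <p>Illustrative usage sketch (the name prefix is arbitrary):
 * <pre>{@code
 * ExecutorService executor = ThreadUtils.newSingleDaemonThreadExecutor("cache-refresh", conf);
 * executor.submit(() -> LOG.info("running on a daemon thread"));
 * }</pre>
 */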
public static ExecutorService newSingleDaemonThreadExecutor(final String namePrefix, final Configuration conf) {
final ThreadFactory threadFactory = newDaemonThreadFactory(namePrefix, conf);
return Executors.newSingleThreadExecutor(threadFactory);
}
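/**
 * Creates a fixed-size {@link ExecutorService} of daemon threads.
 *
 * <p>Illustrative usage sketch (thread count and prefix are arbitrary):
 * <pre>{@code
 * ExecutorService uploadPool = ThreadUtils.newDaemonThreadFixedExecutor(4, "upload", conf);
 * }</pre>
 */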
public static ExecutorService newDaemonThreadFixedExecutor(final int numThreads, final String namePrefix, final Configuration conf) {
final ThreadFactory threadFactory = newDaemonThreadFactory(namePrefix, conf);
return Executors.newFixedThreadPool(numThreads, threadFactory);
}
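/**
 * Creates a {@link ScheduledExecutorService} of daemon threads with the given core pool size.
 *
 * <p>Illustrative usage sketch (interval and prefix are arbitrary):
 * <pre>{@code
 * ScheduledExecutorService scheduler = ThreadUtils.newDaemonThreadScheduledExecutor(1, "token-renewal", conf);
 * scheduler.scheduleAtFixedRate(() -> LOG.debug("tick"), 0, 30, TimeUnit.SECONDS);
 * }</pre>
 */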
public static ScheduledExecutorService newDaemonThreadScheduledExecutor(final int corePoolSize, final String namePrefix, final Configuration conf) {
final ThreadFactory threadFactory = newDaemonThreadFactory(namePrefix, conf);
return Executors.newScheduledThreadPool(corePoolSize, threadFactory);
}
/**
 * Creates an {@link ExecutorService} that blocks the submission of new tasks once its
 * queue is full, using a semaphore: each submission acquires a permit and each completed
 * task releases one.
 *
 * <p>This is inspired by {@link org.apache.hadoop.util.BlockingThreadPoolExecutorService}.
 */
public static ExecutorService newDaemonThreadBlockingExecutor(final int activeTasks, final int waitingTasks, final boolean allowCoreThreadTimeOut,
final long keepAliveTime, final TimeUnit keepAliveTimeUnit, final boolean fairSemaphore,
final String namePrefix, final Configuration conf) {
final ThreadFactory threadFactory = newDaemonThreadFactory(namePrefix, conf);
/* Although we generally only expect up to waitingTasks tasks in the queue,
we need to be able to buffer all tasks in case de-queueing is
slower than enqueueing. */
final BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>(waitingTasks + activeTasks);
final ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(activeTasks, activeTasks,
keepAliveTime, keepAliveTimeUnit, workQueue, threadFactory, (r, executor) -> {
// This should never be reached: the wrapping SemaphoredDelegatingExecutor caps
// in-flight tasks at the queue capacity, so the work queue cannot overflow
LOG.error("Could not submit task to executor [{}]", executor);
});
threadPoolExecutor.allowCoreThreadTimeOut(allowCoreThreadTimeOut);
return new SemaphoredDelegatingExecutor(threadPoolExecutor, waitingTasks + activeTasks, fairSemaphore);
}
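/*
 * Illustrative usage sketch for the blocking executor (all values are arbitrary):
 *
 *   ExecutorService executor = ThreadUtils.newDaemonThreadBlockingExecutor(
 *       8,                    // activeTasks: max tasks executing concurrently
 *       16,                   // waitingTasks: max tasks queued before submit() blocks
 *       true,                 // allowCoreThreadTimeOut
 *       60, TimeUnit.SECONDS, // keepAliveTime for idle threads
 *       false,                // fairSemaphore
 *       "block-upload", conf);
 *
 *   // The 25th concurrent submission (8 active + 16 waiting) blocks the calling
 *   // thread until a running task completes and releases a semaphore permit.
 */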
}