package com.alibaba.schedulerx.worker.pull;

import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
import com.alibaba.schedulerx.common.util.ConfigUtil;
import com.alibaba.schedulerx.protocol.Worker.ContainerReportTaskStatusRequest;
import com.alibaba.schedulerx.worker.batch.ContainerStatusReqHandler;
import com.alibaba.schedulerx.worker.batch.ContainerStatusReqHandlerPool;
import com.alibaba.schedulerx.worker.batch.ReqQueue;
import com.alibaba.schedulerx.worker.container.ContainerFactory;
import com.alibaba.schedulerx.worker.container.ContainerPool;
import com.alibaba.schedulerx.worker.domain.WorkerConstants;
import com.alibaba.schedulerx.worker.log.LogFactory;
import com.alibaba.schedulerx.worker.log.Logger;
import com.alibaba.schedulerx.worker.util.WorkerConfigUtil;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;

/**
 * Manages pull-model execution on a worker: for each running job instance it
 * keeps a task queue, a pull thread that fetches tasks from the task master,
 * and a group of consumer threads that execute them.
 *
 * @author xiaomeng.hxm
 */
public enum PullManager {
    INSTANCE;

    // Per job instance: the task queue, the pull thread that fills it, and the consumer threads that drain it.
    private Map<Long, BlockingContainerQueue> queueMap = Maps.newConcurrentMap();
    private Map<Long, PullThread> pullThreadMap = Maps.newConcurrentMap();
    private Map<Long, ConsumerThread[]> consumerThreadMap = Maps.newConcurrentMap();
    private ContainerStatusReqHandlerPool statusReqBatchHandlerPool = ContainerStatusReqHandlerPool.INSTANCE;
    // Job instances reported as crashed; the flag is cleared again in stop().
    private Set<Long> crashedInstanceSet = Sets.newConcurrentHashSet();
    private static Logger LOGGER = LogFactory.getLogger(PullManager.class);

    public void init(long jobInstanceId, long serialNum, int pageSize, int queueSize, int consumerSize,
            String taskMasterAkkaPath) throws Exception {
        ContainerPool containerPool = ContainerFactory.getContainerPool();
        AtomicLong jobInstanceLock = containerPool.getInstanceLock(jobInstanceId, serialNum);
        synchronized (jobInstanceLock) {
            if (!queueMap.containsKey(jobInstanceId)) {
                // First init for this job instance: create its queue and start the pull thread.
                BlockingContainerQueue queue = new BlockingContainerQueue(queueSize);
                queueMap.put(jobInstanceId, queue);
                PullThread pullThread = new PullThread(jobInstanceId, serialNum, pageSize, taskMasterAkkaPath, queue);
                pullThread.start();
                pullThreadMap.put(jobInstanceId, pullThread);

                // With a shared container pool all instances report task status through one handler (key 0);
                // otherwise each job instance gets its own handler keyed by its id.
                boolean enableShareContainerPool = WorkerConfigUtil.isEnableShareContainerPool();
                long statusReqBatchHandlerKey = (enableShareContainerPool ? 0 : jobInstanceId);
                if (!statusReqBatchHandlerPool.contains(statusReqBatchHandlerKey)) {
                    ReqQueue<ContainerReportTaskStatusRequest> reqQueue = new ReqQueue<>(statusReqBatchHandlerKey, 10 * 10000);
                    reqQueue.init();
                    int batchSize = ConfigUtil.getWorkerConfig().getInt(WorkerConstants.WORKER_MAP_PAGE_SIZE,
                            WorkerConstants.WORKER_MAP_PAGE_SIZE_DEFAULT);
                    statusReqBatchHandlerPool.start(statusReqBatchHandlerKey,
                            new ContainerStatusReqHandler<>(statusReqBatchHandlerKey, 1, 1, batchSize, reqQueue,
                                    taskMasterAkkaPath));
                }

                // Start the consumer threads that take tasks from the queue and run them in containers.
                ConsumerThread[] consumers = new ConsumerThread[consumerSize];
                for (int i = 0; i < consumerSize; i++) {
                    consumers[i] = new ConsumerThread(queue, ContainerFactory.getContainerPool(), taskMasterAkkaPath);
                    new Thread(consumers[i], "Schedulerx-ConsumerThread-" + jobInstanceId + "-" + i).start();
                }
                consumerThreadMap.put(jobInstanceId, consumers);
            } else {
                // Already initialized: just advance the serial number on the existing pull thread.
                PullThread pullThread = pullThreadMap.get(jobInstanceId);
                if (pullThread != null) {
                    pullThread.setSerialNum(serialNum);
                }
            }
        }
    }

    /**
     * Marks a job instance as crashed; the flag is cleared again by {@link #stop(long)}.
     */
    public void crash(long jobInstanceId) {
        crashedInstanceSet.add(jobInstanceId);
    }

    /**
     * Stops the pull thread and consumer threads of a job instance and clears its queue.
     */
    public void stop(long jobInstanceId) {
        if (pullThreadMap.containsKey(jobInstanceId)) {
            pullThreadMap.get(jobInstanceId).stopRunning();
            pullThreadMap.remove(jobInstanceId);
        }
        if (consumerThreadMap.containsKey(jobInstanceId)) {
            ConsumerThread[] consumers = consumerThreadMap.get(jobInstanceId);
            for (ConsumerThread consumer : consumers) {
                consumer.stopRunning();
            }
            consumerThreadMap.remove(jobInstanceId);
        }
        if (queueMap.containsKey(jobInstanceId)) {
            queueMap.get(jobInstanceId).clear();
            queueMap.remove(jobInstanceId);
        }
        crashedInstanceSet.remove(jobInstanceId);
    }

    public void stopAll() {
        for (Long jobInstanceId : pullThreadMap.keySet()) {
            stop(jobInstanceId);
        }
    }

    public boolean contains(long jobInstanceId) {
        return queueMap.containsKey(jobInstanceId);
    }

    public boolean isCrashed(long jobInstanceId) {
        return crashedInstanceSet.contains(jobInstanceId);
    }

    public boolean hasConsumer() {
        return !consumerThreadMap.isEmpty();
    }
}
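
// Usage sketch (not part of the original source): a pull-model task master on the
// worker is expected to drive this manager roughly as below. The concrete values
// (page size, queue size, consumer count, akka path) are hypothetical.
//
//     PullManager pullManager = PullManager.INSTANCE;
//     pullManager.init(jobInstanceId, serialNum, 100, 1000, 4,
//             "akka.tcp://server@127.0.0.1:25520/user/task_master");
//     ...
//     if (pullManager.isCrashed(jobInstanceId)) {
//         // instance was marked crashed via crash(jobInstanceId); handle failover
//     }
//     pullManager.stop(jobInstanceId);   // or pullManager.stopAll() on worker shutdown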