All downloads are free. The search and download functionality uses the official Maven repository.
Please wait. This can take some minutes ...
Many resources are needed to download a project. Please understand that we have to cover our server costs. Thank you in advance.
Project price: only $1.
You can buy this project and download or modify it as often as you want.
com.alibaba.schedulerx.worker.master.ShardingTaskMaster Maven / Gradle / Ivy
package com.alibaba.schedulerx.worker.master;
import java.security.InvalidParameterException;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.StringUtils;
import com.alibaba.schedulerx.common.domain.InstanceStatus;
import com.alibaba.schedulerx.common.domain.JobInstanceInfo;
import com.alibaba.schedulerx.common.domain.ShardingTaskProgress;
import com.alibaba.schedulerx.common.domain.ShardingTaskStatus;
import com.alibaba.schedulerx.common.domain.TaskProgressCounter;
import com.alibaba.schedulerx.common.domain.TaskStatus;
import com.alibaba.schedulerx.common.domain.WorkerProgressCounter;
import com.alibaba.schedulerx.common.util.ExceptionUtil;
import com.alibaba.schedulerx.common.util.JsonUtil;
import com.alibaba.schedulerx.protocol.Worker.ContainerReportTaskStatusRequest;
import com.alibaba.schedulerx.protocol.Worker.MasterStartContainerRequest;
import com.alibaba.schedulerx.worker.domain.ShardingTask;
import com.alibaba.schedulerx.worker.log.LogFactory;
import com.alibaba.schedulerx.worker.log.Logger;
import com.alibaba.schedulerx.worker.logcollector.ClientLoggerMessage;
import com.alibaba.schedulerx.worker.logcollector.LogCollector;
import com.alibaba.schedulerx.worker.logcollector.LogCollectorFactory;
import com.alibaba.schedulerx.worker.processor.ProcessResult;
import com.alibaba.schedulerx.worker.util.SerializationUtil;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.protobuf.ByteString;
import akka.actor.ActorContext;
/**
 * TaskMaster for sharding jobs.
 *
 * <p>Parses the user-configured sharding parameters (e.g. {@code "0=a,1=b,2=c"},
 * one {@code id=value} pair per shard, separated by commas or line breaks),
 * dispatches one container per shard, and tracks per-shard status so the
 * console can render sharding progress.
 *
 * @author xiaomeng.hxm
 */
public class ShardingTaskMaster extends GridTaskMaster {

    private static final Logger LOGGER = LogFactory.getLogger(ShardingTaskMaster.class);

    /** Latest known status of every shard, keyed by sharding id. */
    private final Map<Long, ShardingTaskStatus> shardingTaskStatusMap = Maps.newConcurrentMap();

    private final LogCollector logCollector = LogCollectorFactory.get();

    /** Raw {@code "id=value"} tokens, populated by {@link #parseShardingParameters}. */
    private String[] parameters;

    public ShardingTaskMaster(JobInstanceInfo jobInstanceInfo, ActorContext actorContext) throws Exception {
        super(jobInstanceInfo, actorContext);
    }

    /**
     * Resolves the effective sharding parameter string for this instance and
     * splits it into {@code "id=value"} tokens stored in {@link #parameters}.
     *
     * @param info job instance information carrying job/instance parameters
     * @throws InvalidParameterException if no sharding parameters are configured
     */
    private void parseShardingParameters(JobInstanceInfo info) {
        String shardingParameters;
        Long wfInstanceId = info.getWfInstanceId();
        // For sharding jobs running inside a workflow, always use the sharding
        // parameters from the original job configuration.
        if (wfInstanceId != null && wfInstanceId.longValue() > 0) {
            shardingParameters = info.getParameters();
        } else {
            shardingParameters = StringUtils.isNotEmpty(info.getInstanceParameters()) ?
                    info.getInstanceParameters() : info.getParameters();
        }
        if (StringUtils.isEmpty(shardingParameters)) {
            throw new InvalidParameterException("sharding parameters is empty");
        }
        // Shards may be separated by commas or line breaks. Match "\r\n" as a
        // single separator (before the lone "\r" alternative) so CRLF input
        // does not produce empty tokens.
        parameters = shardingParameters.split(",|\r\n|\n|\r");
    }

    /**
     * Parses the sharding parameters and dispatches one start-container
     * request per shard. On any failure the whole instance is marked FAILED
     * and the error is pushed to the client log collector.
     *
     * @param info job instance to start
     */
    @Override
    public synchronized void submitInstance(JobInstanceInfo info) {
        try {
            parseShardingParameters(info);
            int shardingNum = parameters.length;
            List<MasterStartContainerRequest> startContainerRequests = Lists.newArrayList();
            for (String para : parameters) {
                String[] tokens = para.split("=");
                if (tokens.length != 2) {
                    throw new InvalidParameterException("invalid sharding parameters, should be like 0=a,1=b,2=c");
                }
                long shardingId = Long.valueOf(tokens[0]);
                // The shard id string doubles as the task name / progress key.
                String taskName = tokens[0];
                String shardingParameter = tokens[1];
                if (taskProgressMap.containsKey(taskName)) {
                    // Include the actual id; the original message left a bare "{}".
                    throw new InvalidParameterException("shardingId=" + taskName + " is duplicated");
                }
                ShardingTask task = new ShardingTask(shardingId, shardingParameter);
                MasterStartContainerRequest.Builder builder = convert2StartContainerRequestBuilder(info, shardingId,
                        taskName, ByteString.copyFrom(SerializationUtil.serialize(task)), false);
                builder.setShardingNum(shardingNum);
                MasterStartContainerRequest startContainerRequest = builder.build();
                startContainerRequests.add(startContainerRequest);
                TaskProgressCounter taskProgressCounter = new TaskProgressCounter(taskName);
                taskProgressCounter.incrementTotal();
                taskProgressMap.put(taskName, taskProgressCounter);
            }
            startBatchHandler();
            batchDispatchTasks(startContainerRequests);
            init();
        } catch (Throwable e) {
            String jobIdAndInstanceId = jobInstanceInfo.getJobId() + "_" + jobInstanceInfo.getJobInstanceId();
            LOGGER.error("", e);
            updateNewInstanceStatus(getSerialNum(), InstanceStatus.FAILED, ExceptionUtil.getMessage(e));
            logCollector.collect(jobInstanceInfo.getAppGroupId(), jobIdAndInstanceId, ClientLoggerMessage.INSTANCE_INIT_FAIL, e, jobInstanceInfo.getGroupId());
        }
    }

    /**
     * Same as the parent builder, but stamps the total shard count on every
     * request so each container knows the overall sharding layout.
     */
    @Override
    protected MasterStartContainerRequest.Builder convert2StartContainerRequestBuilder(JobInstanceInfo jobInstanceInfo, long taskId, String taskName, ByteString taskBody, boolean failover) {
        MasterStartContainerRequest.Builder builder = super.convert2StartContainerRequestBuilder(jobInstanceInfo, taskId, taskName, taskBody, failover);
        builder.setShardingNum(parameters.length);
        return builder;
    }

    /**
     * Applies the parent bookkeeping, then mirrors each reported status into
     * {@link #shardingTaskStatusMap} for progress rendering.
     *
     * @param requests status reports from worker containers
     */
    @Override
    public void batchUpdateTaskStatues(List requests) {
        super.batchUpdateTaskStatues(requests);
        for (ContainerReportTaskStatusRequest request : requests) {
            long taskId = request.getTaskId();
            int taskStatus = request.getStatus();
            String workerAddr = request.getWorkerAddr();
            if (shardingTaskStatusMap.containsKey(taskId)) {
                // Known shard: only the status changes; keep the original worker.
                ShardingTaskStatus shardingTaskStatus = shardingTaskStatusMap.get(taskId);
                shardingTaskStatus.setStatus(taskStatus);
            } else {
                ShardingTaskStatus shardingTaskStatus = new ShardingTaskStatus(taskId, workerAddr, taskStatus);
                shardingTaskStatusMap.put(taskId, shardingTaskStatus);
            }
        }
    }

    /**
     * Assigns each pulled request to a worker, grouping normal and failover
     * requests separately, and initializes per-worker and per-shard progress
     * counters. If no worker is available the instance is failed and the
     * remaining requests are abandoned.
     *
     * @param masterStartContainerRequests requests to dispatch
     * @param worker2ReqsWithNormal        out-param: workerIdAddr -> normal requests
     * @param worker2ReqsWithFailover      out-param: workerIdAddr -> failover requests
     * @param remoteWorker                 fixed target worker, or null to load-balance
     */
    @Override
    protected void batchHandlePulledProgress(List masterStartContainerRequests,
            Map> worker2ReqsWithNormal,
            Map> worker2ReqsWithFailover,
            String remoteWorker) {
        for (MasterStartContainerRequest request : masterStartContainerRequests) {
            String workerIdAddr = ((remoteWorker != null) ? remoteWorker : selectWorker(request.getFailover()));
            if (workerIdAddr == null) {
                updateNewInstanceStatus(getSerialNum(), InstanceStatus.FAILED, "all worker is down!");
                break;
            }
            // workerIdAddr is "id@host:port"; the address part keys the progress map.
            String workerAddr = workerIdAddr.split("@")[1];
            if (request.getFailover()) {
                if (!worker2ReqsWithFailover.containsKey(workerIdAddr)) {
                    worker2ReqsWithFailover.put(workerIdAddr, Lists.newArrayList(request));
                } else {
                    worker2ReqsWithFailover.get(workerIdAddr).add(request);
                }
            } else {
                if (!worker2ReqsWithNormal.containsKey(workerIdAddr)) {
                    worker2ReqsWithNormal.put(workerIdAddr, Lists.newArrayList(request));
                } else {
                    worker2ReqsWithNormal.get(workerIdAddr).add(request);
                }
                // Failover re-dispatch must not inflate the pulled count.
                taskProgressMap.get(request.getTaskName()).incrementPulled();
            }
            if (workerAddr != null && !workerProgressMap.containsKey(workerAddr)) {
                // Double-checked insert so concurrent dispatchers create one counter.
                synchronized (this) {
                    if (!workerProgressMap.containsKey(workerAddr)) {
                        WorkerProgressCounter workerProgressCounter = new WorkerProgressCounter(workerAddr);
                        workerProgressMap.put(workerAddr, workerProgressCounter);
                    }
                }
            }
            workerProgressMap.get(workerAddr).incrementTotal();
            workerProgressMap.get(workerAddr).incrementPulled();
            ShardingTaskStatus shardingTaskStatus = new ShardingTaskStatus(request.getTaskId(), workerAddr,
                    TaskStatus.INIT.getValue());
            shardingTaskStatusMap.put(request.getTaskId(), shardingTaskStatus);
        }
    }

    /**
     * @return JSON snapshot of all shard statuses for console display
     */
    @Override
    public String getJobInstanceProgress() {
        ShardingTaskProgress detail = new ShardingTaskProgress();
        detail.setShardingProgress(shardingTaskStatusMap.values());
        return JsonUtil.toJson(detail);
    }

    @Override
    protected void checkProcessor() throws Exception {
        // Sharding jobs carry the processor inside the task body; nothing to verify here.
    }

    /**
     * Best-effort cleanup of persisted tasks once the instance finishes;
     * a cleanup failure is logged but never fails the instance.
     */
    @Override
    public ProcessResult postFinish(long jobInstanceId) {
        try {
            taskPersistence.clearTasks(jobInstanceId);
        } catch (Throwable e) {
            LOGGER.error("", e);
        }
        return new ProcessResult(true);
    }

    @Override
    public void clear() {
        super.clear();
        if (shardingTaskStatusMap != null) {
            shardingTaskStatusMap.clear();
        }
    }
}