// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: MRClientProtocol.proto
package org.apache.hadoop.yarn.proto;
public final class MRClientProtocol {
private MRClientProtocol() {}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
}
/**
* Protobuf service {@code hadoop.mapreduce.MRClientProtocolService}
*
*
* If making changes to this, please edit HSClientProtocolService
*
*/
public static abstract class MRClientProtocolService
implements com.google.protobuf.Service {
protected MRClientProtocolService() {}
public interface Interface {
/**
* rpc getJobReport(.hadoop.mapreduce.GetJobReportRequestProto) returns (.hadoop.mapreduce.GetJobReportResponseProto);
*/
public abstract void getJobReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProto> done);
/**
* rpc getTaskReport(.hadoop.mapreduce.GetTaskReportRequestProto) returns (.hadoop.mapreduce.GetTaskReportResponseProto);
*/
public abstract void getTaskReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportResponseProto> done);
/**
* rpc getTaskAttemptReport(.hadoop.mapreduce.GetTaskAttemptReportRequestProto) returns (.hadoop.mapreduce.GetTaskAttemptReportResponseProto);
*/
public abstract void getTaskAttemptReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportResponseProto> done);
/**
* rpc getCounters(.hadoop.mapreduce.GetCountersRequestProto) returns (.hadoop.mapreduce.GetCountersResponseProto);
*/
public abstract void getCounters(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProto> done);
/**
* rpc getTaskAttemptCompletionEvents(.hadoop.mapreduce.GetTaskAttemptCompletionEventsRequestProto) returns (.hadoop.mapreduce.GetTaskAttemptCompletionEventsResponseProto);
*/
public abstract void getTaskAttemptCompletionEvents(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsResponseProto> done);
/**
* rpc getTaskReports(.hadoop.mapreduce.GetTaskReportsRequestProto) returns (.hadoop.mapreduce.GetTaskReportsResponseProto);
*/
public abstract void getTaskReports(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsResponseProto> done);
/**
* rpc getDiagnostics(.hadoop.mapreduce.GetDiagnosticsRequestProto) returns (.hadoop.mapreduce.GetDiagnosticsResponseProto);
*/
public abstract void getDiagnostics(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProto> done);
/**
* rpc getDelegationToken(.hadoop.common.GetDelegationTokenRequestProto) returns (.hadoop.common.GetDelegationTokenResponseProto);
*/
public abstract void getDelegationToken(
com.google.protobuf.RpcController controller,
org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto> done);
/**
* rpc killJob(.hadoop.mapreduce.KillJobRequestProto) returns (.hadoop.mapreduce.KillJobResponseProto);
*/
public abstract void killJob(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobResponseProto> done);
/**
* rpc killTask(.hadoop.mapreduce.KillTaskRequestProto) returns (.hadoop.mapreduce.KillTaskResponseProto);
*/
public abstract void killTask(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskResponseProto> done);
/**
* rpc killTaskAttempt(.hadoop.mapreduce.KillTaskAttemptRequestProto) returns (.hadoop.mapreduce.KillTaskAttemptResponseProto);
*/
public abstract void killTaskAttempt(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptResponseProto> done);
/**
* rpc failTaskAttempt(.hadoop.mapreduce.FailTaskAttemptRequestProto) returns (.hadoop.mapreduce.FailTaskAttemptResponseProto);
*/
public abstract void failTaskAttempt(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptResponseProto> done);
/**
* rpc renewDelegationToken(.hadoop.common.RenewDelegationTokenRequestProto) returns (.hadoop.common.RenewDelegationTokenResponseProto);
*/
public abstract void renewDelegationToken(
com.google.protobuf.RpcController controller,
org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto> done);
/**
* rpc cancelDelegationToken(.hadoop.common.CancelDelegationTokenRequestProto) returns (.hadoop.common.CancelDelegationTokenResponseProto);
*/
public abstract void cancelDelegationToken(
com.google.protobuf.RpcController controller,
org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto> done);
}
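// Server-side usage sketch (illustrative comment, not generated code): an application
// implements Interface and wraps it with newReflectiveService(...) below to obtain a
// com.google.protobuf.Service. Each method completes its callback with done.run(response).
// The anonymous handler shown here is a hypothetical stand-in that returns default instances.
//
//   MRClientProtocolService.Interface impl = new MRClientProtocolService.Interface() {
//     @java.lang.Override
//     public void getJobReport(
//         com.google.protobuf.RpcController controller,
//         org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto request,
//         com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProto> done) {
//       done.run(org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProto.getDefaultInstance());
//     }
//     // ... the other 13 Interface methods must be implemented the same way for the
//     // anonymous class to compile; they are elided in this sketch.
//   };
//   com.google.protobuf.Service service = MRClientProtocolService.newReflectiveService(impl);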
public static com.google.protobuf.Service newReflectiveService(
final Interface impl) {
return new MRClientProtocolService() {
@java.lang.Override
public void getJobReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProto> done) {
impl.getJobReport(controller, request, done);
}
@java.lang.Override
public void getTaskReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportResponseProto> done) {
impl.getTaskReport(controller, request, done);
}
@java.lang.Override
public void getTaskAttemptReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportResponseProto> done) {
impl.getTaskAttemptReport(controller, request, done);
}
@java.lang.Override
public void getCounters(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProto> done) {
impl.getCounters(controller, request, done);
}
@java.lang.Override
public void getTaskAttemptCompletionEvents(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsResponseProto> done) {
impl.getTaskAttemptCompletionEvents(controller, request, done);
}
@java.lang.Override
public void getTaskReports(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsResponseProto> done) {
impl.getTaskReports(controller, request, done);
}
@java.lang.Override
public void getDiagnostics(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProto> done) {
impl.getDiagnostics(controller, request, done);
}
@java.lang.Override
public void getDelegationToken(
com.google.protobuf.RpcController controller,
org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto> done) {
impl.getDelegationToken(controller, request, done);
}
@java.lang.Override
public void killJob(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobResponseProto> done) {
impl.killJob(controller, request, done);
}
@java.lang.Override
public void killTask(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskResponseProto> done) {
impl.killTask(controller, request, done);
}
@java.lang.Override
public void killTaskAttempt(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptResponseProto> done) {
impl.killTaskAttempt(controller, request, done);
}
@java.lang.Override
public void failTaskAttempt(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptResponseProto> done) {
impl.failTaskAttempt(controller, request, done);
}
@java.lang.Override
public void renewDelegationToken(
com.google.protobuf.RpcController controller,
org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto> done) {
impl.renewDelegationToken(controller, request, done);
}
@java.lang.Override
public void cancelDelegationToken(
com.google.protobuf.RpcController controller,
org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto> done) {
impl.cancelDelegationToken(controller, request, done);
}
};
}
public static com.google.protobuf.BlockingService
newReflectiveBlockingService(final BlockingInterface impl) {
return new com.google.protobuf.BlockingService() {
public final com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptorForType() {
return getDescriptor();
}
public final com.google.protobuf.Message callBlockingMethod(
com.google.protobuf.Descriptors.MethodDescriptor method,
com.google.protobuf.RpcController controller,
com.google.protobuf.Message request)
throws com.google.protobuf.ServiceException {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.callBlockingMethod() given method descriptor for " +
"wrong service type.");
}
switch(method.getIndex()) {
case 0:
return impl.getJobReport(controller, (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto)request);
case 1:
return impl.getTaskReport(controller, (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportRequestProto)request);
case 2:
return impl.getTaskAttemptReport(controller, (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportRequestProto)request);
case 3:
return impl.getCounters(controller, (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto)request);
case 4:
return impl.getTaskAttemptCompletionEvents(controller, (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto)request);
case 5:
return impl.getTaskReports(controller, (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsRequestProto)request);
case 6:
return impl.getDiagnostics(controller, (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto)request);
case 7:
return impl.getDelegationToken(controller, (org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto)request);
case 8:
return impl.killJob(controller, (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobRequestProto)request);
case 9:
return impl.killTask(controller, (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto)request);
case 10:
return impl.killTaskAttempt(controller, (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProto)request);
case 11:
return impl.failTaskAttempt(controller, (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto)request);
case 12:
return impl.renewDelegationToken(controller, (org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto)request);
case 13:
return impl.cancelDelegationToken(controller, (org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getRequestPrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getRequestPrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportRequestProto.getDefaultInstance();
case 2:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportRequestProto.getDefaultInstance();
case 3:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto.getDefaultInstance();
case 4:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto.getDefaultInstance();
case 5:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsRequestProto.getDefaultInstance();
case 6:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto.getDefaultInstance();
case 7:
return org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto.getDefaultInstance();
case 8:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobRequestProto.getDefaultInstance();
case 9:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto.getDefaultInstance();
case 10:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProto.getDefaultInstance();
case 11:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto.getDefaultInstance();
case 12:
return org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto.getDefaultInstance();
case 13:
return org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getResponsePrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getResponsePrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportResponseProto.getDefaultInstance();
case 2:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportResponseProto.getDefaultInstance();
case 3:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProto.getDefaultInstance();
case 4:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsResponseProto.getDefaultInstance();
case 5:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsResponseProto.getDefaultInstance();
case 6:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProto.getDefaultInstance();
case 7:
return org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto.getDefaultInstance();
case 8:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobResponseProto.getDefaultInstance();
case 9:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskResponseProto.getDefaultInstance();
case 10:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptResponseProto.getDefaultInstance();
case 11:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptResponseProto.getDefaultInstance();
case 12:
return org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto.getDefaultInstance();
case 13:
return org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
};
}
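// Dispatch note (illustrative comment): the BlockingService built above routes
// callBlockingMethod by method.getIndex(), following the declaration order of the rpcs
// (0 = getJobReport ... 13 = cancelDelegationToken), while getRequestPrototype and
// getResponsePrototype hand the RPC layer default instances to parse wire bytes into.
// A direct invocation, assuming a BlockingInterface implementation named impl that
// tolerates a null controller, would look like (callBlockingMethod can throw
// com.google.protobuf.ServiceException):
//
//   com.google.protobuf.BlockingService svc =
//       MRClientProtocolService.newReflectiveBlockingService(impl);
//   com.google.protobuf.Message reply = svc.callBlockingMethod(
//       MRClientProtocolService.getDescriptor().getMethods().get(0), // getJobReport
//       null,
//       org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto.getDefaultInstance());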
/**
* rpc getJobReport(.hadoop.mapreduce.GetJobReportRequestProto) returns (.hadoop.mapreduce.GetJobReportResponseProto);
*/
public abstract void getJobReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProto> done);
/**
* rpc getTaskReport(.hadoop.mapreduce.GetTaskReportRequestProto) returns (.hadoop.mapreduce.GetTaskReportResponseProto);
*/
public abstract void getTaskReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportResponseProto> done);
/**
* rpc getTaskAttemptReport(.hadoop.mapreduce.GetTaskAttemptReportRequestProto) returns (.hadoop.mapreduce.GetTaskAttemptReportResponseProto);
*/
public abstract void getTaskAttemptReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportResponseProto> done);
/**
* rpc getCounters(.hadoop.mapreduce.GetCountersRequestProto) returns (.hadoop.mapreduce.GetCountersResponseProto);
*/
public abstract void getCounters(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProto> done);
/**
* rpc getTaskAttemptCompletionEvents(.hadoop.mapreduce.GetTaskAttemptCompletionEventsRequestProto) returns (.hadoop.mapreduce.GetTaskAttemptCompletionEventsResponseProto);
*/
public abstract void getTaskAttemptCompletionEvents(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsResponseProto> done);
/**
* rpc getTaskReports(.hadoop.mapreduce.GetTaskReportsRequestProto) returns (.hadoop.mapreduce.GetTaskReportsResponseProto);
*/
public abstract void getTaskReports(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsResponseProto> done);
/**
* rpc getDiagnostics(.hadoop.mapreduce.GetDiagnosticsRequestProto) returns (.hadoop.mapreduce.GetDiagnosticsResponseProto);
*/
public abstract void getDiagnostics(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProto> done);
/**
* rpc getDelegationToken(.hadoop.common.GetDelegationTokenRequestProto) returns (.hadoop.common.GetDelegationTokenResponseProto);
*/
public abstract void getDelegationToken(
com.google.protobuf.RpcController controller,
org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto> done);
/**
* rpc killJob(.hadoop.mapreduce.KillJobRequestProto) returns (.hadoop.mapreduce.KillJobResponseProto);
*/
public abstract void killJob(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobResponseProto> done);
/**
* rpc killTask(.hadoop.mapreduce.KillTaskRequestProto) returns (.hadoop.mapreduce.KillTaskResponseProto);
*/
public abstract void killTask(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskResponseProto> done);
/**
* rpc killTaskAttempt(.hadoop.mapreduce.KillTaskAttemptRequestProto) returns (.hadoop.mapreduce.KillTaskAttemptResponseProto);
*/
public abstract void killTaskAttempt(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptResponseProto> done);
/**
* rpc failTaskAttempt(.hadoop.mapreduce.FailTaskAttemptRequestProto) returns (.hadoop.mapreduce.FailTaskAttemptResponseProto);
*/
public abstract void failTaskAttempt(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptResponseProto> done);
/**
* rpc renewDelegationToken(.hadoop.common.RenewDelegationTokenRequestProto) returns (.hadoop.common.RenewDelegationTokenResponseProto);
*/
public abstract void renewDelegationToken(
com.google.protobuf.RpcController controller,
org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto> done);
/**
* rpc cancelDelegationToken(.hadoop.common.CancelDelegationTokenRequestProto) returns (.hadoop.common.CancelDelegationTokenResponseProto);
*/
public abstract void cancelDelegationToken(
com.google.protobuf.RpcController controller,
org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto> done);
public static final
com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
return org.apache.hadoop.yarn.proto.MRClientProtocol.getDescriptor().getServices().get(0);
}
public final com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptorForType() {
return getDescriptor();
}
public final void callMethod(
com.google.protobuf.Descriptors.MethodDescriptor method,
com.google.protobuf.RpcController controller,
com.google.protobuf.Message request,
com.google.protobuf.RpcCallback<
com.google.protobuf.Message> done) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.callMethod() given method descriptor for wrong " +
"service type.");
}
switch(method.getIndex()) {
case 0:
this.getJobReport(controller, (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 1:
this.getTaskReport(controller, (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 2:
this.getTaskAttemptReport(controller, (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 3:
this.getCounters(controller, (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 4:
this.getTaskAttemptCompletionEvents(controller, (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 5:
this.getTaskReports(controller, (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 6:
this.getDiagnostics(controller, (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 7:
this.getDelegationToken(controller, (org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 8:
this.killJob(controller, (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 9:
this.killTask(controller, (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 10:
this.killTaskAttempt(controller, (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 11:
this.failTaskAttempt(controller, (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 12:
this.renewDelegationToken(controller, (org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
case 13:
this.cancelDelegationToken(controller, (org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getRequestPrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getRequestPrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportRequestProto.getDefaultInstance();
case 2:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportRequestProto.getDefaultInstance();
case 3:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto.getDefaultInstance();
case 4:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto.getDefaultInstance();
case 5:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsRequestProto.getDefaultInstance();
case 6:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto.getDefaultInstance();
case 7:
return org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto.getDefaultInstance();
case 8:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobRequestProto.getDefaultInstance();
case 9:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto.getDefaultInstance();
case 10:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProto.getDefaultInstance();
case 11:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto.getDefaultInstance();
case 12:
return org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto.getDefaultInstance();
case 13:
return org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getResponsePrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getResponsePrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportResponseProto.getDefaultInstance();
case 2:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportResponseProto.getDefaultInstance();
case 3:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProto.getDefaultInstance();
case 4:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsResponseProto.getDefaultInstance();
case 5:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsResponseProto.getDefaultInstance();
case 6:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProto.getDefaultInstance();
case 7:
return org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto.getDefaultInstance();
case 8:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobResponseProto.getDefaultInstance();
case 9:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskResponseProto.getDefaultInstance();
case 10:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptResponseProto.getDefaultInstance();
case 11:
return org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptResponseProto.getDefaultInstance();
case 12:
return org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto.getDefaultInstance();
case 13:
return org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public static Stub newStub(
com.google.protobuf.RpcChannel channel) {
return new Stub(channel);
}
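// Asynchronous client sketch (illustrative comment): given an RpcChannel from the
// surrounding RPC layer (how `channel`, `controller` and `request` are obtained is
// outside this file; those names are assumed), the Stub returned by newStub(...)
// delivers each response through the supplied RpcCallback:
//
//   MRClientProtocolService.Stub stub = MRClientProtocolService.newStub(channel);
//   stub.getJobReport(controller, request,
//       new com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProto>() {
//         public void run(org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProto response) {
//           // consume the job report here; errors, if any, are reported through the RpcController.
//         }
//       });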
public static final class Stub extends org.apache.hadoop.yarn.proto.MRClientProtocol.MRClientProtocolService implements Interface {
private Stub(com.google.protobuf.RpcChannel channel) {
this.channel = channel;
}
private final com.google.protobuf.RpcChannel channel;
public com.google.protobuf.RpcChannel getChannel() {
return channel;
}
public void getJobReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(0),
controller,
request,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProto.class,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProto.getDefaultInstance()));
}
public void getTaskReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(1),
controller,
request,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportResponseProto.class,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportResponseProto.getDefaultInstance()));
}
public void getTaskAttemptReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(2),
controller,
request,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportResponseProto.class,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportResponseProto.getDefaultInstance()));
}
public void getCounters(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(3),
controller,
request,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProto.class,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProto.getDefaultInstance()));
}
public void getTaskAttemptCompletionEvents(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(4),
controller,
request,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsResponseProto.class,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsResponseProto.getDefaultInstance()));
}
public void getTaskReports(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(5),
controller,
request,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsResponseProto.class,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsResponseProto.getDefaultInstance()));
}
public void getDiagnostics(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(6),
controller,
request,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProto.class,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProto.getDefaultInstance()));
}
public void getDelegationToken(
com.google.protobuf.RpcController controller,
org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(7),
controller,
request,
org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto.class,
org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto.getDefaultInstance()));
}
public void killJob(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(8),
controller,
request,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobResponseProto.class,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobResponseProto.getDefaultInstance()));
}
public void killTask(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(9),
controller,
request,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskResponseProto.class,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskResponseProto.getDefaultInstance()));
}
public void killTaskAttempt(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(10),
controller,
request,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptResponseProto.class,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptResponseProto.getDefaultInstance()));
}
public void failTaskAttempt(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(11),
controller,
request,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptResponseProto.class,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptResponseProto.getDefaultInstance()));
}
public void renewDelegationToken(
com.google.protobuf.RpcController controller,
org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(12),
controller,
request,
org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto.class,
org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto.getDefaultInstance()));
}
public void cancelDelegationToken(
com.google.protobuf.RpcController controller,
org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(13),
controller,
request,
org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto.class,
org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto.getDefaultInstance()));
}
}
public static BlockingInterface newBlockingStub(
com.google.protobuf.BlockingRpcChannel channel) {
return new BlockingStub(channel);
}
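// Blocking client sketch (illustrative comment): with a BlockingRpcChannel supplied by
// the application's RPC layer (the `blockingChannel`, `controller` and `request` names
// below are assumed), each call returns the response directly or throws
// com.google.protobuf.ServiceException:
//
//   MRClientProtocolService.BlockingInterface client =
//       MRClientProtocolService.newBlockingStub(blockingChannel);
//   org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProto report =
//       client.getJobReport(controller, request);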
public interface BlockingInterface {
public org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProto getJobReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportResponseProto getTaskReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportResponseProto getTaskAttemptReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProto getCounters(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsResponseProto getTaskAttemptCompletionEvents(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsResponseProto getTaskReports(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProto getDiagnostics(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto getDelegationToken(
com.google.protobuf.RpcController controller,
org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobResponseProto killJob(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskResponseProto killTask(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptResponseProto killTaskAttempt(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptResponseProto failTaskAttempt(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto renewDelegationToken(
com.google.protobuf.RpcController controller,
org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto cancelDelegationToken(
com.google.protobuf.RpcController controller,
org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto request)
throws com.google.protobuf.ServiceException;
}
private static final class BlockingStub implements BlockingInterface {
private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
this.channel = channel;
}
private final com.google.protobuf.BlockingRpcChannel channel;
public org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProto getJobReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(0),
controller,
request,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProto.getDefaultInstance());
}
public org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportResponseProto getTaskReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(1),
controller,
request,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportResponseProto.getDefaultInstance());
}
public org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportResponseProto getTaskAttemptReport(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(2),
controller,
request,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportResponseProto.getDefaultInstance());
}
public org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProto getCounters(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(3),
controller,
request,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProto.getDefaultInstance());
}
public org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsResponseProto getTaskAttemptCompletionEvents(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(4),
controller,
request,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsResponseProto.getDefaultInstance());
}
public org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsResponseProto getTaskReports(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(5),
controller,
request,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsResponseProto.getDefaultInstance());
}
public org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProto getDiagnostics(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(6),
controller,
request,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProto.getDefaultInstance());
}
public org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto getDelegationToken(
com.google.protobuf.RpcController controller,
org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(7),
controller,
request,
org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto.getDefaultInstance());
}
public org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobResponseProto killJob(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(8),
controller,
request,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobResponseProto.getDefaultInstance());
}
public org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskResponseProto killTask(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(9),
controller,
request,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskResponseProto.getDefaultInstance());
}
public org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptResponseProto killTaskAttempt(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(10),
controller,
request,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptResponseProto.getDefaultInstance());
}
public org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptResponseProto failTaskAttempt(
com.google.protobuf.RpcController controller,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(11),
controller,
request,
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptResponseProto.getDefaultInstance());
}
public org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto renewDelegationToken(
com.google.protobuf.RpcController controller,
org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(12),
controller,
request,
org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto.getDefaultInstance());
}
public org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto cancelDelegationToken(
com.google.protobuf.RpcController controller,
org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(13),
controller,
request,
org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto.getDefaultInstance());
}
}
// @@protoc_insertion_point(class_scope:hadoop.mapreduce.MRClientProtocolService)
}
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static com.google.protobuf.Descriptors.FileDescriptor
descriptor;
static {
java.lang.String[] descriptorData = {
"\n\026MRClientProtocol.proto\022\020hadoop.mapredu" +
"ce\032\016Security.proto\032\027mr_service_protos.pr" +
"oto2\334\014\n\027MRClientProtocolService\022g\n\014getJo" +
"bReport\022*.hadoop.mapreduce.GetJobReportR" +
"equestProto\032+.hadoop.mapreduce.GetJobRep" +
"ortResponseProto\022j\n\rgetTaskReport\022+.hado" +
"op.mapreduce.GetTaskReportRequestProto\032," +
".hadoop.mapreduce.GetTaskReportResponseP" +
"roto\022\177\n\024getTaskAttemptReport\0222.hadoop.ma" +
"preduce.GetTaskAttemptReportRequestProto",
"\0323.hadoop.mapreduce.GetTaskAttemptReport" +
"ResponseProto\022d\n\013getCounters\022).hadoop.ma" +
"preduce.GetCountersRequestProto\032*.hadoop" +
".mapreduce.GetCountersResponseProto\022\235\001\n\036" +
"getTaskAttemptCompletionEvents\022<.hadoop." +
"mapreduce.GetTaskAttemptCompletionEvents" +
"RequestProto\032=.hadoop.mapreduce.GetTaskA" +
"ttemptCompletionEventsResponseProto\022m\n\016g" +
"etTaskReports\022,.hadoop.mapreduce.GetTask" +
"ReportsRequestProto\032-.hadoop.mapreduce.G",
"etTaskReportsResponseProto\022m\n\016getDiagnos" +
"tics\022,.hadoop.mapreduce.GetDiagnosticsRe" +
"questProto\032-.hadoop.mapreduce.GetDiagnos" +
"ticsResponseProto\022s\n\022getDelegationToken\022" +
"-.hadoop.common.GetDelegationTokenReques" +
"tProto\032..hadoop.common.GetDelegationToke" +
"nResponseProto\022X\n\007killJob\022%.hadoop.mapre" +
"duce.KillJobRequestProto\032&.hadoop.mapred" +
"uce.KillJobResponseProto\022[\n\010killTask\022&.h" +
"adoop.mapreduce.KillTaskRequestProto\032\'.h",
"adoop.mapreduce.KillTaskResponseProto\022p\n" +
"\017killTaskAttempt\022-.hadoop.mapreduce.Kill" +
"TaskAttemptRequestProto\032..hadoop.mapredu" +
"ce.KillTaskAttemptResponseProto\022p\n\017failT" +
"askAttempt\022-.hadoop.mapreduce.FailTaskAt" +
"temptRequestProto\032..hadoop.mapreduce.Fai" +
"lTaskAttemptResponseProto\022y\n\024renewDelega" +
"tionToken\022/.hadoop.common.RenewDelegatio" +
"nTokenRequestProto\0320.hadoop.common.Renew" +
"DelegationTokenResponseProto\022|\n\025cancelDe",
"legationToken\0220.hadoop.common.CancelDele" +
"gationTokenRequestProto\0321.hadoop.common." +
"CancelDelegationTokenResponseProtoB3\n\034or" +
"g.apache.hadoop.yarn.protoB\020MRClientProt" +
"ocol\210\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
public com.google.protobuf.ExtensionRegistry assignDescriptors(
com.google.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
return null;
}
};
com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
org.apache.hadoop.security.proto.SecurityProtos.getDescriptor(),
org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.getDescriptor(),
}, assigner);
}
// @@protoc_insertion_point(outer_class_scope)
}