// tensorflow.serving.ModelServiceGrpc — gRPC service class generated from
// tensorflow_serving/apis/model_service.proto (grpc-java 1.18.0).
package tensorflow.serving;
import static io.grpc.MethodDescriptor.generateFullMethodName;
import static io.grpc.stub.ClientCalls.asyncBidiStreamingCall;
import static io.grpc.stub.ClientCalls.asyncClientStreamingCall;
import static io.grpc.stub.ClientCalls.asyncServerStreamingCall;
import static io.grpc.stub.ClientCalls.asyncUnaryCall;
import static io.grpc.stub.ClientCalls.blockingServerStreamingCall;
import static io.grpc.stub.ClientCalls.blockingUnaryCall;
import static io.grpc.stub.ClientCalls.futureUnaryCall;
import static io.grpc.stub.ServerCalls.asyncBidiStreamingCall;
import static io.grpc.stub.ServerCalls.asyncClientStreamingCall;
import static io.grpc.stub.ServerCalls.asyncServerStreamingCall;
import static io.grpc.stub.ServerCalls.asyncUnaryCall;
import static io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall;
import static io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall;
/**
*
* ModelService provides methods to query and update the state of the server,
* e.g. which models/versions are being served.
*
*/
@javax.annotation.Generated(
value = "by gRPC proto compiler (version 1.18.0)",
comments = "Source: tensorflow_serving/apis/model_service.proto")
public final class ModelServiceGrpc {

  private ModelServiceGrpc() {}

  public static final String SERVICE_NAME = "tensorflow.serving.ModelService";

  // Static method descriptors that strictly reflect the proto.
  private static volatile io.grpc.MethodDescriptor<tensorflow.serving.GetModelStatus.GetModelStatusRequest,
      tensorflow.serving.GetModelStatus.GetModelStatusResponse> getGetModelStatusMethod;

  @io.grpc.stub.annotations.RpcMethod(
      fullMethodName = SERVICE_NAME + '/' + "GetModelStatus",
      requestType = tensorflow.serving.GetModelStatus.GetModelStatusRequest.class,
      responseType = tensorflow.serving.GetModelStatus.GetModelStatusResponse.class,
      methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
  public static io.grpc.MethodDescriptor<tensorflow.serving.GetModelStatus.GetModelStatusRequest,
      tensorflow.serving.GetModelStatus.GetModelStatusResponse> getGetModelStatusMethod() {
    // Lazily built via double-checked locking; the backing field is volatile so the
    // fully-constructed descriptor is safely published to other threads.
    io.grpc.MethodDescriptor<tensorflow.serving.GetModelStatus.GetModelStatusRequest,
        tensorflow.serving.GetModelStatus.GetModelStatusResponse> getGetModelStatusMethod;
    if ((getGetModelStatusMethod = ModelServiceGrpc.getGetModelStatusMethod) == null) {
      synchronized (ModelServiceGrpc.class) {
        if ((getGetModelStatusMethod = ModelServiceGrpc.getGetModelStatusMethod) == null) {
          ModelServiceGrpc.getGetModelStatusMethod = getGetModelStatusMethod =
              io.grpc.MethodDescriptor.<tensorflow.serving.GetModelStatus.GetModelStatusRequest,
                  tensorflow.serving.GetModelStatus.GetModelStatusResponse>newBuilder()
              .setType(io.grpc.MethodDescriptor.MethodType.UNARY)
              .setFullMethodName(generateFullMethodName(
                  "tensorflow.serving.ModelService", "GetModelStatus"))
              .setSampledToLocalTracing(true)
              .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
                  tensorflow.serving.GetModelStatus.GetModelStatusRequest.getDefaultInstance()))
              .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
                  tensorflow.serving.GetModelStatus.GetModelStatusResponse.getDefaultInstance()))
              .setSchemaDescriptor(new ModelServiceMethodDescriptorSupplier("GetModelStatus"))
              .build();
        }
      }
    }
    return getGetModelStatusMethod;
  }

  private static volatile io.grpc.MethodDescriptor<tensorflow.serving.ModelManagement.ReloadConfigRequest,
      tensorflow.serving.ModelManagement.ReloadConfigResponse> getHandleReloadConfigRequestMethod;

  @io.grpc.stub.annotations.RpcMethod(
      fullMethodName = SERVICE_NAME + '/' + "HandleReloadConfigRequest",
      requestType = tensorflow.serving.ModelManagement.ReloadConfigRequest.class,
      responseType = tensorflow.serving.ModelManagement.ReloadConfigResponse.class,
      methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
  public static io.grpc.MethodDescriptor<tensorflow.serving.ModelManagement.ReloadConfigRequest,
      tensorflow.serving.ModelManagement.ReloadConfigResponse> getHandleReloadConfigRequestMethod() {
    // Same lazy double-checked-locking pattern as getGetModelStatusMethod().
    io.grpc.MethodDescriptor<tensorflow.serving.ModelManagement.ReloadConfigRequest,
        tensorflow.serving.ModelManagement.ReloadConfigResponse> getHandleReloadConfigRequestMethod;
    if ((getHandleReloadConfigRequestMethod = ModelServiceGrpc.getHandleReloadConfigRequestMethod) == null) {
      synchronized (ModelServiceGrpc.class) {
        if ((getHandleReloadConfigRequestMethod = ModelServiceGrpc.getHandleReloadConfigRequestMethod) == null) {
          ModelServiceGrpc.getHandleReloadConfigRequestMethod = getHandleReloadConfigRequestMethod =
              io.grpc.MethodDescriptor.<tensorflow.serving.ModelManagement.ReloadConfigRequest,
                  tensorflow.serving.ModelManagement.ReloadConfigResponse>newBuilder()
              .setType(io.grpc.MethodDescriptor.MethodType.UNARY)
              .setFullMethodName(generateFullMethodName(
                  "tensorflow.serving.ModelService", "HandleReloadConfigRequest"))
              .setSampledToLocalTracing(true)
              .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
                  tensorflow.serving.ModelManagement.ReloadConfigRequest.getDefaultInstance()))
              .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
                  tensorflow.serving.ModelManagement.ReloadConfigResponse.getDefaultInstance()))
              .setSchemaDescriptor(new ModelServiceMethodDescriptorSupplier("HandleReloadConfigRequest"))
              .build();
        }
      }
    }
    return getHandleReloadConfigRequestMethod;
  }

  /**
   * Creates a new async stub that supports all call types for the service
   */
  public static ModelServiceStub newStub(io.grpc.Channel channel) {
    return new ModelServiceStub(channel);
  }

  /**
   * Creates a new blocking-style stub that supports unary and streaming output calls on the service
   */
  public static ModelServiceBlockingStub newBlockingStub(
      io.grpc.Channel channel) {
    return new ModelServiceBlockingStub(channel);
  }

  /**
   * Creates a new ListenableFuture-style stub that supports unary calls on the service
   */
  public static ModelServiceFutureStub newFutureStub(
      io.grpc.Channel channel) {
    return new ModelServiceFutureStub(channel);
  }

  /**
   * <pre>
   * ModelService provides methods to query and update the state of the server,
   * e.g. which models/versions are being served.
   * </pre>
   *
   * Extend this class and override the RPC methods to implement the service; the
   * default implementations respond with UNIMPLEMENTED.
   */
  public static abstract class ModelServiceImplBase implements io.grpc.BindableService {

    /**
     * <pre>
     * Gets status of model. If the ModelSpec in the request does not specify
     * version, information about all versions of the model will be returned. If
     * the ModelSpec in the request does specify a version, the status of only
     * that version will be returned.
     * </pre>
     */
    public void getModelStatus(tensorflow.serving.GetModelStatus.GetModelStatusRequest request,
        io.grpc.stub.StreamObserver<tensorflow.serving.GetModelStatus.GetModelStatusResponse> responseObserver) {
      asyncUnimplementedUnaryCall(getGetModelStatusMethod(), responseObserver);
    }

    /**
     * <pre>
     * Reloads the set of served models. The new config supersedes the old one,
     * so if a model is omitted from the new config it will be unloaded and no
     * longer served.
     * </pre>
     */
    public void handleReloadConfigRequest(tensorflow.serving.ModelManagement.ReloadConfigRequest request,
        io.grpc.stub.StreamObserver<tensorflow.serving.ModelManagement.ReloadConfigResponse> responseObserver) {
      asyncUnimplementedUnaryCall(getHandleReloadConfigRequestMethod(), responseObserver);
    }

    @java.lang.Override public final io.grpc.ServerServiceDefinition bindService() {
      return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor())
          .addMethod(
            getGetModelStatusMethod(),
            asyncUnaryCall(
              new MethodHandlers<
                tensorflow.serving.GetModelStatus.GetModelStatusRequest,
                tensorflow.serving.GetModelStatus.GetModelStatusResponse>(
                  this, METHODID_GET_MODEL_STATUS)))
          .addMethod(
            getHandleReloadConfigRequestMethod(),
            asyncUnaryCall(
              new MethodHandlers<
                tensorflow.serving.ModelManagement.ReloadConfigRequest,
                tensorflow.serving.ModelManagement.ReloadConfigResponse>(
                  this, METHODID_HANDLE_RELOAD_CONFIG_REQUEST)))
          .build();
    }
  }

  /**
   * <pre>
   * ModelService provides methods to query and update the state of the server,
   * e.g. which models/versions are being served.
   * </pre>
   *
   * Async client stub: each call returns immediately and delivers the response
   * through the supplied {@code StreamObserver}.
   */
  public static final class ModelServiceStub extends io.grpc.stub.AbstractStub<ModelServiceStub> {
    private ModelServiceStub(io.grpc.Channel channel) {
      super(channel);
    }

    private ModelServiceStub(io.grpc.Channel channel,
        io.grpc.CallOptions callOptions) {
      super(channel, callOptions);
    }

    @java.lang.Override
    protected ModelServiceStub build(io.grpc.Channel channel,
        io.grpc.CallOptions callOptions) {
      return new ModelServiceStub(channel, callOptions);
    }

    /**
     * <pre>
     * Gets status of model. If the ModelSpec in the request does not specify
     * version, information about all versions of the model will be returned. If
     * the ModelSpec in the request does specify a version, the status of only
     * that version will be returned.
     * </pre>
     */
    public void getModelStatus(tensorflow.serving.GetModelStatus.GetModelStatusRequest request,
        io.grpc.stub.StreamObserver<tensorflow.serving.GetModelStatus.GetModelStatusResponse> responseObserver) {
      asyncUnaryCall(
          getChannel().newCall(getGetModelStatusMethod(), getCallOptions()), request, responseObserver);
    }

    /**
     * <pre>
     * Reloads the set of served models. The new config supersedes the old one,
     * so if a model is omitted from the new config it will be unloaded and no
     * longer served.
     * </pre>
     */
    public void handleReloadConfigRequest(tensorflow.serving.ModelManagement.ReloadConfigRequest request,
        io.grpc.stub.StreamObserver<tensorflow.serving.ModelManagement.ReloadConfigResponse> responseObserver) {
      asyncUnaryCall(
          getChannel().newCall(getHandleReloadConfigRequestMethod(), getCallOptions()), request, responseObserver);
    }
  }

  /**
   * <pre>
   * ModelService provides methods to query and update the state of the server,
   * e.g. which models/versions are being served.
   * </pre>
   *
   * Blocking client stub: each call blocks the calling thread until the
   * response (or an error) arrives.
   */
  public static final class ModelServiceBlockingStub extends io.grpc.stub.AbstractStub<ModelServiceBlockingStub> {
    private ModelServiceBlockingStub(io.grpc.Channel channel) {
      super(channel);
    }

    private ModelServiceBlockingStub(io.grpc.Channel channel,
        io.grpc.CallOptions callOptions) {
      super(channel, callOptions);
    }

    @java.lang.Override
    protected ModelServiceBlockingStub build(io.grpc.Channel channel,
        io.grpc.CallOptions callOptions) {
      return new ModelServiceBlockingStub(channel, callOptions);
    }

    /**
     * <pre>
     * Gets status of model. If the ModelSpec in the request does not specify
     * version, information about all versions of the model will be returned. If
     * the ModelSpec in the request does specify a version, the status of only
     * that version will be returned.
     * </pre>
     */
    public tensorflow.serving.GetModelStatus.GetModelStatusResponse getModelStatus(tensorflow.serving.GetModelStatus.GetModelStatusRequest request) {
      return blockingUnaryCall(
          getChannel(), getGetModelStatusMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Reloads the set of served models. The new config supersedes the old one,
     * so if a model is omitted from the new config it will be unloaded and no
     * longer served.
     * </pre>
     */
    public tensorflow.serving.ModelManagement.ReloadConfigResponse handleReloadConfigRequest(tensorflow.serving.ModelManagement.ReloadConfigRequest request) {
      return blockingUnaryCall(
          getChannel(), getHandleReloadConfigRequestMethod(), getCallOptions(), request);
    }
  }

  /**
   * <pre>
   * ModelService provides methods to query and update the state of the server,
   * e.g. which models/versions are being served.
   * </pre>
   *
   * Future client stub: each call returns a {@code ListenableFuture} of the response.
   */
  public static final class ModelServiceFutureStub extends io.grpc.stub.AbstractStub<ModelServiceFutureStub> {
    private ModelServiceFutureStub(io.grpc.Channel channel) {
      super(channel);
    }

    private ModelServiceFutureStub(io.grpc.Channel channel,
        io.grpc.CallOptions callOptions) {
      super(channel, callOptions);
    }

    @java.lang.Override
    protected ModelServiceFutureStub build(io.grpc.Channel channel,
        io.grpc.CallOptions callOptions) {
      return new ModelServiceFutureStub(channel, callOptions);
    }

    /**
     * <pre>
     * Gets status of model. If the ModelSpec in the request does not specify
     * version, information about all versions of the model will be returned. If
     * the ModelSpec in the request does specify a version, the status of only
     * that version will be returned.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<tensorflow.serving.GetModelStatus.GetModelStatusResponse> getModelStatus(
        tensorflow.serving.GetModelStatus.GetModelStatusRequest request) {
      return futureUnaryCall(
          getChannel().newCall(getGetModelStatusMethod(), getCallOptions()), request);
    }

    /**
     * <pre>
     * Reloads the set of served models. The new config supersedes the old one,
     * so if a model is omitted from the new config it will be unloaded and no
     * longer served.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<tensorflow.serving.ModelManagement.ReloadConfigResponse> handleReloadConfigRequest(
        tensorflow.serving.ModelManagement.ReloadConfigRequest request) {
      return futureUnaryCall(
          getChannel().newCall(getHandleReloadConfigRequestMethod(), getCallOptions()), request);
    }
  }

  private static final int METHODID_GET_MODEL_STATUS = 0;
  private static final int METHODID_HANDLE_RELOAD_CONFIG_REQUEST = 1;

  /**
   * Dispatches incoming server calls to the matching {@link ModelServiceImplBase}
   * method based on {@code methodId}. Only unary methods exist on this service, so
   * the streaming {@code invoke} overload always throws.
   */
  private static final class MethodHandlers<Req, Resp> implements
      io.grpc.stub.ServerCalls.UnaryMethod<Req, Resp>,
      io.grpc.stub.ServerCalls.ServerStreamingMethod<Req, Resp>,
      io.grpc.stub.ServerCalls.ClientStreamingMethod<Req, Resp>,
      io.grpc.stub.ServerCalls.BidiStreamingMethod<Req, Resp> {
    private final ModelServiceImplBase serviceImpl;
    private final int methodId;

    MethodHandlers(ModelServiceImplBase serviceImpl, int methodId) {
      this.serviceImpl = serviceImpl;
      this.methodId = methodId;
    }

    @java.lang.Override
    @java.lang.SuppressWarnings("unchecked")
    public void invoke(Req request, io.grpc.stub.StreamObserver<Resp> responseObserver) {
      switch (methodId) {
        case METHODID_GET_MODEL_STATUS:
          serviceImpl.getModelStatus((tensorflow.serving.GetModelStatus.GetModelStatusRequest) request,
              (io.grpc.stub.StreamObserver<tensorflow.serving.GetModelStatus.GetModelStatusResponse>) responseObserver);
          break;
        case METHODID_HANDLE_RELOAD_CONFIG_REQUEST:
          serviceImpl.handleReloadConfigRequest((tensorflow.serving.ModelManagement.ReloadConfigRequest) request,
              (io.grpc.stub.StreamObserver<tensorflow.serving.ModelManagement.ReloadConfigResponse>) responseObserver);
          break;
        default:
          throw new AssertionError();
      }
    }

    @java.lang.Override
    @java.lang.SuppressWarnings("unchecked")
    public io.grpc.stub.StreamObserver<Req> invoke(
        io.grpc.stub.StreamObserver<Resp> responseObserver) {
      switch (methodId) {
        default:
          // No streaming methods on ModelService; reaching here is a codegen bug.
          throw new AssertionError();
      }
    }
  }

  /**
   * Supplies proto schema descriptors for reflection/debugging of this service.
   */
  private static abstract class ModelServiceBaseDescriptorSupplier
      implements io.grpc.protobuf.ProtoFileDescriptorSupplier, io.grpc.protobuf.ProtoServiceDescriptorSupplier {
    ModelServiceBaseDescriptorSupplier() {}

    @java.lang.Override
    public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() {
      return tensorflow.serving.ModelServiceOuterClass.getDescriptor();
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() {
      return getFileDescriptor().findServiceByName("ModelService");
    }
  }

  private static final class ModelServiceFileDescriptorSupplier
      extends ModelServiceBaseDescriptorSupplier {
    ModelServiceFileDescriptorSupplier() {}
  }

  private static final class ModelServiceMethodDescriptorSupplier
      extends ModelServiceBaseDescriptorSupplier
      implements io.grpc.protobuf.ProtoMethodDescriptorSupplier {
    private final String methodName;

    ModelServiceMethodDescriptorSupplier(String methodName) {
      this.methodName = methodName;
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() {
      return getServiceDescriptor().findMethodByName(methodName);
    }
  }

  private static volatile io.grpc.ServiceDescriptor serviceDescriptor;

  /**
   * Returns the service descriptor (lazily built, double-checked locking on the
   * volatile {@code serviceDescriptor} field).
   */
  public static io.grpc.ServiceDescriptor getServiceDescriptor() {
    io.grpc.ServiceDescriptor result = serviceDescriptor;
    if (result == null) {
      synchronized (ModelServiceGrpc.class) {
        result = serviceDescriptor;
        if (result == null) {
          serviceDescriptor = result = io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME)
              .setSchemaDescriptor(new ModelServiceFileDescriptorSupplier())
              .addMethod(getGetModelStatusMethod())
              .addMethod(getHandleReloadConfigRequestMethod())
              .build();
        }
      }
    }
    return result;
  }
}