package build.bazel.remote.execution.v2;

import static io.grpc.MethodDescriptor.generateFullMethodName;

/**
 * <pre>
 * The CAS (content-addressable storage) is used to store the inputs to and
 * outputs from the execution service. Each piece of content is addressed by the
 * digest of its binary data.
 * Most of the binary data stored in the CAS is opaque to the execution engine,
 * and is only used as a communication medium. In order to build an
 * [Action][build.bazel.remote.execution.v2.Action],
 * however, the client will need to also upload the
 * [Command][build.bazel.remote.execution.v2.Command] and input root
 * [Directory][build.bazel.remote.execution.v2.Directory] for the Action.
 * The Command and Directory messages must be marshalled to wire format and then
 * uploaded under the hash as with any other piece of content. In practice, the
 * input root directory is likely to refer to other Directories in its
 * hierarchy, which must also each be uploaded on their own.
 * For small file uploads the client should group them together and call
 * [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
 * For large uploads, the client must use the
 * [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
 * `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
 * where `instance_name` is as described in the next paragraph, `uuid` is a
 * version 4 UUID generated by the client, and `hash` and `size` are the
 * [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
 * `uuid` is used only to avoid collisions when multiple clients try to upload
 * the same file (or the same client tries to upload the file multiple times at
 * once on different threads), so the client MAY reuse the `uuid` for uploading
 * different blobs. The `resource_name` may optionally have a trailing filename
 * (or other metadata) for a client to use if it is storing URLs, as in
 * `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
 * after the `size` is ignored.
 * A single server MAY support multiple instances of the execution system, each
 * with their own workers, storage, cache, etc. The exact relationship between
 * instances is up to the server. If the server does, then the `instance_name`
 * is an identifier, possibly containing multiple path segments, used to
 * distinguish between the various instances on the server, in a manner defined
 * by the server. For servers which do not support multiple instances, the
 * `instance_name` is the empty path and the leading slash is omitted, so that
 * the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
 * To simplify parsing, a path segment cannot equal any of the following
 * keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations` and
 * `capabilities`.
 * When attempting an upload, if another client has already completed the upload
 * (which may occur in the middle of a single upload if another client uploads
 * the same blob concurrently), the request will terminate immediately with
 * a response whose `committed_size` is the full size of the uploaded file
 * (regardless of how much data was transmitted by the client). If the client
 * completes the upload but the
 * [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
 * `INVALID_ARGUMENT` error will be returned. In either case, the client should
 * not attempt to retry the upload.
 * For downloading blobs, the client must use the
 * [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
 * a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
 * `instance_name` is the instance name (see above), and `hash` and `size` are
 * the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
 * The lifetime of entries in the CAS is implementation specific, but it SHOULD
 * be long enough to allow for newly-added and recently looked-up entries to be
 * used in subsequent calls (e.g. to
 * [Execute][build.bazel.remote.execution.v2.Execution.Execute]).
 * Servers MUST behave as though empty blobs are always available, even if they
 * have not been uploaded. Clients MAY optimize away the uploading or
 * downloading of empty blobs.
 * As with other services in the Remote Execution API, any call may return an
 * error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
 * information about when the client should retry the request; clients SHOULD
 * respect the information provided.
 * </pre>
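 *
 * <p>A minimal usage sketch (illustrative, not generated code) of deriving a blob's
 * {@code Digest} and the ByteStream upload {@code resource_name} described above, assuming
 * the server's digest function is SHA-256 (see the Capabilities API) and a single-instance
 * server with an empty {@code instance_name}:
 * <pre>{@code
 * byte[] data = "hello".getBytes(java.nio.charset.StandardCharsets.UTF_8);
 * // throws java.security.NoSuchAlgorithmException
 * byte[] hashBytes = java.security.MessageDigest.getInstance("SHA-256").digest(data);
 * StringBuilder hex = new StringBuilder();
 * for (byte b : hashBytes) {
 *   hex.append(String.format("%02x", b));  // Digest.hash is lowercase hex
 * }
 * Digest digest = Digest.newBuilder()
 *     .setHash(hex.toString())
 *     .setSizeBytes(data.length)
 *     .build();
 * // {instance_name}/uploads/{uuid}/blobs/{hash}/{size}; an empty instance omits the leading slash.
 * String resourceName = String.format("uploads/%s/blobs/%s/%d",
 *     java.util.UUID.randomUUID(), digest.getHash(), digest.getSizeBytes());
 * }</pre>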
 */
@javax.annotation.Generated(
    value = "by gRPC proto compiler (version 1.65.0)",
    comments = "Source: build/bazel/remote/execution/v2/remote_execution.proto")
@io.grpc.stub.annotations.GrpcGenerated
public final class ContentAddressableStorageGrpc {

  private ContentAddressableStorageGrpc() {}

  public static final java.lang.String SERVICE_NAME = "build.bazel.remote.execution.v2.ContentAddressableStorage";

  // Static method descriptors that strictly reflect the proto.
  private static volatile io.grpc.MethodDescriptor<build.bazel.remote.execution.v2.FindMissingBlobsRequest,
      build.bazel.remote.execution.v2.FindMissingBlobsResponse> getFindMissingBlobsMethod;

  @io.grpc.stub.annotations.RpcMethod(
      fullMethodName = SERVICE_NAME + '/' + "FindMissingBlobs",
      requestType = build.bazel.remote.execution.v2.FindMissingBlobsRequest.class,
      responseType = build.bazel.remote.execution.v2.FindMissingBlobsResponse.class,
      methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
  public static io.grpc.MethodDescriptor<build.bazel.remote.execution.v2.FindMissingBlobsRequest,
      build.bazel.remote.execution.v2.FindMissingBlobsResponse> getFindMissingBlobsMethod() {
    io.grpc.MethodDescriptor<build.bazel.remote.execution.v2.FindMissingBlobsRequest, build.bazel.remote.execution.v2.FindMissingBlobsResponse> getFindMissingBlobsMethod;
    if ((getFindMissingBlobsMethod = ContentAddressableStorageGrpc.getFindMissingBlobsMethod) == null) {
      synchronized (ContentAddressableStorageGrpc.class) {
        if ((getFindMissingBlobsMethod = ContentAddressableStorageGrpc.getFindMissingBlobsMethod) == null) {
          ContentAddressableStorageGrpc.getFindMissingBlobsMethod = getFindMissingBlobsMethod =
              io.grpc.MethodDescriptor.<build.bazel.remote.execution.v2.FindMissingBlobsRequest, build.bazel.remote.execution.v2.FindMissingBlobsResponse>newBuilder()
              .setType(io.grpc.MethodDescriptor.MethodType.UNARY)
              .setFullMethodName(generateFullMethodName(SERVICE_NAME, "FindMissingBlobs"))
              .setSampledToLocalTracing(true)
              .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
                  build.bazel.remote.execution.v2.FindMissingBlobsRequest.getDefaultInstance()))
              .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
                  build.bazel.remote.execution.v2.FindMissingBlobsResponse.getDefaultInstance()))
              .setSchemaDescriptor(new ContentAddressableStorageMethodDescriptorSupplier("FindMissingBlobs"))
              .build();
        }
      }
    }
    return getFindMissingBlobsMethod;
  }

  private static volatile io.grpc.MethodDescriptor<build.bazel.remote.execution.v2.BatchUpdateBlobsRequest,
      build.bazel.remote.execution.v2.BatchUpdateBlobsResponse> getBatchUpdateBlobsMethod;

  @io.grpc.stub.annotations.RpcMethod(
      fullMethodName = SERVICE_NAME + '/' + "BatchUpdateBlobs",
      requestType = build.bazel.remote.execution.v2.BatchUpdateBlobsRequest.class,
      responseType = build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.class,
      methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
  public static io.grpc.MethodDescriptor<build.bazel.remote.execution.v2.BatchUpdateBlobsRequest,
      build.bazel.remote.execution.v2.BatchUpdateBlobsResponse> getBatchUpdateBlobsMethod() {
    io.grpc.MethodDescriptor<build.bazel.remote.execution.v2.BatchUpdateBlobsRequest, build.bazel.remote.execution.v2.BatchUpdateBlobsResponse> getBatchUpdateBlobsMethod;
    if ((getBatchUpdateBlobsMethod = ContentAddressableStorageGrpc.getBatchUpdateBlobsMethod) == null) {
      synchronized (ContentAddressableStorageGrpc.class) {
        if ((getBatchUpdateBlobsMethod = ContentAddressableStorageGrpc.getBatchUpdateBlobsMethod) == null) {
          ContentAddressableStorageGrpc.getBatchUpdateBlobsMethod = getBatchUpdateBlobsMethod =
              io.grpc.MethodDescriptor.<build.bazel.remote.execution.v2.BatchUpdateBlobsRequest, build.bazel.remote.execution.v2.BatchUpdateBlobsResponse>newBuilder()
              .setType(io.grpc.MethodDescriptor.MethodType.UNARY)
              .setFullMethodName(generateFullMethodName(SERVICE_NAME, "BatchUpdateBlobs"))
              .setSampledToLocalTracing(true)
              .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
                  build.bazel.remote.execution.v2.BatchUpdateBlobsRequest.getDefaultInstance()))
              .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
                  build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.getDefaultInstance()))
              .setSchemaDescriptor(new ContentAddressableStorageMethodDescriptorSupplier("BatchUpdateBlobs"))
              .build();
        }
      }
    }
    return getBatchUpdateBlobsMethod;
  }

  private static volatile io.grpc.MethodDescriptor<build.bazel.remote.execution.v2.BatchReadBlobsRequest,
      build.bazel.remote.execution.v2.BatchReadBlobsResponse> getBatchReadBlobsMethod;

  @io.grpc.stub.annotations.RpcMethod(
      fullMethodName = SERVICE_NAME + '/' + "BatchReadBlobs",
      requestType = build.bazel.remote.execution.v2.BatchReadBlobsRequest.class,
      responseType = build.bazel.remote.execution.v2.BatchReadBlobsResponse.class,
      methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
  public static io.grpc.MethodDescriptor<build.bazel.remote.execution.v2.BatchReadBlobsRequest,
      build.bazel.remote.execution.v2.BatchReadBlobsResponse> getBatchReadBlobsMethod() {
    io.grpc.MethodDescriptor<build.bazel.remote.execution.v2.BatchReadBlobsRequest, build.bazel.remote.execution.v2.BatchReadBlobsResponse> getBatchReadBlobsMethod;
    if ((getBatchReadBlobsMethod = ContentAddressableStorageGrpc.getBatchReadBlobsMethod) == null) {
      synchronized (ContentAddressableStorageGrpc.class) {
        if ((getBatchReadBlobsMethod = ContentAddressableStorageGrpc.getBatchReadBlobsMethod) == null) {
          ContentAddressableStorageGrpc.getBatchReadBlobsMethod = getBatchReadBlobsMethod =
              io.grpc.MethodDescriptor.<build.bazel.remote.execution.v2.BatchReadBlobsRequest, build.bazel.remote.execution.v2.BatchReadBlobsResponse>newBuilder()
              .setType(io.grpc.MethodDescriptor.MethodType.UNARY)
              .setFullMethodName(generateFullMethodName(SERVICE_NAME, "BatchReadBlobs"))
              .setSampledToLocalTracing(true)
              .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
                  build.bazel.remote.execution.v2.BatchReadBlobsRequest.getDefaultInstance()))
              .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
                  build.bazel.remote.execution.v2.BatchReadBlobsResponse.getDefaultInstance()))
              .setSchemaDescriptor(new ContentAddressableStorageMethodDescriptorSupplier("BatchReadBlobs"))
              .build();
        }
      }
    }
    return getBatchReadBlobsMethod;
  }

  private static volatile io.grpc.MethodDescriptor<build.bazel.remote.execution.v2.GetTreeRequest,
      build.bazel.remote.execution.v2.GetTreeResponse> getGetTreeMethod;

  @io.grpc.stub.annotations.RpcMethod(
      fullMethodName = SERVICE_NAME + '/' + "GetTree",
      requestType = build.bazel.remote.execution.v2.GetTreeRequest.class,
      responseType = build.bazel.remote.execution.v2.GetTreeResponse.class,
      methodType = io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING)
  public static io.grpc.MethodDescriptor<build.bazel.remote.execution.v2.GetTreeRequest,
      build.bazel.remote.execution.v2.GetTreeResponse> getGetTreeMethod() {
    io.grpc.MethodDescriptor<build.bazel.remote.execution.v2.GetTreeRequest, build.bazel.remote.execution.v2.GetTreeResponse> getGetTreeMethod;
    if ((getGetTreeMethod = ContentAddressableStorageGrpc.getGetTreeMethod) == null) {
      synchronized (ContentAddressableStorageGrpc.class) {
        if ((getGetTreeMethod = ContentAddressableStorageGrpc.getGetTreeMethod) == null) {
          ContentAddressableStorageGrpc.getGetTreeMethod = getGetTreeMethod =
              io.grpc.MethodDescriptor.<build.bazel.remote.execution.v2.GetTreeRequest, build.bazel.remote.execution.v2.GetTreeResponse>newBuilder()
              .setType(io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING)
              .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetTree"))
              .setSampledToLocalTracing(true)
              .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
                  build.bazel.remote.execution.v2.GetTreeRequest.getDefaultInstance()))
              .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
                  build.bazel.remote.execution.v2.GetTreeResponse.getDefaultInstance()))
              .setSchemaDescriptor(new ContentAddressableStorageMethodDescriptorSupplier("GetTree"))
              .build();
        }
      }
    }
    return getGetTreeMethod;
  }

  /**
   * Creates a new async stub that supports all call types for the service
   */
  public static ContentAddressableStorageStub newStub(io.grpc.Channel channel) {
    io.grpc.stub.AbstractStub.StubFactory<ContentAddressableStorageStub> factory =
      new io.grpc.stub.AbstractStub.StubFactory<ContentAddressableStorageStub>() {
        @java.lang.Override
        public ContentAddressableStorageStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
          return new ContentAddressableStorageStub(channel, callOptions);
        }
      };
    return ContentAddressableStorageStub.newStub(factory, channel);
  }

  /**
   * Creates a new blocking-style stub that supports unary and streaming output calls on the service
   */
  public static ContentAddressableStorageBlockingStub newBlockingStub(
      io.grpc.Channel channel) {
    io.grpc.stub.AbstractStub.StubFactory<ContentAddressableStorageBlockingStub> factory =
      new io.grpc.stub.AbstractStub.StubFactory<ContentAddressableStorageBlockingStub>() {
        @java.lang.Override
        public ContentAddressableStorageBlockingStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
          return new ContentAddressableStorageBlockingStub(channel, callOptions);
        }
      };
    return ContentAddressableStorageBlockingStub.newStub(factory, channel);
  }

  /**
   * Creates a new ListenableFuture-style stub that supports unary calls on the service
   */
  public static ContentAddressableStorageFutureStub newFutureStub(
      io.grpc.Channel channel) {
    io.grpc.stub.AbstractStub.StubFactory<ContentAddressableStorageFutureStub> factory =
      new io.grpc.stub.AbstractStub.StubFactory<ContentAddressableStorageFutureStub>() {
        @java.lang.Override
        public ContentAddressableStorageFutureStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
          return new ContentAddressableStorageFutureStub(channel, callOptions);
        }
      };
    return ContentAddressableStorageFutureStub.newStub(factory, channel);
  }

  /**
   * <pre>
   * The CAS (content-addressable storage) is used to store the inputs to and
   * outputs from the execution service. Each piece of content is addressed by the
   * digest of its binary data.
   * Most of the binary data stored in the CAS is opaque to the execution engine,
   * and is only used as a communication medium. In order to build an
   * [Action][build.bazel.remote.execution.v2.Action],
   * however, the client will need to also upload the
   * [Command][build.bazel.remote.execution.v2.Command] and input root
   * [Directory][build.bazel.remote.execution.v2.Directory] for the Action.
   * The Command and Directory messages must be marshalled to wire format and then
   * uploaded under the hash as with any other piece of content. In practice, the
   * input root directory is likely to refer to other Directories in its
   * hierarchy, which must also each be uploaded on their own.
   * For small file uploads the client should group them together and call
   * [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
   * For large uploads, the client must use the
   * [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
   * `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
   * where `instance_name` is as described in the next paragraph, `uuid` is a
   * version 4 UUID generated by the client, and `hash` and `size` are the
   * [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
   * `uuid` is used only to avoid collisions when multiple clients try to upload
   * the same file (or the same client tries to upload the file multiple times at
   * once on different threads), so the client MAY reuse the `uuid` for uploading
   * different blobs. The `resource_name` may optionally have a trailing filename
   * (or other metadata) for a client to use if it is storing URLs, as in
   * `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
   * after the `size` is ignored.
   * A single server MAY support multiple instances of the execution system, each
   * with their own workers, storage, cache, etc. The exact relationship between
   * instances is up to the server. If the server does, then the `instance_name`
   * is an identifier, possibly containing multiple path segments, used to
   * distinguish between the various instances on the server, in a manner defined
   * by the server. For servers which do not support multiple instances, the
   * `instance_name` is the empty path and the leading slash is omitted, so that
   * the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
   * To simplify parsing, a path segment cannot equal any of the following
   * keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations` and
   * `capabilities`.
   * When attempting an upload, if another client has already completed the upload
   * (which may occur in the middle of a single upload if another client uploads
   * the same blob concurrently), the request will terminate immediately with
   * a response whose `committed_size` is the full size of the uploaded file
   * (regardless of how much data was transmitted by the client). If the client
   * completes the upload but the
   * [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
   * `INVALID_ARGUMENT` error will be returned. In either case, the client should
   * not attempt to retry the upload.
   * For downloading blobs, the client must use the
   * [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
   * a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
   * `instance_name` is the instance name (see above), and `hash` and `size` are
   * the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
   * The lifetime of entries in the CAS is implementation specific, but it SHOULD
   * be long enough to allow for newly-added and recently looked-up entries to be
   * used in subsequent calls (e.g. to
   * [Execute][build.bazel.remote.execution.v2.Execution.Execute]).
   * Servers MUST behave as though empty blobs are always available, even if they
   * have not been uploaded. Clients MAY optimize away the uploading or
   * downloading of empty blobs.
   * As with other services in the Remote Execution API, any call may return an
   * error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
   * information about when the client should retry the request; clients SHOULD
   * respect the information provided.
   * </pre>
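   *
   * <p>A minimal wiring sketch (illustrative, not generated code) of exposing an implementation
   * through a standard grpc-java server; port 8980 is an arbitrary placeholder:
   * <pre>{@code
   * io.grpc.Server server = io.grpc.ServerBuilder.forPort(8980)
   *     .addService(new ContentAddressableStorageGrpc.ContentAddressableStorageImplBase() {
   *       // Override findMissingBlobs, batchUpdateBlobs, batchReadBlobs and getTree here;
   *       // any method left unimplemented fails with UNIMPLEMENTED by default.
   *     })
   *     .build()
   *     .start();
   * }</pre>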
   */
  public interface AsyncService {

    /**
     * <pre>
     * Determine if blobs are present in the CAS.
     * Clients can use this API before uploading blobs to determine which ones are
     * already present in the CAS and do not need to be uploaded again.
     * Servers SHOULD increase the lifetimes of the referenced blobs if necessary and
     * applicable.
     * There are no method-specific errors.
     * </pre>
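     *
     * <p>A deliberately naive server-side override sketch (illustrative, not generated code)
     * that reports every queried digest as missing, forcing clients to upload everything:
     * <pre>{@code
     * // overrides the AsyncService default
     * public void findMissingBlobs(FindMissingBlobsRequest request,
     *     io.grpc.stub.StreamObserver<FindMissingBlobsResponse> responseObserver) {
     *   responseObserver.onNext(FindMissingBlobsResponse.newBuilder()
     *       .addAllMissingBlobDigests(request.getBlobDigestsList())  // nothing cached yet
     *       .build());
     *   responseObserver.onCompleted();
     * }
     * }</pre>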
     */
    default void findMissingBlobs(build.bazel.remote.execution.v2.FindMissingBlobsRequest request,
        io.grpc.stub.StreamObserver<build.bazel.remote.execution.v2.FindMissingBlobsResponse> responseObserver) {
      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getFindMissingBlobsMethod(), responseObserver);
    }

    /**
     * <pre>
     * Upload many blobs at once.
     * The server may enforce a limit of the combined total size of blobs
     * to be uploaded using this API. This limit may be obtained using the
     * [Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
     * Requests exceeding the limit should either be split into smaller
     * chunks or uploaded using the
     * [ByteStream API][google.bytestream.ByteStream], as appropriate.
     * This request is equivalent to calling a Bytestream `Write` request
     * on each individual blob, in parallel. The requests may succeed or fail
     * independently.
     * Errors:
     * * `INVALID_ARGUMENT`: The client attempted to upload more than the
     *   server supported limit.
     * Individual requests may return the following errors, additionally:
     * * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.
     * * `INVALID_ARGUMENT`: The
     * [Digest][build.bazel.remote.execution.v2.Digest] does not match the
     * provided data.
     * </pre>
     */
    default void batchUpdateBlobs(build.bazel.remote.execution.v2.BatchUpdateBlobsRequest request,
        io.grpc.stub.StreamObserver<build.bazel.remote.execution.v2.BatchUpdateBlobsResponse> responseObserver) {
      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getBatchUpdateBlobsMethod(), responseObserver);
    }

    /**
     * <pre>
     * Download many blobs at once.
     * The server may enforce a limit of the combined total size of blobs
     * to be downloaded using this API. This limit may be obtained using the
     * [Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
     * Requests exceeding the limit should either be split into smaller
     * chunks or downloaded using the
     * [ByteStream API][google.bytestream.ByteStream], as appropriate.
     * This request is equivalent to calling a Bytestream `Read` request
     * on each individual blob, in parallel. The requests may succeed or fail
     * independently.
     * Errors:
     * * `INVALID_ARGUMENT`: The client attempted to read more than the
     *   server supported limit.
     * Every error on an individual read will be returned in the corresponding digest
     * status.
     * </pre>
     */
    default void batchReadBlobs(build.bazel.remote.execution.v2.BatchReadBlobsRequest request,
        io.grpc.stub.StreamObserver<build.bazel.remote.execution.v2.BatchReadBlobsResponse> responseObserver) {
      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getBatchReadBlobsMethod(), responseObserver);
    }

    /**
     * <pre>
     * Fetch the entire directory tree rooted at a node.
     * This request must be targeted at a
     * [Directory][build.bazel.remote.execution.v2.Directory] stored in the
     * [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
     * (CAS). The server will enumerate the `Directory` tree recursively and
     * return every node descended from the root.
     * The GetTreeRequest.page_token parameter can be used to skip ahead in
     * the stream (e.g. when retrying a partially completed and aborted request),
     * by setting it to a value taken from GetTreeResponse.next_page_token of the
     * last successfully processed GetTreeResponse.
     * The exact traversal order is unspecified and, unless retrieving subsequent
     * pages from an earlier request, is not guaranteed to be stable across
     * multiple invocations of `GetTree`.
     * If part of the tree is missing from the CAS, the server will return the
     * portion present and omit the rest.
     * Errors:
     * * `NOT_FOUND`: The requested tree root is not present in the CAS.
     * </pre>
     */
    default void getTree(build.bazel.remote.execution.v2.GetTreeRequest request,
        io.grpc.stub.StreamObserver<build.bazel.remote.execution.v2.GetTreeResponse> responseObserver) {
      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getGetTreeMethod(), responseObserver);
    }
  }

  /**
   * Base class for the server implementation of the service ContentAddressableStorage.
   * <pre>
   * The CAS (content-addressable storage) is used to store the inputs to and
   * outputs from the execution service. Each piece of content is addressed by the
   * digest of its binary data.
   * Most of the binary data stored in the CAS is opaque to the execution engine,
   * and is only used as a communication medium. In order to build an
   * [Action][build.bazel.remote.execution.v2.Action],
   * however, the client will need to also upload the
   * [Command][build.bazel.remote.execution.v2.Command] and input root
   * [Directory][build.bazel.remote.execution.v2.Directory] for the Action.
   * The Command and Directory messages must be marshalled to wire format and then
   * uploaded under the hash as with any other piece of content. In practice, the
   * input root directory is likely to refer to other Directories in its
   * hierarchy, which must also each be uploaded on their own.
   * For small file uploads the client should group them together and call
   * [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
   * For large uploads, the client must use the
   * [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
   * `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
   * where `instance_name` is as described in the next paragraph, `uuid` is a
   * version 4 UUID generated by the client, and `hash` and `size` are the
   * [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
   * `uuid` is used only to avoid collisions when multiple clients try to upload
   * the same file (or the same client tries to upload the file multiple times at
   * once on different threads), so the client MAY reuse the `uuid` for uploading
   * different blobs. The `resource_name` may optionally have a trailing filename
   * (or other metadata) for a client to use if it is storing URLs, as in
   * `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
   * after the `size` is ignored.
   * A single server MAY support multiple instances of the execution system, each
   * with their own workers, storage, cache, etc. The exact relationship between
   * instances is up to the server. If the server does, then the `instance_name`
   * is an identifier, possibly containing multiple path segments, used to
   * distinguish between the various instances on the server, in a manner defined
   * by the server. For servers which do not support multiple instances, the
   * `instance_name` is the empty path and the leading slash is omitted, so that
   * the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
   * To simplify parsing, a path segment cannot equal any of the following
   * keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations` and
   * `capabilities`.
   * When attempting an upload, if another client has already completed the upload
   * (which may occur in the middle of a single upload if another client uploads
   * the same blob concurrently), the request will terminate immediately with
   * a response whose `committed_size` is the full size of the uploaded file
   * (regardless of how much data was transmitted by the client). If the client
   * completes the upload but the
   * [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
   * `INVALID_ARGUMENT` error will be returned. In either case, the client should
   * not attempt to retry the upload.
   * For downloading blobs, the client must use the
   * [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
   * a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
   * `instance_name` is the instance name (see above), and `hash` and `size` are
   * the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
   * The lifetime of entries in the CAS is implementation specific, but it SHOULD
   * be long enough to allow for newly-added and recently looked-up entries to be
   * used in subsequent calls (e.g. to
   * [Execute][build.bazel.remote.execution.v2.Execution.Execute]).
   * Servers MUST behave as though empty blobs are always available, even if they
   * have not been uploaded. Clients MAY optimize away the uploading or
   * downloading of empty blobs.
   * As with other services in the Remote Execution API, any call may return an
   * error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
   * information about when the client should retry the request; clients SHOULD
   * respect the information provided.
   * </pre>
   */
  public static abstract class ContentAddressableStorageImplBase
      implements io.grpc.BindableService, AsyncService {

    @java.lang.Override public final io.grpc.ServerServiceDefinition bindService() {
      return ContentAddressableStorageGrpc.bindService(this);
    }
  }

  /**
   * A stub to allow clients to do asynchronous rpc calls to service ContentAddressableStorage.
   * <pre>
   * The CAS (content-addressable storage) is used to store the inputs to and
   * outputs from the execution service. Each piece of content is addressed by the
   * digest of its binary data.
   * Most of the binary data stored in the CAS is opaque to the execution engine,
   * and is only used as a communication medium. In order to build an
   * [Action][build.bazel.remote.execution.v2.Action],
   * however, the client will need to also upload the
   * [Command][build.bazel.remote.execution.v2.Command] and input root
   * [Directory][build.bazel.remote.execution.v2.Directory] for the Action.
   * The Command and Directory messages must be marshalled to wire format and then
   * uploaded under the hash as with any other piece of content. In practice, the
   * input root directory is likely to refer to other Directories in its
   * hierarchy, which must also each be uploaded on their own.
   * For small file uploads the client should group them together and call
   * [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
   * For large uploads, the client must use the
   * [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
   * `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
   * where `instance_name` is as described in the next paragraph, `uuid` is a
   * version 4 UUID generated by the client, and `hash` and `size` are the
   * [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
   * `uuid` is used only to avoid collisions when multiple clients try to upload
   * the same file (or the same client tries to upload the file multiple times at
   * once on different threads), so the client MAY reuse the `uuid` for uploading
   * different blobs. The `resource_name` may optionally have a trailing filename
   * (or other metadata) for a client to use if it is storing URLs, as in
   * `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
   * after the `size` is ignored.
   * A single server MAY support multiple instances of the execution system, each
   * with their own workers, storage, cache, etc. The exact relationship between
   * instances is up to the server. If the server does, then the `instance_name`
   * is an identifier, possibly containing multiple path segments, used to
   * distinguish between the various instances on the server, in a manner defined
   * by the server. For servers which do not support multiple instances, the
   * `instance_name` is the empty path and the leading slash is omitted, so that
   * the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
   * To simplify parsing, a path segment cannot equal any of the following
   * keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations` and
   * `capabilities`.
   * When attempting an upload, if another client has already completed the upload
   * (which may occur in the middle of a single upload if another client uploads
   * the same blob concurrently), the request will terminate immediately with
   * a response whose `committed_size` is the full size of the uploaded file
   * (regardless of how much data was transmitted by the client). If the client
   * completes the upload but the
   * [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
   * `INVALID_ARGUMENT` error will be returned. In either case, the client should
   * not attempt to retry the upload.
   * For downloading blobs, the client must use the
   * [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
   * a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
   * `instance_name` is the instance name (see above), and `hash` and `size` are
   * the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
   * The lifetime of entries in the CAS is implementation specific, but it SHOULD
   * be long enough to allow for newly-added and recently looked-up entries to be
   * used in subsequent calls (e.g. to
   * [Execute][build.bazel.remote.execution.v2.Execution.Execute]).
   * Servers MUST behave as though empty blobs are always available, even if they
   * have not been uploaded. Clients MAY optimize away the uploading or
   * downloading of empty blobs.
   * As with other services in the Remote Execution API, any call may return an
   * error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
   * information about when the client should retry the request; clients SHOULD
   * respect the information provided.
   * </pre>
   */
  public static final class ContentAddressableStorageStub
      extends io.grpc.stub.AbstractAsyncStub<ContentAddressableStorageStub> {
    private ContentAddressableStorageStub(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      super(channel, callOptions);
    }

    @java.lang.Override
    protected ContentAddressableStorageStub build(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      return new ContentAddressableStorageStub(channel, callOptions);
    }

    /**
     * <pre>
     * Determine if blobs are present in the CAS.
     * Clients can use this API before uploading blobs to determine which ones are
     * already present in the CAS and do not need to be uploaded again.
     * Servers SHOULD increase the lifetimes of the referenced blobs if necessary and
     * applicable.
     * There are no method-specific errors.
     * </pre>
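     *
     * <p>An asynchronous client-call sketch (illustrative, not generated code); {@code channel}
     * and the {@code java.util.List<Digest> wanted} input are assumed to exist:
     * <pre>{@code
     * ContentAddressableStorageGrpc.newStub(channel).findMissingBlobs(
     *     FindMissingBlobsRequest.newBuilder().addAllBlobDigests(wanted).build(),
     *     new io.grpc.stub.StreamObserver<FindMissingBlobsResponse>() {
     *       public void onNext(FindMissingBlobsResponse response) {
     *         System.out.println("missing: " + response.getMissingBlobDigestsCount());
     *       }
     *       public void onError(Throwable t) { t.printStackTrace(); }
     *       public void onCompleted() {}
     *     });
     * }</pre>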
     */
    public void findMissingBlobs(build.bazel.remote.execution.v2.FindMissingBlobsRequest request,
        io.grpc.stub.StreamObserver<build.bazel.remote.execution.v2.FindMissingBlobsResponse> responseObserver) {
      io.grpc.stub.ClientCalls.asyncUnaryCall(
          getChannel().newCall(getFindMissingBlobsMethod(), getCallOptions()), request, responseObserver);
    }

    /**
     * <pre>
     * Upload many blobs at once.
     * The server may enforce a limit of the combined total size of blobs
     * to be uploaded using this API. This limit may be obtained using the
     * [Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
     * Requests exceeding the limit should either be split into smaller
     * chunks or uploaded using the
     * [ByteStream API][google.bytestream.ByteStream], as appropriate.
     * This request is equivalent to calling a Bytestream `Write` request
     * on each individual blob, in parallel. The requests may succeed or fail
     * independently.
     * Errors:
     * * `INVALID_ARGUMENT`: The client attempted to upload more than the
     *   server supported limit.
     * Individual requests may return the following errors, additionally:
     * * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.
     * * `INVALID_ARGUMENT`: The
     * [Digest][build.bazel.remote.execution.v2.Digest] does not match the
     * provided data.
     * </pre>
     */
    public void batchUpdateBlobs(build.bazel.remote.execution.v2.BatchUpdateBlobsRequest request,
        io.grpc.stub.StreamObserver<build.bazel.remote.execution.v2.BatchUpdateBlobsResponse> responseObserver) {
      io.grpc.stub.ClientCalls.asyncUnaryCall(
          getChannel().newCall(getBatchUpdateBlobsMethod(), getCallOptions()), request, responseObserver);
    }

    /**
     * <pre>
     * Download many blobs at once.
     * The server may enforce a limit of the combined total size of blobs
     * to be downloaded using this API. This limit may be obtained using the
     * [Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
     * Requests exceeding the limit should either be split into smaller
     * chunks or downloaded using the
     * [ByteStream API][google.bytestream.ByteStream], as appropriate.
     * This request is equivalent to calling a Bytestream `Read` request
     * on each individual blob, in parallel. The requests may succeed or fail
     * independently.
     * Errors:
     * * `INVALID_ARGUMENT`: The client attempted to read more than the
     *   server supported limit.
     * Every error on an individual read will be returned in the corresponding digest
     * status.
     * </pre>
     */
    public void batchReadBlobs(build.bazel.remote.execution.v2.BatchReadBlobsRequest request,
        io.grpc.stub.StreamObserver<build.bazel.remote.execution.v2.BatchReadBlobsResponse> responseObserver) {
      io.grpc.stub.ClientCalls.asyncUnaryCall(
          getChannel().newCall(getBatchReadBlobsMethod(), getCallOptions()), request, responseObserver);
    }

    /**
     * <pre>
     * Fetch the entire directory tree rooted at a node.
     * This request must be targeted at a
     * [Directory][build.bazel.remote.execution.v2.Directory] stored in the
     * [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
     * (CAS). The server will enumerate the `Directory` tree recursively and
     * return every node descended from the root.
     * The GetTreeRequest.page_token parameter can be used to skip ahead in
     * the stream (e.g. when retrying a partially completed and aborted request),
     * by setting it to a value taken from GetTreeResponse.next_page_token of the
     * last successfully processed GetTreeResponse.
     * The exact traversal order is unspecified and, unless retrieving subsequent
     * pages from an earlier request, is not guaranteed to be stable across
     * multiple invocations of `GetTree`.
     * If part of the tree is missing from the CAS, the server will return the
     * portion present and omit the rest.
     * Errors:
     * * `NOT_FOUND`: The requested tree root is not present in the CAS.
     * </pre>
     */
    public void getTree(build.bazel.remote.execution.v2.GetTreeRequest request,
        io.grpc.stub.StreamObserver<build.bazel.remote.execution.v2.GetTreeResponse> responseObserver) {
      io.grpc.stub.ClientCalls.asyncServerStreamingCall(
          getChannel().newCall(getGetTreeMethod(), getCallOptions()), request, responseObserver);
    }
  }

  /**
   * A stub to allow clients to do synchronous rpc calls to service ContentAddressableStorage.
   * <pre>
   * The CAS (content-addressable storage) is used to store the inputs to and
   * outputs from the execution service. Each piece of content is addressed by the
   * digest of its binary data.
   * Most of the binary data stored in the CAS is opaque to the execution engine,
   * and is only used as a communication medium. In order to build an
   * [Action][build.bazel.remote.execution.v2.Action],
   * however, the client will need to also upload the
   * [Command][build.bazel.remote.execution.v2.Command] and input root
   * [Directory][build.bazel.remote.execution.v2.Directory] for the Action.
   * The Command and Directory messages must be marshalled to wire format and then
   * uploaded under the hash as with any other piece of content. In practice, the
   * input root directory is likely to refer to other Directories in its
   * hierarchy, which must also each be uploaded on their own.
   * For small file uploads the client should group them together and call
   * [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
   * For large uploads, the client must use the
   * [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
   * `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
   * where `instance_name` is as described in the next paragraph, `uuid` is a
   * version 4 UUID generated by the client, and `hash` and `size` are the
   * [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
   * `uuid` is used only to avoid collisions when multiple clients try to upload
   * the same file (or the same client tries to upload the file multiple times at
   * once on different threads), so the client MAY reuse the `uuid` for uploading
   * different blobs. The `resource_name` may optionally have a trailing filename
   * (or other metadata) for a client to use if it is storing URLs, as in
   * `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
   * after the `size` is ignored.
   * A single server MAY support multiple instances of the execution system, each
   * with their own workers, storage, cache, etc. The exact relationship between
   * instances is up to the server. If the server does, then the `instance_name`
   * is an identifier, possibly containing multiple path segments, used to
   * distinguish between the various instances on the server, in a manner defined
   * by the server. For servers which do not support multiple instances, the
   * `instance_name` is the empty path and the leading slash is omitted, so that
   * the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
   * To simplify parsing, a path segment cannot equal any of the following
   * keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations` and
   * `capabilities`.
   * When attempting an upload, if another client has already completed the upload
   * (which may occur in the middle of a single upload if another client uploads
   * the same blob concurrently), the request will terminate immediately with
   * a response whose `committed_size` is the full size of the uploaded file
   * (regardless of how much data was transmitted by the client). If the client
   * completes the upload but the
   * [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
   * `INVALID_ARGUMENT` error will be returned. In either case, the client should
   * not attempt to retry the upload.
   * For downloading blobs, the client must use the
   * [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
   * a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
   * `instance_name` is the instance name (see above), and `hash` and `size` are
   * the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
   * The lifetime of entries in the CAS is implementation specific, but it SHOULD
   * be long enough to allow for newly-added and recently looked-up entries to be
   * used in subsequent calls (e.g. to
   * [Execute][build.bazel.remote.execution.v2.Execution.Execute]).
   * Servers MUST behave as though empty blobs are always available, even if they
   * have not been uploaded. Clients MAY optimize away the uploading or
   * downloading of empty blobs.
   * As with other services in the Remote Execution API, any call may return an
   * error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
   * information about when the client should retry the request; clients SHOULD
   * respect the information provided.
   * </pre>
   */
  public static final class ContentAddressableStorageBlockingStub
      extends io.grpc.stub.AbstractBlockingStub<ContentAddressableStorageBlockingStub> {
    private ContentAddressableStorageBlockingStub(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      super(channel, callOptions);
    }

    @java.lang.Override
    protected ContentAddressableStorageBlockingStub build(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      return new ContentAddressableStorageBlockingStub(channel, callOptions);
    }

    /**
     * <pre>
     * Determine if blobs are present in the CAS.
     * Clients can use this API before uploading blobs to determine which ones are
     * already present in the CAS and do not need to be uploaded again.
     * Servers SHOULD increase the lifetimes of the referenced blobs if necessary and
     * applicable.
     * There are no method-specific errors.
     * </pre>
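     *
     * <p>A blocking-usage sketch (illustrative, not generated code); the endpoint is a
     * placeholder and {@code digest} is an assumed pre-computed {@code Digest}:
     * <pre>{@code
     * io.grpc.ManagedChannel channel =
     *     io.grpc.ManagedChannelBuilder.forAddress("remote-cache.example.com", 443).build();
     * ContentAddressableStorageGrpc.ContentAddressableStorageBlockingStub cas =
     *     ContentAddressableStorageGrpc.newBlockingStub(channel);
     * FindMissingBlobsResponse missing = cas.findMissingBlobs(
     *     FindMissingBlobsRequest.newBuilder().addBlobDigests(digest).build());
     * // Upload only the digests the server reports as absent.
     * for (Digest d : missing.getMissingBlobDigestsList()) {
     *   System.out.println("must upload: " + d.getHash());
     * }
     * }</pre>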
     */
    public build.bazel.remote.execution.v2.FindMissingBlobsResponse findMissingBlobs(build.bazel.remote.execution.v2.FindMissingBlobsRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getFindMissingBlobsMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Upload many blobs at once.
     * The server may enforce a limit of the combined total size of blobs
     * to be uploaded using this API. This limit may be obtained using the
     * [Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
     * Requests exceeding the limit should either be split into smaller
     * chunks or uploaded using the
     * [ByteStream API][google.bytestream.ByteStream], as appropriate.
     * This request is equivalent to calling a Bytestream `Write` request
     * on each individual blob, in parallel. The requests may succeed or fail
     * independently.
     * Errors:
     * * `INVALID_ARGUMENT`: The client attempted to upload more than the
     *   server supported limit.
     * Individual requests may return the following errors, additionally:
     * * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.
     * * `INVALID_ARGUMENT`: The
     * [Digest][build.bazel.remote.execution.v2.Digest] does not match the
     * provided data.
     * </pre>
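     *
     * <p>A small batch-upload sketch (illustrative, not generated code), reusing the {@code cas}
     * blocking stub from the findMissingBlobs example; {@code digestOf} is a hypothetical helper:
     * <pre>{@code
     * com.google.protobuf.ByteString content = com.google.protobuf.ByteString.copyFromUtf8("hello");
     * BatchUpdateBlobsResponse response = cas.batchUpdateBlobs(BatchUpdateBlobsRequest.newBuilder()
     *     .addRequests(BatchUpdateBlobsRequest.Request.newBuilder()
     *         .setDigest(digestOf(content))  // digestOf: hypothetical helper hashing content into a Digest
     *         .setData(content))
     *     .build());
     * for (BatchUpdateBlobsResponse.Response r : response.getResponsesList()) {
     *   if (r.getStatus().getCode() != com.google.rpc.Code.OK_VALUE) {  // blobs fail independently
     *     System.err.println("upload failed: " + r.getDigest().getHash());
     *   }
     * }
     * }</pre>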
     */
    public build.bazel.remote.execution.v2.BatchUpdateBlobsResponse batchUpdateBlobs(build.bazel.remote.execution.v2.BatchUpdateBlobsRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getBatchUpdateBlobsMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Download many blobs at once.
     * The server may enforce a limit of the combined total size of blobs
     * to be downloaded using this API. This limit may be obtained using the
     * [Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
     * Requests exceeding the limit should either be split into smaller
     * chunks or downloaded using the
     * [ByteStream API][google.bytestream.ByteStream], as appropriate.
     * This request is equivalent to calling a Bytestream `Read` request
     * on each individual blob, in parallel. The requests may succeed or fail
     * independently.
     * Errors:
     * * `INVALID_ARGUMENT`: The client attempted to read more than the
     *   server supported limit.
     * Every error on an individual read will be returned in the corresponding digest
     * status.
     * </pre>
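     *
     * <p>A batch-download sketch with per-digest status checks (illustrative, not generated
     * code); {@code cas} and {@code digest} are assumed as in the examples above:
     * <pre>{@code
     * BatchReadBlobsResponse response = cas.batchReadBlobs(
     *     BatchReadBlobsRequest.newBuilder().addDigests(digest).build());
     * for (BatchReadBlobsResponse.Response r : response.getResponsesList()) {
     *   if (r.getStatus().getCode() == com.google.rpc.Code.OK_VALUE) {
     *     System.out.println(r.getDigest().getHash() + ": " + r.getData().size() + " bytes");
     *   }
     * }
     * }</pre>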
     */
    public build.bazel.remote.execution.v2.BatchReadBlobsResponse batchReadBlobs(build.bazel.remote.execution.v2.BatchReadBlobsRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getBatchReadBlobsMethod(), getCallOptions(), request);
    }

    /**
     * <pre>
     * Fetch the entire directory tree rooted at a node.
     * This request must be targeted at a
     * [Directory][build.bazel.remote.execution.v2.Directory] stored in the
     * [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
     * (CAS). The server will enumerate the `Directory` tree recursively and
     * return every node descended from the root.
     * The GetTreeRequest.page_token parameter can be used to skip ahead in
     * the stream (e.g. when retrying a partially completed and aborted request),
     * by setting it to a value taken from GetTreeResponse.next_page_token of the
     * last successfully processed GetTreeResponse.
     * The exact traversal order is unspecified and, unless retrieving subsequent
     * pages from an earlier request, is not guaranteed to be stable across
     * multiple invocations of `GetTree`.
     * If part of the tree is missing from the CAS, the server will return the
     * portion present and omit the rest.
     * Errors:
     * * `NOT_FOUND`: The requested tree root is not present in the CAS.
     * </pre>
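     *
     * <p>A page-by-page traversal sketch (illustrative, not generated code); {@code cas} and
     * {@code rootDigest} are assumed inputs:
     * <pre>{@code
     * java.util.Iterator<GetTreeResponse> pages = cas.getTree(
     *     GetTreeRequest.newBuilder().setRootDigest(rootDigest).build());
     * while (pages.hasNext()) {
     *   GetTreeResponse page = pages.next();  // keep page.getNextPageToken() to resume on failure
     *   for (Directory dir : page.getDirectoriesList()) {
     *     System.out.println(dir.getFilesCount() + " files, " + dir.getDirectoriesCount() + " subdirs");
     *   }
     * }
     * }</pre>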
     */
    public java.util.Iterator<build.bazel.remote.execution.v2.GetTreeResponse> getTree(
        build.bazel.remote.execution.v2.GetTreeRequest request) {
      return io.grpc.stub.ClientCalls.blockingServerStreamingCall(
          getChannel(), getGetTreeMethod(), getCallOptions(), request);
    }
  }

  /**
   * A stub to allow clients to do ListenableFuture-style rpc calls to service ContentAddressableStorage.
   * <pre>
   * The CAS (content-addressable storage) is used to store the inputs to and
   * outputs from the execution service. Each piece of content is addressed by the
   * digest of its binary data.
   * Most of the binary data stored in the CAS is opaque to the execution engine,
   * and is only used as a communication medium. In order to build an
   * [Action][build.bazel.remote.execution.v2.Action],
   * however, the client will need to also upload the
   * [Command][build.bazel.remote.execution.v2.Command] and input root
   * [Directory][build.bazel.remote.execution.v2.Directory] for the Action.
   * The Command and Directory messages must be marshalled to wire format and then
   * uploaded under the hash as with any other piece of content. In practice, the
   * input root directory is likely to refer to other Directories in its
   * hierarchy, which must also each be uploaded on their own.
   * For small file uploads the client should group them together and call
   * [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
   * For large uploads, the client must use the
   * [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
   * `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
   * where `instance_name` is as described in the next paragraph, `uuid` is a
   * version 4 UUID generated by the client, and `hash` and `size` are the
   * [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
   * `uuid` is used only to avoid collisions when multiple clients try to upload
   * the same file (or the same client tries to upload the file multiple times at
   * once on different threads), so the client MAY reuse the `uuid` for uploading
   * different blobs. The `resource_name` may optionally have a trailing filename
   * (or other metadata) for a client to use if it is storing URLs, as in
   * `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
   * after the `size` is ignored.
   * A single server MAY support multiple instances of the execution system, each
   * with their own workers, storage, cache, etc. The exact relationship between
   * instances is up to the server. If the server does, then the `instance_name`
   * is an identifier, possibly containing multiple path segments, used to
   * distinguish between the various instances on the server, in a manner defined
   * by the server. For servers which do not support multiple instances, the
   * `instance_name` is the empty path and the leading slash is omitted, so that
   * the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
   * To simplify parsing, a path segment cannot equal any of the following
   * keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations` and
   * `capabilities`.
   * When attempting an upload, if another client has already completed the upload
   * (which may occur in the middle of a single upload if another client uploads
   * the same blob concurrently), the request will terminate immediately with
   * a response whose `committed_size` is the full size of the uploaded file
   * (regardless of how much data was transmitted by the client). If the client
   * completes the upload but the
   * [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
   * `INVALID_ARGUMENT` error will be returned. In either case, the client should
   * not attempt to retry the upload.
   * For downloading blobs, the client must use the
   * [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
   * a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
   * `instance_name` is the instance name (see above), and `hash` and `size` are
   * the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
   * The lifetime of entries in the CAS is implementation specific, but it SHOULD
   * be long enough to allow for newly-added and recently looked-up entries to be
   * used in subsequent calls (e.g. to
   * [Execute][build.bazel.remote.execution.v2.Execution.Execute]).
   * Servers MUST behave as though empty blobs are always available, even if they
   * have not been uploaded. Clients MAY optimize away the uploading or
   * downloading of empty blobs.
   * As with other services in the Remote Execution API, any call may return an
   * error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
   * information about when the client should retry the request; clients SHOULD
   * respect the information provided.
   * </pre>
   */
  public static final class ContentAddressableStorageFutureStub
      extends io.grpc.stub.AbstractFutureStub<ContentAddressableStorageFutureStub> {
    private ContentAddressableStorageFutureStub(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      super(channel, callOptions);
    }

    @java.lang.Override
    protected ContentAddressableStorageFutureStub build(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      return new ContentAddressableStorageFutureStub(channel, callOptions);
    }

    /**
     * <pre>
     * Determine if blobs are present in the CAS.
     * Clients can use this API before uploading blobs to determine which ones are
     * already present in the CAS and do not need to be uploaded again.
     * Servers SHOULD increase the lifetimes of the referenced blobs if necessary and
     * applicable.
     * There are no method-specific errors.
     * </pre>
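     *
     * <p>A future-style usage sketch with a Guava callback (illustrative, not generated code);
     * {@code channel} and {@code request} are assumed to exist:
     * <pre>{@code
     * com.google.common.util.concurrent.ListenableFuture<FindMissingBlobsResponse> future =
     *     ContentAddressableStorageGrpc.newFutureStub(channel).findMissingBlobs(request);
     * com.google.common.util.concurrent.Futures.addCallback(future,
     *     new com.google.common.util.concurrent.FutureCallback<FindMissingBlobsResponse>() {
     *       public void onSuccess(FindMissingBlobsResponse response) {
     *         System.out.println("missing: " + response.getMissingBlobDigestsCount());
     *       }
     *       public void onFailure(Throwable t) { t.printStackTrace(); }
     *     },
     *     com.google.common.util.concurrent.MoreExecutors.directExecutor());
     * }</pre>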
     */
    public com.google.common.util.concurrent.ListenableFuture<build.bazel.remote.execution.v2.FindMissingBlobsResponse> findMissingBlobs(
        build.bazel.remote.execution.v2.FindMissingBlobsRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getFindMissingBlobsMethod(), getCallOptions()), request);
    }

    /**
     * <pre>
     * Upload many blobs at once.
     * The server may enforce a limit of the combined total size of blobs
     * to be uploaded using this API. This limit may be obtained using the
     * [Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
     * Requests exceeding the limit should either be split into smaller
     * chunks or uploaded using the
     * [ByteStream API][google.bytestream.ByteStream], as appropriate.
     * This request is equivalent to calling a Bytestream `Write` request
     * on each individual blob, in parallel. The requests may succeed or fail
     * independently.
     * Errors:
     * * `INVALID_ARGUMENT`: The client attempted to upload more than the
     *   server supported limit.
     * Individual requests may return the following errors, additionally:
     * * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.
     * * `INVALID_ARGUMENT`: The
     * [Digest][build.bazel.remote.execution.v2.Digest] does not match the
     * provided data.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<build.bazel.remote.execution.v2.BatchUpdateBlobsResponse> batchUpdateBlobs(
        build.bazel.remote.execution.v2.BatchUpdateBlobsRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getBatchUpdateBlobsMethod(), getCallOptions()), request);
    }

    /**
     * <pre>
     * Download many blobs at once.
     * The server may enforce a limit of the combined total size of blobs
     * to be downloaded using this API. This limit may be obtained using the
     * [Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
     * Requests exceeding the limit should either be split into smaller
     * chunks or downloaded using the
     * [ByteStream API][google.bytestream.ByteStream], as appropriate.
     * This request is equivalent to calling a Bytestream `Read` request
     * on each individual blob, in parallel. The requests may succeed or fail
     * independently.
     * Errors:
     * * `INVALID_ARGUMENT`: The client attempted to read more than the
     *   server supported limit.
     * Every error on an individual read will be returned in the corresponding digest
     * status.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<build.bazel.remote.execution.v2.BatchReadBlobsResponse> batchReadBlobs(
        build.bazel.remote.execution.v2.BatchReadBlobsRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getBatchReadBlobsMethod(), getCallOptions()), request);
    }
  }

  private static final int METHODID_FIND_MISSING_BLOBS = 0;
  private static final int METHODID_BATCH_UPDATE_BLOBS = 1;
  private static final int METHODID_BATCH_READ_BLOBS = 2;
  private static final int METHODID_GET_TREE = 3;

  private static final class MethodHandlers<Req, Resp> implements
      io.grpc.stub.ServerCalls.UnaryMethod<Req, Resp>,
      io.grpc.stub.ServerCalls.ServerStreamingMethod<Req, Resp>,
      io.grpc.stub.ServerCalls.ClientStreamingMethod<Req, Resp>,
      io.grpc.stub.ServerCalls.BidiStreamingMethod<Req, Resp> {
    private final AsyncService serviceImpl;
    private final int methodId;

    MethodHandlers(AsyncService serviceImpl, int methodId) {
      this.serviceImpl = serviceImpl;
      this.methodId = methodId;
    }

    @java.lang.Override
    @java.lang.SuppressWarnings("unchecked")
    public void invoke(Req request, io.grpc.stub.StreamObserver<Resp> responseObserver) {
      switch (methodId) {
        case METHODID_FIND_MISSING_BLOBS:
          serviceImpl.findMissingBlobs((build.bazel.remote.execution.v2.FindMissingBlobsRequest) request,
              (io.grpc.stub.StreamObserver<build.bazel.remote.execution.v2.FindMissingBlobsResponse>) responseObserver);
          break;
        case METHODID_BATCH_UPDATE_BLOBS:
          serviceImpl.batchUpdateBlobs((build.bazel.remote.execution.v2.BatchUpdateBlobsRequest) request,
              (io.grpc.stub.StreamObserver<build.bazel.remote.execution.v2.BatchUpdateBlobsResponse>) responseObserver);
          break;
        case METHODID_BATCH_READ_BLOBS:
          serviceImpl.batchReadBlobs((build.bazel.remote.execution.v2.BatchReadBlobsRequest) request,
              (io.grpc.stub.StreamObserver<build.bazel.remote.execution.v2.BatchReadBlobsResponse>) responseObserver);
          break;
        case METHODID_GET_TREE:
          serviceImpl.getTree((build.bazel.remote.execution.v2.GetTreeRequest) request,
              (io.grpc.stub.StreamObserver<build.bazel.remote.execution.v2.GetTreeResponse>) responseObserver);
          break;
        default:
          throw new AssertionError();
      }
    }

    @java.lang.Override
    @java.lang.SuppressWarnings("unchecked")
    public io.grpc.stub.StreamObserver<Req> invoke(
        io.grpc.stub.StreamObserver<Resp> responseObserver) {
      switch (methodId) {
        default:
          throw new AssertionError();
      }
    }
  }

  public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) {
    return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor())
        .addMethod(
          getFindMissingBlobsMethod(),
          io.grpc.stub.ServerCalls.asyncUnaryCall(
            new MethodHandlers<
              build.bazel.remote.execution.v2.FindMissingBlobsRequest,
              build.bazel.remote.execution.v2.FindMissingBlobsResponse>(
                service, METHODID_FIND_MISSING_BLOBS)))
        .addMethod(
          getBatchUpdateBlobsMethod(),
          io.grpc.stub.ServerCalls.asyncUnaryCall(
            new MethodHandlers<
              build.bazel.remote.execution.v2.BatchUpdateBlobsRequest,
              build.bazel.remote.execution.v2.BatchUpdateBlobsResponse>(
                service, METHODID_BATCH_UPDATE_BLOBS)))
        .addMethod(
          getBatchReadBlobsMethod(),
          io.grpc.stub.ServerCalls.asyncUnaryCall(
            new MethodHandlers<
              build.bazel.remote.execution.v2.BatchReadBlobsRequest,
              build.bazel.remote.execution.v2.BatchReadBlobsResponse>(
                service, METHODID_BATCH_READ_BLOBS)))
        .addMethod(
          getGetTreeMethod(),
          io.grpc.stub.ServerCalls.asyncServerStreamingCall(
            new MethodHandlers<
              build.bazel.remote.execution.v2.GetTreeRequest,
              build.bazel.remote.execution.v2.GetTreeResponse>(
                service, METHODID_GET_TREE)))
        .build();
  }

  private static abstract class ContentAddressableStorageBaseDescriptorSupplier
      implements io.grpc.protobuf.ProtoFileDescriptorSupplier, io.grpc.protobuf.ProtoServiceDescriptorSupplier {
    ContentAddressableStorageBaseDescriptorSupplier() {}

    @java.lang.Override
    public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() {
      return build.bazel.remote.execution.v2.RemoteExecutionProto.getDescriptor();
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() {
      return getFileDescriptor().findServiceByName("ContentAddressableStorage");
    }
  }

  private static final class ContentAddressableStorageFileDescriptorSupplier
      extends ContentAddressableStorageBaseDescriptorSupplier {
    ContentAddressableStorageFileDescriptorSupplier() {}
  }

  private static final class ContentAddressableStorageMethodDescriptorSupplier
      extends ContentAddressableStorageBaseDescriptorSupplier
      implements io.grpc.protobuf.ProtoMethodDescriptorSupplier {
    private final java.lang.String methodName;

    ContentAddressableStorageMethodDescriptorSupplier(java.lang.String methodName) {
      this.methodName = methodName;
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() {
      return getServiceDescriptor().findMethodByName(methodName);
    }
  }

  private static volatile io.grpc.ServiceDescriptor serviceDescriptor;

  public static io.grpc.ServiceDescriptor getServiceDescriptor() {
    io.grpc.ServiceDescriptor result = serviceDescriptor;
    if (result == null) {
      synchronized (ContentAddressableStorageGrpc.class) {
        result = serviceDescriptor;
        if (result == null) {
          serviceDescriptor = result = io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME)
              .setSchemaDescriptor(new ContentAddressableStorageFileDescriptorSupplier())
              .addMethod(getFindMissingBlobsMethod())
              .addMethod(getBatchUpdateBlobsMethod())
              .addMethod(getBatchReadBlobsMethod())
              .addMethod(getGetTreeMethod())
              .build();
        }
      }
    }
    return result;
  }
}



