/*
 * org.finos.tracdap.api.TracDataApiGrpc
 *
 * TRAC D.A.P. platform API — service API definitions for the TRAC platform services.
 * Generated gRPC service stub, distributed via the official Maven repository.
 */
package org.finos.tracdap.api;

import static io.grpc.MethodDescriptor.generateFullMethodName;

/**
 * 
 **
 * Public API for creating, updating, reading and querying primary data stored in the TRAC platform.
 * The TRAC data API provides a standard mechanism for client applications to store and access data
 * in the TRAC platform. Calls are translated into the underlying storage mechanisms, using push-down
 * operations where possible for efficient queries on large datasets.
 * The data API includes format translation, so data can be uploaded and retrieved in any supported
 * format. The back-end storage format is controlled by the platform. For example if a user uploads
 * a CSV file TRAC will convert it to the default storage format (by default Arrow IPC file format).
 * Later a web application might ask for that data in JSON format and TRAC would again perform the
 * conversion. The platform uses Apache Arrow as an intermediate representation that other formats
 * are converted from and to.
 * The data API uses streaming operations to support transfer of large datasets and can be used for
 * both user-facing client applications and system-to-system integration. For regular, high-volume
 * data transfers there are other options for integration, including data import jobs and direct
 * back-end integration of the underlying storage technologies. These options can be faster and
 * reduce storage requirements, at the expense of tighter coupling between systems. A compromise is
 * to use direct integration and import jobs for a small number of critical feeds containing a
 * high volume of data, and use the data API for client access and for integration of secondary
 * and/or low-volume systems.
 * 
*/
@javax.annotation.Generated(
    value = "by gRPC proto compiler (version 1.68.0)",
    comments = "Source: tracdap/api/data.proto")
@io.grpc.stub.annotations.GrpcGenerated
public final class TracDataApiGrpc {

  // Static utility holder for the TracDataApi service: not instantiable.
  private TracDataApiGrpc() {}

  public static final java.lang.String SERVICE_NAME = "tracdap.api.TracDataApi";

  // Static method descriptors that strictly reflect the proto.
  // Each descriptor is lazily initialized with double-checked locking on the class object.

  private static volatile io.grpc.MethodDescriptor<org.finos.tracdap.api.DataWriteRequest,
      org.finos.tracdap.metadata.TagHeader> getCreateDatasetMethod;

  @io.grpc.stub.annotations.RpcMethod(
      fullMethodName = SERVICE_NAME + '/' + "createDataset",
      requestType = org.finos.tracdap.api.DataWriteRequest.class,
      responseType = org.finos.tracdap.metadata.TagHeader.class,
      methodType = io.grpc.MethodDescriptor.MethodType.CLIENT_STREAMING)
  public static io.grpc.MethodDescriptor<org.finos.tracdap.api.DataWriteRequest,
      org.finos.tracdap.metadata.TagHeader> getCreateDatasetMethod() {
    io.grpc.MethodDescriptor<org.finos.tracdap.api.DataWriteRequest, org.finos.tracdap.metadata.TagHeader> getCreateDatasetMethod;
    if ((getCreateDatasetMethod = TracDataApiGrpc.getCreateDatasetMethod) == null) {
      synchronized (TracDataApiGrpc.class) {
        if ((getCreateDatasetMethod = TracDataApiGrpc.getCreateDatasetMethod) == null) {
          TracDataApiGrpc.getCreateDatasetMethod = getCreateDatasetMethod =
              io.grpc.MethodDescriptor.<org.finos.tracdap.api.DataWriteRequest, org.finos.tracdap.metadata.TagHeader>newBuilder()
              .setType(io.grpc.MethodDescriptor.MethodType.CLIENT_STREAMING)
              .setFullMethodName(generateFullMethodName(SERVICE_NAME, "createDataset"))
              .setSampledToLocalTracing(true)
              .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
                  org.finos.tracdap.api.DataWriteRequest.getDefaultInstance()))
              .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
                  org.finos.tracdap.metadata.TagHeader.getDefaultInstance()))
              .setSchemaDescriptor(new TracDataApiMethodDescriptorSupplier("createDataset"))
              .build();
        }
      }
    }
    return getCreateDatasetMethod;
  }

  private static volatile io.grpc.MethodDescriptor<org.finos.tracdap.api.DataWriteRequest,
      org.finos.tracdap.metadata.TagHeader> getCreateSmallDatasetMethod;

  @io.grpc.stub.annotations.RpcMethod(
      fullMethodName = SERVICE_NAME + '/' + "createSmallDataset",
      requestType = org.finos.tracdap.api.DataWriteRequest.class,
      responseType = org.finos.tracdap.metadata.TagHeader.class,
      methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
  public static io.grpc.MethodDescriptor<org.finos.tracdap.api.DataWriteRequest,
      org.finos.tracdap.metadata.TagHeader> getCreateSmallDatasetMethod() {
    io.grpc.MethodDescriptor<org.finos.tracdap.api.DataWriteRequest, org.finos.tracdap.metadata.TagHeader> getCreateSmallDatasetMethod;
    if ((getCreateSmallDatasetMethod = TracDataApiGrpc.getCreateSmallDatasetMethod) == null) {
      synchronized (TracDataApiGrpc.class) {
        if ((getCreateSmallDatasetMethod = TracDataApiGrpc.getCreateSmallDatasetMethod) == null) {
          TracDataApiGrpc.getCreateSmallDatasetMethod = getCreateSmallDatasetMethod =
              io.grpc.MethodDescriptor.<org.finos.tracdap.api.DataWriteRequest, org.finos.tracdap.metadata.TagHeader>newBuilder()
              .setType(io.grpc.MethodDescriptor.MethodType.UNARY)
              .setFullMethodName(generateFullMethodName(SERVICE_NAME, "createSmallDataset"))
              .setSampledToLocalTracing(true)
              .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
                  org.finos.tracdap.api.DataWriteRequest.getDefaultInstance()))
              .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
                  org.finos.tracdap.metadata.TagHeader.getDefaultInstance()))
              .setSchemaDescriptor(new TracDataApiMethodDescriptorSupplier("createSmallDataset"))
              .build();
        }
      }
    }
    return getCreateSmallDatasetMethod;
  }

  private static volatile io.grpc.MethodDescriptor<org.finos.tracdap.api.DataWriteRequest,
      org.finos.tracdap.metadata.TagHeader> getUpdateDatasetMethod;

  @io.grpc.stub.annotations.RpcMethod(
      fullMethodName = SERVICE_NAME + '/' + "updateDataset",
      requestType = org.finos.tracdap.api.DataWriteRequest.class,
      responseType = org.finos.tracdap.metadata.TagHeader.class,
      methodType = io.grpc.MethodDescriptor.MethodType.CLIENT_STREAMING)
  public static io.grpc.MethodDescriptor<org.finos.tracdap.api.DataWriteRequest,
      org.finos.tracdap.metadata.TagHeader> getUpdateDatasetMethod() {
    io.grpc.MethodDescriptor<org.finos.tracdap.api.DataWriteRequest, org.finos.tracdap.metadata.TagHeader> getUpdateDatasetMethod;
    if ((getUpdateDatasetMethod = TracDataApiGrpc.getUpdateDatasetMethod) == null) {
      synchronized (TracDataApiGrpc.class) {
        if ((getUpdateDatasetMethod = TracDataApiGrpc.getUpdateDatasetMethod) == null) {
          TracDataApiGrpc.getUpdateDatasetMethod = getUpdateDatasetMethod =
              io.grpc.MethodDescriptor.<org.finos.tracdap.api.DataWriteRequest, org.finos.tracdap.metadata.TagHeader>newBuilder()
              .setType(io.grpc.MethodDescriptor.MethodType.CLIENT_STREAMING)
              .setFullMethodName(generateFullMethodName(SERVICE_NAME, "updateDataset"))
              .setSampledToLocalTracing(true)
.setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.api.DataWriteRequest.getDefaultInstance())) .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.metadata.TagHeader.getDefaultInstance())) .setSchemaDescriptor(new TracDataApiMethodDescriptorSupplier("updateDataset")) .build(); } } } return getUpdateDatasetMethod; } private static volatile io.grpc.MethodDescriptor getUpdateSmallDatasetMethod; @io.grpc.stub.annotations.RpcMethod( fullMethodName = SERVICE_NAME + '/' + "updateSmallDataset", requestType = org.finos.tracdap.api.DataWriteRequest.class, responseType = org.finos.tracdap.metadata.TagHeader.class, methodType = io.grpc.MethodDescriptor.MethodType.UNARY) public static io.grpc.MethodDescriptor getUpdateSmallDatasetMethod() { io.grpc.MethodDescriptor getUpdateSmallDatasetMethod; if ((getUpdateSmallDatasetMethod = TracDataApiGrpc.getUpdateSmallDatasetMethod) == null) { synchronized (TracDataApiGrpc.class) { if ((getUpdateSmallDatasetMethod = TracDataApiGrpc.getUpdateSmallDatasetMethod) == null) { TracDataApiGrpc.getUpdateSmallDatasetMethod = getUpdateSmallDatasetMethod = io.grpc.MethodDescriptor.newBuilder() .setType(io.grpc.MethodDescriptor.MethodType.UNARY) .setFullMethodName(generateFullMethodName(SERVICE_NAME, "updateSmallDataset")) .setSampledToLocalTracing(true) .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.api.DataWriteRequest.getDefaultInstance())) .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.metadata.TagHeader.getDefaultInstance())) .setSchemaDescriptor(new TracDataApiMethodDescriptorSupplier("updateSmallDataset")) .build(); } } } return getUpdateSmallDatasetMethod; } private static volatile io.grpc.MethodDescriptor getReadDatasetMethod; @io.grpc.stub.annotations.RpcMethod( fullMethodName = SERVICE_NAME + '/' + "readDataset", requestType = org.finos.tracdap.api.DataReadRequest.class, responseType = 
org.finos.tracdap.api.DataReadResponse.class, methodType = io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) public static io.grpc.MethodDescriptor getReadDatasetMethod() { io.grpc.MethodDescriptor getReadDatasetMethod; if ((getReadDatasetMethod = TracDataApiGrpc.getReadDatasetMethod) == null) { synchronized (TracDataApiGrpc.class) { if ((getReadDatasetMethod = TracDataApiGrpc.getReadDatasetMethod) == null) { TracDataApiGrpc.getReadDatasetMethod = getReadDatasetMethod = io.grpc.MethodDescriptor.newBuilder() .setType(io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) .setFullMethodName(generateFullMethodName(SERVICE_NAME, "readDataset")) .setSampledToLocalTracing(true) .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.api.DataReadRequest.getDefaultInstance())) .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.api.DataReadResponse.getDefaultInstance())) .setSchemaDescriptor(new TracDataApiMethodDescriptorSupplier("readDataset")) .build(); } } } return getReadDatasetMethod; } private static volatile io.grpc.MethodDescriptor getReadSmallDatasetMethod; @io.grpc.stub.annotations.RpcMethod( fullMethodName = SERVICE_NAME + '/' + "readSmallDataset", requestType = org.finos.tracdap.api.DataReadRequest.class, responseType = org.finos.tracdap.api.DataReadResponse.class, methodType = io.grpc.MethodDescriptor.MethodType.UNARY) public static io.grpc.MethodDescriptor getReadSmallDatasetMethod() { io.grpc.MethodDescriptor getReadSmallDatasetMethod; if ((getReadSmallDatasetMethod = TracDataApiGrpc.getReadSmallDatasetMethod) == null) { synchronized (TracDataApiGrpc.class) { if ((getReadSmallDatasetMethod = TracDataApiGrpc.getReadSmallDatasetMethod) == null) { TracDataApiGrpc.getReadSmallDatasetMethod = getReadSmallDatasetMethod = io.grpc.MethodDescriptor.newBuilder() .setType(io.grpc.MethodDescriptor.MethodType.UNARY) .setFullMethodName(generateFullMethodName(SERVICE_NAME, "readSmallDataset")) 
.setSampledToLocalTracing(true) .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.api.DataReadRequest.getDefaultInstance())) .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.api.DataReadResponse.getDefaultInstance())) .setSchemaDescriptor(new TracDataApiMethodDescriptorSupplier("readSmallDataset")) .build(); } } } return getReadSmallDatasetMethod; } private static volatile io.grpc.MethodDescriptor getCreateFileMethod; @io.grpc.stub.annotations.RpcMethod( fullMethodName = SERVICE_NAME + '/' + "createFile", requestType = org.finos.tracdap.api.FileWriteRequest.class, responseType = org.finos.tracdap.metadata.TagHeader.class, methodType = io.grpc.MethodDescriptor.MethodType.CLIENT_STREAMING) public static io.grpc.MethodDescriptor getCreateFileMethod() { io.grpc.MethodDescriptor getCreateFileMethod; if ((getCreateFileMethod = TracDataApiGrpc.getCreateFileMethod) == null) { synchronized (TracDataApiGrpc.class) { if ((getCreateFileMethod = TracDataApiGrpc.getCreateFileMethod) == null) { TracDataApiGrpc.getCreateFileMethod = getCreateFileMethod = io.grpc.MethodDescriptor.newBuilder() .setType(io.grpc.MethodDescriptor.MethodType.CLIENT_STREAMING) .setFullMethodName(generateFullMethodName(SERVICE_NAME, "createFile")) .setSampledToLocalTracing(true) .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.api.FileWriteRequest.getDefaultInstance())) .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.metadata.TagHeader.getDefaultInstance())) .setSchemaDescriptor(new TracDataApiMethodDescriptorSupplier("createFile")) .build(); } } } return getCreateFileMethod; } private static volatile io.grpc.MethodDescriptor getCreateSmallFileMethod; @io.grpc.stub.annotations.RpcMethod( fullMethodName = SERVICE_NAME + '/' + "createSmallFile", requestType = org.finos.tracdap.api.FileWriteRequest.class, responseType = org.finos.tracdap.metadata.TagHeader.class, methodType = 
io.grpc.MethodDescriptor.MethodType.UNARY) public static io.grpc.MethodDescriptor getCreateSmallFileMethod() { io.grpc.MethodDescriptor getCreateSmallFileMethod; if ((getCreateSmallFileMethod = TracDataApiGrpc.getCreateSmallFileMethod) == null) { synchronized (TracDataApiGrpc.class) { if ((getCreateSmallFileMethod = TracDataApiGrpc.getCreateSmallFileMethod) == null) { TracDataApiGrpc.getCreateSmallFileMethod = getCreateSmallFileMethod = io.grpc.MethodDescriptor.newBuilder() .setType(io.grpc.MethodDescriptor.MethodType.UNARY) .setFullMethodName(generateFullMethodName(SERVICE_NAME, "createSmallFile")) .setSampledToLocalTracing(true) .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.api.FileWriteRequest.getDefaultInstance())) .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.metadata.TagHeader.getDefaultInstance())) .setSchemaDescriptor(new TracDataApiMethodDescriptorSupplier("createSmallFile")) .build(); } } } return getCreateSmallFileMethod; } private static volatile io.grpc.MethodDescriptor getUpdateFileMethod; @io.grpc.stub.annotations.RpcMethod( fullMethodName = SERVICE_NAME + '/' + "updateFile", requestType = org.finos.tracdap.api.FileWriteRequest.class, responseType = org.finos.tracdap.metadata.TagHeader.class, methodType = io.grpc.MethodDescriptor.MethodType.CLIENT_STREAMING) public static io.grpc.MethodDescriptor getUpdateFileMethod() { io.grpc.MethodDescriptor getUpdateFileMethod; if ((getUpdateFileMethod = TracDataApiGrpc.getUpdateFileMethod) == null) { synchronized (TracDataApiGrpc.class) { if ((getUpdateFileMethod = TracDataApiGrpc.getUpdateFileMethod) == null) { TracDataApiGrpc.getUpdateFileMethod = getUpdateFileMethod = io.grpc.MethodDescriptor.newBuilder() .setType(io.grpc.MethodDescriptor.MethodType.CLIENT_STREAMING) .setFullMethodName(generateFullMethodName(SERVICE_NAME, "updateFile")) .setSampledToLocalTracing(true) .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( 
org.finos.tracdap.api.FileWriteRequest.getDefaultInstance())) .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.metadata.TagHeader.getDefaultInstance())) .setSchemaDescriptor(new TracDataApiMethodDescriptorSupplier("updateFile")) .build(); } } } return getUpdateFileMethod; } private static volatile io.grpc.MethodDescriptor getUpdateSmallFileMethod; @io.grpc.stub.annotations.RpcMethod( fullMethodName = SERVICE_NAME + '/' + "updateSmallFile", requestType = org.finos.tracdap.api.FileWriteRequest.class, responseType = org.finos.tracdap.metadata.TagHeader.class, methodType = io.grpc.MethodDescriptor.MethodType.UNARY) public static io.grpc.MethodDescriptor getUpdateSmallFileMethod() { io.grpc.MethodDescriptor getUpdateSmallFileMethod; if ((getUpdateSmallFileMethod = TracDataApiGrpc.getUpdateSmallFileMethod) == null) { synchronized (TracDataApiGrpc.class) { if ((getUpdateSmallFileMethod = TracDataApiGrpc.getUpdateSmallFileMethod) == null) { TracDataApiGrpc.getUpdateSmallFileMethod = getUpdateSmallFileMethod = io.grpc.MethodDescriptor.newBuilder() .setType(io.grpc.MethodDescriptor.MethodType.UNARY) .setFullMethodName(generateFullMethodName(SERVICE_NAME, "updateSmallFile")) .setSampledToLocalTracing(true) .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.api.FileWriteRequest.getDefaultInstance())) .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.metadata.TagHeader.getDefaultInstance())) .setSchemaDescriptor(new TracDataApiMethodDescriptorSupplier("updateSmallFile")) .build(); } } } return getUpdateSmallFileMethod; } private static volatile io.grpc.MethodDescriptor getReadFileMethod; @io.grpc.stub.annotations.RpcMethod( fullMethodName = SERVICE_NAME + '/' + "readFile", requestType = org.finos.tracdap.api.FileReadRequest.class, responseType = org.finos.tracdap.api.FileReadResponse.class, methodType = io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) public static 
io.grpc.MethodDescriptor getReadFileMethod() { io.grpc.MethodDescriptor getReadFileMethod; if ((getReadFileMethod = TracDataApiGrpc.getReadFileMethod) == null) { synchronized (TracDataApiGrpc.class) { if ((getReadFileMethod = TracDataApiGrpc.getReadFileMethod) == null) { TracDataApiGrpc.getReadFileMethod = getReadFileMethod = io.grpc.MethodDescriptor.newBuilder() .setType(io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) .setFullMethodName(generateFullMethodName(SERVICE_NAME, "readFile")) .setSampledToLocalTracing(true) .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.api.FileReadRequest.getDefaultInstance())) .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.api.FileReadResponse.getDefaultInstance())) .setSchemaDescriptor(new TracDataApiMethodDescriptorSupplier("readFile")) .build(); } } } return getReadFileMethod; } private static volatile io.grpc.MethodDescriptor getReadSmallFileMethod; @io.grpc.stub.annotations.RpcMethod( fullMethodName = SERVICE_NAME + '/' + "readSmallFile", requestType = org.finos.tracdap.api.FileReadRequest.class, responseType = org.finos.tracdap.api.FileReadResponse.class, methodType = io.grpc.MethodDescriptor.MethodType.UNARY) public static io.grpc.MethodDescriptor getReadSmallFileMethod() { io.grpc.MethodDescriptor getReadSmallFileMethod; if ((getReadSmallFileMethod = TracDataApiGrpc.getReadSmallFileMethod) == null) { synchronized (TracDataApiGrpc.class) { if ((getReadSmallFileMethod = TracDataApiGrpc.getReadSmallFileMethod) == null) { TracDataApiGrpc.getReadSmallFileMethod = getReadSmallFileMethod = io.grpc.MethodDescriptor.newBuilder() .setType(io.grpc.MethodDescriptor.MethodType.UNARY) .setFullMethodName(generateFullMethodName(SERVICE_NAME, "readSmallFile")) .setSampledToLocalTracing(true) .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.api.FileReadRequest.getDefaultInstance())) 
.setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.api.FileReadResponse.getDefaultInstance())) .setSchemaDescriptor(new TracDataApiMethodDescriptorSupplier("readSmallFile")) .build(); } } } return getReadSmallFileMethod; } private static volatile io.grpc.MethodDescriptor getDownloadFileMethod; @io.grpc.stub.annotations.RpcMethod( fullMethodName = SERVICE_NAME + '/' + "downloadFile", requestType = org.finos.tracdap.api.DownloadRequest.class, responseType = org.finos.tracdap.api.DownloadResponse.class, methodType = io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) public static io.grpc.MethodDescriptor getDownloadFileMethod() { io.grpc.MethodDescriptor getDownloadFileMethod; if ((getDownloadFileMethod = TracDataApiGrpc.getDownloadFileMethod) == null) { synchronized (TracDataApiGrpc.class) { if ((getDownloadFileMethod = TracDataApiGrpc.getDownloadFileMethod) == null) { TracDataApiGrpc.getDownloadFileMethod = getDownloadFileMethod = io.grpc.MethodDescriptor.newBuilder() .setType(io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) .setFullMethodName(generateFullMethodName(SERVICE_NAME, "downloadFile")) .setSampledToLocalTracing(true) .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.api.DownloadRequest.getDefaultInstance())) .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.api.DownloadResponse.getDefaultInstance())) .setSchemaDescriptor(new TracDataApiMethodDescriptorSupplier("downloadFile")) .build(); } } } return getDownloadFileMethod; } private static volatile io.grpc.MethodDescriptor getDownloadLatestFileMethod; @io.grpc.stub.annotations.RpcMethod( fullMethodName = SERVICE_NAME + '/' + "downloadLatestFile", requestType = org.finos.tracdap.api.DownloadRequest.class, responseType = org.finos.tracdap.api.DownloadResponse.class, methodType = io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) public static io.grpc.MethodDescriptor getDownloadLatestFileMethod() { 
io.grpc.MethodDescriptor getDownloadLatestFileMethod; if ((getDownloadLatestFileMethod = TracDataApiGrpc.getDownloadLatestFileMethod) == null) { synchronized (TracDataApiGrpc.class) { if ((getDownloadLatestFileMethod = TracDataApiGrpc.getDownloadLatestFileMethod) == null) { TracDataApiGrpc.getDownloadLatestFileMethod = getDownloadLatestFileMethod = io.grpc.MethodDescriptor.newBuilder() .setType(io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) .setFullMethodName(generateFullMethodName(SERVICE_NAME, "downloadLatestFile")) .setSampledToLocalTracing(true) .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.api.DownloadRequest.getDefaultInstance())) .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller( org.finos.tracdap.api.DownloadResponse.getDefaultInstance())) .setSchemaDescriptor(new TracDataApiMethodDescriptorSupplier("downloadLatestFile")) .build(); } } } return getDownloadLatestFileMethod; } /** * Creates a new async stub that supports all call types for the service */ public static TracDataApiStub newStub(io.grpc.Channel channel) { io.grpc.stub.AbstractStub.StubFactory factory = new io.grpc.stub.AbstractStub.StubFactory() { @java.lang.Override public TracDataApiStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { return new TracDataApiStub(channel, callOptions); } }; return TracDataApiStub.newStub(factory, channel); } /** * Creates a new blocking-style stub that supports unary and streaming output calls on the service */ public static TracDataApiBlockingStub newBlockingStub( io.grpc.Channel channel) { io.grpc.stub.AbstractStub.StubFactory factory = new io.grpc.stub.AbstractStub.StubFactory() { @java.lang.Override public TracDataApiBlockingStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { return new TracDataApiBlockingStub(channel, callOptions); } }; return TracDataApiBlockingStub.newStub(factory, channel); } /** * Creates a new ListenableFuture-style stub that supports unary 
calls on the service */ public static TracDataApiFutureStub newFutureStub( io.grpc.Channel channel) { io.grpc.stub.AbstractStub.StubFactory factory = new io.grpc.stub.AbstractStub.StubFactory() { @java.lang.Override public TracDataApiFutureStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { return new TracDataApiFutureStub(channel, callOptions); } }; return TracDataApiFutureStub.newStub(factory, channel); } /** *
   **
   * Public API for creating, updating, reading and querying primary data stored in the TRAC platform.
   * The TRAC data API provides a standard mechanism for client applications to store and access data
   * in the TRAC platform. Calls are translated into the underlying storage mechanisms, using push-down
   * operations where possible for efficient queries on large datasets.
   * The data API includes format translation, so data can be uploaded and retrieved in any supported
   * format. The back-end storage format is controlled by the platform. For example if a user uploads
   * a CSV file TRAC will convert it to the default storage format (by default Arrow IPC file format).
   * Later a web application might ask for that data in JSON format and TRAC would again perform the
   * conversion. The platform uses Apache Arrow as an intermediate representation that other formats
   * are converted from and to.
   * The data API uses streaming operations to support transfer of large datasets and can be used for
   * both user-facing client applications and system-to-system integration. For regular, high-volume
   * data transfers there are other options for integration, including data import jobs and direct
   * back-end integration of the underlying storage technologies. These options can be faster and
   * reduce storage requirements, at the expense of tighter coupling between systems. A compromise is
   * to use direct integration and import jobs for a small number of critical feeds containing a
   * high volume of data, and use the data API for client access and for integration of secondary
   * and/or low-volume systems.
   * 
*/ public interface AsyncService { /** *
     **
     * Create a new dataset, supplying the schema and content as a data stream
     * This method creates a new dataset and a corresponding DATA object
     * in the TRAC metadata store. Once a dataset is created it can be used as an
     * input into a model run, it can also be read and queried using the data API.
     * Data can be supplied in any format supported by the platform.
     * *Although large datasets can be uploaded using this call, data import jobs
     * are normally used to bring in large volumes of data from external systems.*
     * The request must specify a schema for the dataset, incoming data will be
     * verified against the schema. Schemas can be specified using either:
     *    * A full schema definition - if a full schema is supplied, it will be embedded
     *      with the dataset and used for this dataset only
     *    * A schema ID - a tag selector for an existing SCHEMA object, which may be
     *      shared by multiple datasets
     * The "format" parameter describes the format used to upload data. For example,
     * to upload a CSV file the format would be set to "text/csv" and the file content
     * can be uploaded directly, or to upload the output of an editor grid in a web
     * client the format can be set to "text/json" to upload a JSON representation of
     * the editor contents. TRAC will apply format conversion before the data is
     * processed and stored.
     * Tag updates can be supplied to tag the newly created dataset, they behave exactly
     * the same as tag updates in the createObject() call of TracMetadataApi.
     * This is a client streaming method. The first message in the request stream
     * must contain all the request fields and metadata, including a schema specifier.
     * After the first message all metadata fields should be omitted. Subsequent messages
     * should contain the content of the dataset as a series of chunks, encoded as per
     * the "format" field of the first message. Clients may choose whether or not to
     * include a chunk of data in the first message and empty (i.e. zero-length) chunks
     * are permitted at any point in the stream.
     * This method returns the header of the newly created DATA object. Error conditions
     * include: Invalid request, unknown tenant, schema not found (if an external schema
     * ID is used), format not supported, data does not match schema, corrupt or invalid
     * data stream. Storage errors may also be reported if there is a problem communicating
     * with the underlying storage technology. In the event of an error, TRAC will do its
     * best to clean up any partially-written data in the storage layer.
     * 
*/ default io.grpc.stub.StreamObserver createDataset( io.grpc.stub.StreamObserver responseObserver) { return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall(getCreateDatasetMethod(), responseObserver); } /** *
     **
     * Create a new dataset, supplying the schema and content as a single blob
     * This method creates a new dataset and a corresponding DATA object
     * in the TRAC metadata store. Once a dataset is created it can be used as an
     * input into a model run, it can also be read and queried using the data API.
     * Data can be supplied in any format supported by the platform.
     * The request must specify a schema for the dataset, incoming data will be
     * verified against the schema. Schemas can be specified using either:
     *    * A full schema definition - if a full schema is supplied, it will be embedded
     *      with the dataset and used for this dataset only
     *    * A schema ID - a tag selector for an existing SCHEMA object, which may be
     *      shared by multiple datasets
     * The "format" parameter describes the format used to upload data. For example,
     * to upload a CSV file the format would be set to "text/csv" and the file content
     * can be uploaded directly, or to upload the output of an editor grid in a web
     * client the format can be set to "text/json" to upload a JSON representation of
     * the editor contents. TRAC will apply format conversion before the data is
     * processed and stored.
     * Tag updates can be supplied to tag the newly created dataset, they behave exactly
     * the same as tag updates in the createObject() call of TracMetadataApi.
     * This is a unary call, all the request fields and metadata (including schema specifier)
     * and dataset content encoded as per the "format" field are supplied in a single message.
     * It is intended for working with small datasets and for use in environments where client
     * streaming is not available (particularly in gRPC-Web clients).
     * This method returns the header of the newly created DATA object. Error conditions
     * include: Invalid request, unknown tenant, schema not found (if an external schema
     * ID is used), format not supported, data does not match schema, corrupt or invalid
     * data stream. Storage errors may also be reported if there is a problem communicating
     * with the underlying storage technology. In the event of an error, TRAC will do its
     * best to clean up any partially-written data in the storage layer.
     * 
*/ default void createSmallDataset(org.finos.tracdap.api.DataWriteRequest request, io.grpc.stub.StreamObserver responseObserver) { io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getCreateSmallDatasetMethod(), responseObserver); } /** *
     **
     * Update an existing dataset, supplying the schema and content as a data stream
     * This method updates an existing dataset and the corresponding DATA object
     * in the TRAC metadata store. As per the TRAC immutability guarantee, the original
     * version of the dataset is not altered. After an update, both the original version
     * and the new version are available to use as inputs into model runs and to read
     * and query using the data API. Data can be supplied in any format supported by the
     * platform.
     * *Although large datasets can be uploaded using this call, data import jobs
     * are normally used to bring in large volumes of data from external systems.*
     * To update a dataset, the priorVersion field must indicate the dataset being updated.
     * Only the latest version of a dataset can be updated.
     * The request must specify a schema for the new version of the dataset, incoming data
     * will be verified against the schema. The new schema must be compatible with the schema
     * of the previous version. Schemas can be specified using either:
     *    * A full schema definition - Datasets created using an embedded schema must supply
     *      a full schema for all subsequent versions and each schema version must be compatible
     *      with the version before. Fields may be added, but not removed or altered.
     *    * A schema ID - Datasets created using an external schema must use the same external
     *      schema ID for all subsequent versions. It is permitted for later versions of a
     *      dataset to use later versions of the external schema, but not earlier versions.
     * The "format" parameter describes the format used to upload data. For example,
     * to upload a CSV file the format would be set to "text/csv" and the file content
     * can be uploaded directly, or to upload the output of an editor grid in a web
     * client the format can be set to "text/json" to upload a JSON representation of
     * the editor contents. It is not necessary for different versions of the same dataset
     * to be uploaded using the same format. TRAC will apply format conversion before the
     * data is processed and stored.
     * Tag updates can be supplied to tag the new version of the dataset, they behave exactly
     * the same as tag updates in the updateObject() call of TracMetadataApi.
     * This is a client streaming method. The first message in the request stream
     * must contain all the request fields and metadata, including a schema specifier.
     * After the first message all metadata fields should be omitted. Subsequent messages
     * should contain the content of the dataset as a series of chunks, encoded as per
     * the "format" field of the first message. Clients may choose whether or not to
     * include a chunk of data in the first message and empty (i.e. zero-length) chunks
     * are permitted at any point in the stream.
     * This method returns the header of the version of the DATA object. Error conditions
     * include: Invalid request, unknown tenant, schema not found (if an external schema
     * ID is used), schema version not compatible, format not supported, data does not match
     * schema, corrupt or invalid data stream. Storage errors may also be reported if there is
     * a problem communicating with the underlying storage technology. In the event of an error,
     * TRAC will do its best to clean up any partially-written data in the storage layer.
     * 
*/
    default io.grpc.stub.StreamObserver<org.finos.tracdap.api.DataWriteRequest> updateDataset(
            io.grpc.stub.StreamObserver<org.finos.tracdap.api.TagHeader> responseObserver) {
        // Default server behavior: fail the call with UNIMPLEMENTED until a service overrides this method
        return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall(getUpdateDatasetMethod(), responseObserver);
    }

    /**
     *
     **
     * Update an existing dataset, supplying the schema and content as a single blob
     * This method updates an existing dataset and the corresponding DATA object
     * in the TRAC metadata store. As per the TRAC immutability guarantee, the original
     * version of the dataset is not altered. After an update, both the original version
     * and the new version are available to use as inputs into model runs and to read
     * and query using the data API. Data can be supplied in any format supported by the
     * platform.
     * To update a dataset, the priorVersion field must indicate the dataset being updated.
     * Only the latest version of a dataset can be updated.
     * The request must specify a schema for the new version of the dataset, incoming data
     * will be verified against the schema. The new schema must be compatible with the schema
     * of the previous version. Schemas can be specified using either:
     *    * A full schema definition - Datasets created using an embedded schema must supply
     *      a full schema for all subsequent versions and each schema version must be compatible
     *      with the version before. Fields may be added, but not removed or altered.
     *    * A schema ID - Datasets created using an external schema must use the same external
     *      schema ID for all subsequent versions. It is permitted for later versions of a
     *      dataset to use later versions of the external schema, but not earlier versions.
     * The "format" parameter describes the format used to upload data. For example,
     * to upload a CSV file the format would be set to "text/csv" and the file content
     * can be uploaded directly, or to upload the output of an editor grid in a web
     * client the format can be set to "text/json" to upload a JSON representation of
     * the editor contents. It is not necessary for different versions of the same dataset
     * to be uploaded using the same format. TRAC will apply format conversion before the
     * data is processed and stored.
     * Tag updates can be supplied to tag the new version of the dataset, they behave exactly
     * the same as tag updates in the updateObject() call of TracMetadataApi.
     * This is a unary call, all the request fields and metadata (including schema specifier)
     * and dataset content encoded as per the "format" field are supplied in a single message.
     * It is intended for working with small datasets and for use in environments where client
     * streaming is not available (particularly in gRPC-Web clients).
     * This method returns the header of the version of the DATA object. Error conditions
     * include: Invalid request, unknown tenant, schema not found (if an external schema
     * ID is used), schema version not compatible, format not supported, data does not match
     * schema, corrupt or invalid data stream. Storage errors may also be reported if there is
     * a problem communicating with the underlying storage technology. In the event of an error,
     * TRAC will do its best to clean up any partially-written data in the storage layer.
     * 
*/
    default void updateSmallDataset(org.finos.tracdap.api.DataWriteRequest request,
            io.grpc.stub.StreamObserver<org.finos.tracdap.api.TagHeader> responseObserver) {
        // Default server behavior: fail the call with UNIMPLEMENTED until a service overrides this method
        io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getUpdateSmallDatasetMethod(), responseObserver);
    }

    /**
     *
     **
     * Read an existing dataset, returning the content as a data stream
     * This method reads the contents of an existing dataset and returns it in the
     * requested format, along with a copy of the data schema. Data can be requested
     * in any format supported by the platform.
     * The request uses a regular TagSelector to indicate which dataset and version to read.
     * The format parameter is a mime type and must be a supported data format.
     * This is a server streaming method. The first message in the response stream will
     * contain a schema definition for the dataset (this may come from an embedded schema
     * or an external schema object). The second and subsequent messages will deliver the
     * content of the dataset in the requested format. TRAC guarantees that the first message
     * will always contain an empty chunk of content, which can be safely ignored.
     * Error conditions include: Invalid request, unknown tenant, object not found, format
     * not supported. Storage errors may also be reported if there is a problem communicating
     * with the underlying storage technology.
     * 
*/
    default void readDataset(org.finos.tracdap.api.DataReadRequest request,
            io.grpc.stub.StreamObserver<org.finos.tracdap.api.DataReadResponse> responseObserver) {
        // Default server behavior: fail the call with UNIMPLEMENTED until a service overrides this method
        io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getReadDatasetMethod(), responseObserver);
    }

    /**
     *
     **
     * Read an existing dataset, returning the content as a single blob
     * This method reads the contents of an existing dataset and returns it in the
     * requested format, along with a copy of the data schema. Data can be requested
     * in any format supported by the platform.
     * The request uses a regular TagSelector to indicate which dataset and version to read.
     * The format parameter is a mime type and must be a supported data format.
     * This is a unary call, both the schema and the content of the dataset are returned
     * in a single response message. The content of the dataset will be encoded in the
     * requested format. Errors may occur if the content of the dataset is too large to
     * fit in a single message frame.
     * Error conditions include: Invalid request, unknown tenant, object not found, format
     * not supported. Storage errors may also be reported if there is a problem communicating
     * with the underlying storage technology.
     * 
*/
    default void readSmallDataset(org.finos.tracdap.api.DataReadRequest request,
            io.grpc.stub.StreamObserver<org.finos.tracdap.api.DataReadResponse> responseObserver) {
        // Default server behavior: fail the call with UNIMPLEMENTED until a service overrides this method
        io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getReadSmallDatasetMethod(), responseObserver);
    }

    /**
     *
     **
     * Upload a new file into TRAC, sending the content as a data stream
     * Calling this method will create a new FILE object in the metadata store.
     * Tag updates can be supplied when creating a FILE, they will be passed on to the
     * metadata service. The semantics for tag updates are identical to the createObject()
     * method in TracMetadataApi.
     * This is a client streaming method. The first message in the request stream
     * must contain all the request fields and required metadata. The second and subsequent
     * messages should contain the content of the file as a series of chunks (byte buffers).
     * (other fields that are set after the first message will be ignored).
     * Empty chunks can be included at any point in the stream and will be ignored.
     * Clients may choose to include the first chunk in the first message along with the
     * request metadata, or to put an empty chunk in the first message and start streaming
     * content in the second message. For very small files, it is possible to put the entire
     * content in one chunk in the first message, so there is only a single message in the stream.
     * All of these approaches are supported.
     * Clients may specify the size of the file being created. When a size is supplied, TRAC
     * will check the size against the number of bytes stored. If the stored file size does not
     * match the supplied value, the error will be reported with an error status of DATA_LOSS.
     * When no size is supplied the check cannot be performed.
     * The method returns the header of the newly created FILE object. Error conditions
     * include: Invalid request, unknown tenant, validation failure and data loss
     * (if the number of bytes stored does not match the number specified in the request).
     * Storage errors may also be reported if there is a problem communicating with the
     * underlying storage technology. In the event of an error, TRAC will do its best to
     * clean up any partially-written data in the storage layer.
     * 
*/
    default io.grpc.stub.StreamObserver<org.finos.tracdap.api.FileWriteRequest> createFile(
            io.grpc.stub.StreamObserver<org.finos.tracdap.api.TagHeader> responseObserver) {
        // Default server behavior: fail the call with UNIMPLEMENTED until a service overrides this method
        return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall(getCreateFileMethod(), responseObserver);
    }

    /**
     *
     **
     * Upload a new file into TRAC, sending the content as a single blob
     * Calling this method will create a new FILE object in the metadata store.
     * Tag updates can be supplied when creating a FILE, they will be passed on to the
     * metadata service. The semantics for tag updates are identical to the createObject()
     * method in TracMetadataApi.
     * This is a unary method. The request must contain all the relevant fields and the
     * entire content of the file in a single message. Errors may occur if the file is
     * too large to fit in a single message frame.
     * Clients may specify the size of the file being created. When a size is supplied, TRAC
     * will check the size against the number of bytes stored. If the stored file size does not
     * match the supplied value, the error will be reported with an error status of DATA_LOSS.
     * When no size is supplied the check cannot be performed.
     * The method returns the header of the newly created FILE object. Error conditions
     * include: Invalid request, unknown tenant and validation failure, file too large and
     * data loss (if the number of bytes stored does not match the number specified in the
     * request). Storage errors may also be reported if there is a problem communicating with
     * the underlying storage technology. In the event of an error, TRAC will do its best to
     * clean up any partially-written data in the storage layer.
     * 
*/
    default void createSmallFile(org.finos.tracdap.api.FileWriteRequest request,
            io.grpc.stub.StreamObserver<org.finos.tracdap.api.TagHeader> responseObserver) {
        // Default server behavior: fail the call with UNIMPLEMENTED until a service overrides this method
        io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getCreateSmallFileMethod(), responseObserver);
    }

    /**
     *
     **
     * Upload a new version of an existing file into TRAC, sending the content as a data stream
     * Calling this method will update the relevant FILE object in the metadata store.
     * The latest version of the FILE must be supplied in the priorVersion field
     * of the request. For example if the latest version of a FILE object is version 2,
     * the priorVersion field should refer to version 2 and TRAC will create version 3
     * as a result of the update call. The metadata and content of prior versions
     * remain unaltered. The file name may be changed between versions, but the extension
     * and mime type must stay the same. Tag updates can be supplied when updating a FILE,
     * they will be passed on to the metadata service. The semantics for tag updates are
     * identical to the updateObject() method in TracMetadataApi.
     * This is a client streaming method. The first message in the request stream
     * must contain all the request fields and required metadata. The second and subsequent
     * messages should contain the content of the file as a series of byte buffers.
     * (other fields that are set after the first message will be ignored).
     * Empty chunks can be included at any point in the stream and will be ignored.
     * Clients may choose to include the first chunk in the first message along with the
     * request metadata, or to put an empty chunk in the first message and start streaming
     * content in the second message. For very small files, it is possible to put the entire
     * content in one chunk in the first message, so there is only a single message in the stream.
     * All of these approaches are supported.
     * Clients may specify the size of the file being updated. When a size is supplied, TRAC
     * will check the size against the number of bytes stored. If the stored file size does not
     * match the supplied value, the error will be reported with an error status of DATA_LOSS.
     * When no size is supplied the check cannot be performed.
     * The call returns the header for the new version of the FILE object. Error conditions
     * include: Invalid request, unknown tenant, validation failure, failed preconditions
     * (e.g. extension and mime type changes) and data loss (if the number of bytes stored
     * does not match the number specified in the request). Storage errors may also be reported
     * if there is a problem communicating with the underlying storage technology. In the event
     * of an error, TRAC will do its best to clean up any partially-written data in the storage layer.
     * 
*/
    default io.grpc.stub.StreamObserver<org.finos.tracdap.api.FileWriteRequest> updateFile(
            io.grpc.stub.StreamObserver<org.finos.tracdap.api.TagHeader> responseObserver) {
        // Default server behavior: fail the call with UNIMPLEMENTED until a service overrides this method
        return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall(getUpdateFileMethod(), responseObserver);
    }

    /**
     *
     **
     * Upload a new version of an existing file into TRAC, sending the content as a single blob
     * Calling this method will update the relevant FILE object in the metadata store.
     * The latest version of the FILE must be supplied in the priorVersion field
     * of the request. For example if the latest version of a FILE object is version 2,
     * the priorVersion field should refer to version 2 and TRAC will create version 3
     * as a result of the update call. The metadata and content of prior versions
     * remain unaltered. The file name may be changed between versions, but the extension
     * and mime type must stay the same. Tag updates can be supplied when updating a FILE,
     * they will be passed on to the metadata service. The semantics for tag updates are
     * identical to the updateObject() method in TracMetadataApi.
     * This is a unary call. The request must contain all the relevant fields and the
     * entire content of the file in a single message. Errors may occur if the file is
     * too large to fit in a single message frame.
     * Clients may specify the size of the file being updated. When a size is supplied, TRAC
     * will check the size against the number of bytes stored. If the stored file size does not
     * match the supplied value, the error will be reported with an error status of DATA_LOSS.
     * When no size is supplied the check cannot be performed.
     * The call returns the header for the new version of the FILE object. Error conditions
     * include: Invalid request, unknown tenant, validation failure, failed preconditions
     * (e.g. extension and mime type changes) file too large and data loss (if the number of
     * bytes stored does not match the number specified in the request). Storage errors may also
     * be reported if there is a problem communicating with the underlying storage technology.
     * In the event of an error, TRAC will do its best to clean up any partially-written data in
     * the storage layer.
     * 
*/
    default void updateSmallFile(org.finos.tracdap.api.FileWriteRequest request,
            io.grpc.stub.StreamObserver<org.finos.tracdap.api.TagHeader> responseObserver) {
        // Default server behavior: fail the call with UNIMPLEMENTED until a service overrides this method
        io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getUpdateSmallFileMethod(), responseObserver);
    }

    /**
     *
     **
     * Download a file that has been stored in TRAC and return it as a data stream
     * The request uses a regular TagSelector to indicate which file to read. The
     * semantics of the request are identical to the readObject() method in
     * TracMetadataApi.
     * This is a server streaming method. The first message in the response stream will
     * contain the response metadata (i.e. the file definition). The second
     * and subsequent messages will deliver the content of the file as a stream of chunks
     * (byte buffers). Empty chunks may be included at any point in the stream and
     * should be ignored. In particular, TRAC guarantees that the chunk in the first
     * message will always be an empty chunk. Clients are free to ignore this chunk,
     * for example if they have a separate function for processing the first message in
     * the response stream. Alternatively clients may process the empty chunk in the first
     * message in the same way as any other chunk. Both approaches are supported.
     * Error conditions include: Invalid request, unknown tenant, unknown object ID,
     * object type does not match ID, unknown object version, unknown tag version.
     * Storage errors may also be reported if there is a problem communicating with the
     * underlying storage technology.
     * 
*/
    default void readFile(org.finos.tracdap.api.FileReadRequest request,
            io.grpc.stub.StreamObserver<org.finos.tracdap.api.FileReadResponse> responseObserver) {
        // Default server behavior: fail the call with UNIMPLEMENTED until a service overrides this method
        io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getReadFileMethod(), responseObserver);
    }

    /**
     *
     **
     * Download a file that has been stored in TRAC and return it as a single blob
     * The request uses a regular TagSelector to indicate which file to read. The
     * semantics of the request are identical to the readObject() method in
     * TracMetadataApi.
     * This is a unary method, the response will contain the file definition and the
     * whole content of the file in a single message. Errors may occur if the file is
     * too large to fit in a single message frame.
     * Error conditions include: Invalid request, unknown tenant, unknown object ID,
     * object type does not match ID, unknown object version, unknown tag version,
     * file too large. Storage errors may also be reported if there is a problem
     * communicating with the underlying storage technology.
     * 
*/
    default void readSmallFile(org.finos.tracdap.api.FileReadRequest request,
            io.grpc.stub.StreamObserver<org.finos.tracdap.api.FileReadResponse> responseObserver) {
        // Default server behavior: fail the call with UNIMPLEMENTED until a service overrides this method
        io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getReadSmallFileMethod(), responseObserver);
    }

    /**
     *
     **
     * Download a file as a data stream
     * This method is intended for use by browsers and other HTTP clients
     * to download a file using an HTTP GET request.
     * 
*/
    default void downloadFile(org.finos.tracdap.api.DownloadRequest request,
            io.grpc.stub.StreamObserver<org.finos.tracdap.api.DownloadResponse> responseObserver) {
        // Default server behavior: fail the call with UNIMPLEMENTED until a service overrides this method
        io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getDownloadFileMethod(), responseObserver);
    }

    /**
     *
     **
     * Download the latest version of a file as a data stream
     * This method is intended for use by browsers and other HTTP clients
     * to download a file using an HTTP GET request.
     * 
*/
    default void downloadLatestFile(org.finos.tracdap.api.DownloadRequest request,
            io.grpc.stub.StreamObserver<org.finos.tracdap.api.DownloadResponse> responseObserver) {
        // Default server behavior: fail the call with UNIMPLEMENTED until a service overrides this method
        io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getDownloadLatestFileMethod(), responseObserver);
    }
}

/**
 * Base class for the server implementation of the service TracDataApi.
 *
   **
   * Public API for creating, updating, reading and querying primary data stored in the TRAC platform.
   * The TRAC data API provides a standard mechanism for client applications to store and access data
   * in the TRAC platform. Calls are translated into the underlying storage mechanisms, using push-down
   * operations where possible for efficient queries on large datasets.
   * The data API includes format translation, so data can be uploaded and retrieved in any supported
   * format. The back-end storage format is controlled by the platform. For example if a user uploads
   * a CSV file TRAC will convert it to the default storage format (by default Arrow IPC file format).
   * Later a web application might ask for that data in JSON format and TRAC would again perform the
   * conversion. The platform uses Apache Arrow as an intermediate representation that other formats
   * are converted from and to.
   * The data API uses streaming operations to support transfer of large datasets and can be used for
   * both user-facing client applications and system-to-system integration. For regular, high-volume
   * data transfers there are other options for integration, including data import jobs and direct
   * back-end integration of the underlying storage technologies. These options can be faster and
   * reduce storage requirements, at the expense of tighter coupling between systems. A compromise is
   * to use direct integration and import jobs for a small number of critical feeds containing a
   * high volume of data, and use the data API for client access and for integration of secondary
   * and/or low-volume systems.
   * 
*/
public static abstract class TracDataApiImplBase
        implements io.grpc.BindableService, AsyncService {

    /** Registers this service implementation with the generated gRPC service definition. */
    @java.lang.Override
    public final io.grpc.ServerServiceDefinition bindService() {
        return TracDataApiGrpc.bindService(this);
    }
}

/**
 * A stub to allow clients to do asynchronous rpc calls to service TracDataApi.
 *
   **
   * Public API for creating, updating, reading and querying primary data stored in the TRAC platform.
   * The TRAC data API provides a standard mechanism for client applications to store and access data
   * in the TRAC platform. Calls are translated into the underlying storage mechanisms, using push-down
   * operations where possible for efficient queries on large datasets.
   * The data API includes format translation, so data can be uploaded and retrieved in any supported
   * format. The back-end storage format is controlled by the platform. For example if a user uploads
   * a CSV file TRAC will convert it to the default storage format (by default Arrow IPC file format).
   * Later a web application might ask for that data in JSON format and TRAC would again perform the
   * conversion. The platform uses Apache Arrow as an intermediate representation that other formats
   * are converted from and to.
   * The data API uses streaming operations to support transfer of large datasets and can be used for
   * both user-facing client applications and system-to-system integration. For regular, high-volume
   * data transfers there are other options for integration, including data import jobs and direct
   * back-end integration of the underlying storage technologies. These options can be faster and
   * reduce storage requirements, at the expense of tighter coupling between systems. A compromise is
   * to use direct integration and import jobs for a small number of critical feeds containing a
   * high volume of data, and use the data API for client access and for integration of secondary
   * and/or low-volume systems.
   * 
*/
public static final class TracDataApiStub
        extends io.grpc.stub.AbstractAsyncStub<TracDataApiStub> {

    // Stubs are constructed via the generated newStub(...) factory, not directly
    private TracDataApiStub(
            io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
        super(channel, callOptions);
    }

    @java.lang.Override
    protected TracDataApiStub build(
            io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
        return new TracDataApiStub(channel, callOptions);
    }

    /**
     *
     **
     * Create a new dataset, supplying the schema and content as a data stream
     * This method creates a new dataset and a corresponding DATA object
     * in the TRAC metadata store. Once a dataset is created it can be used as an
     * input into a model run, it can also be read and queried using the data API.
     * Data can be supplied in any format supported by the platform.
     * *Although large datasets can be uploaded using this call, data import jobs
     * are normally used to bring in large volumes of data from external systems.*
     * The request must specify a schema for the dataset, incoming data will be
     * verified against the schema. Schemas can be specified using either:
     *    * A full schema definition - if a full schema is supplied, it will be embedded
     *      with the dataset and used for this dataset only
     *    * A schema ID - a tag selector for an existing SCHEMA object, which may be
     *      shared by multiple datasets
     * The "format" parameter describes the format used to upload data. For example,
     * to upload a CSV file the format would be set to "text/csv" and the file content
     * can be uploaded directly, or to upload the output of an editor grid in a web
     * client the format can be set to "text/json" to upload a JSON representation of
     * the editor contents. TRAC will apply format conversion before the data is
     * processed and stored.
     * Tag updates can be supplied to tag the newly created dataset, they behave exactly
     * the same as tag updates in the createObject() call of TracMetadataApi.
     * This is a client streaming method. The first message in the request stream
     * must contain all the request fields and metadata, including a schema specifier.
     * After the first message all metadata fields should be omitted. Subsequent messages
     * should contain the content of the dataset as a series of chunks, encoded as per
     * the "format" field of the first message. Clients may choose whether or not to
     * include a chunk of data in the first message and empty (i.e. zero-length) chunks
     * are permitted at any point in the stream.
     * This method returns the header of the newly created DATA object. Error conditions
     * include: Invalid request, unknown tenant, schema not found (if an external schema
     * ID is used), format not supported, data does not match schema, corrupt or invalid
     * data stream. Storage errors may also be reported if there is a problem communicating
     * with the underlying storage technology. In the event of an error, TRAC will do its
     * best to clean up any partially-written data in the storage layer.
     * 
*/
    public io.grpc.stub.StreamObserver<org.finos.tracdap.api.DataWriteRequest> createDataset(
            io.grpc.stub.StreamObserver<org.finos.tracdap.api.TagHeader> responseObserver) {
        // Client-streaming call: returns the request observer the client writes dataset chunks into
        return io.grpc.stub.ClientCalls.asyncClientStreamingCall(
                getChannel().newCall(getCreateDatasetMethod(), getCallOptions()), responseObserver);
    }

    /**
     *
     **
     * Create a new dataset, supplying the schema and content as a single blob
     * This method creates a new dataset and a corresponding DATA object
     * in the TRAC metadata store. Once a dataset is created it can be used as an
     * input into a model run, it can also be read and queried using the data API.
     * Data can be supplied in any format supported by the platform.
     * The request must specify a schema for the dataset, incoming data will be
     * verified against the schema. Schemas can be specified using either:
     *    * A full schema definition - if a full schema is supplied, it will be embedded
     *      with the dataset and used for this dataset only
     *    * A schema ID - a tag selector for an existing SCHEMA object, which may be
     *      shared by multiple datasets
     * The "format" parameter describes the format used to upload data. For example,
     * to upload a CSV file the format would be set to "text/csv" and the file content
     * can be uploaded directly, or to upload the output of an editor grid in a web
     * client the format can be set to "text/json" to upload a JSON representation of
     * the editor contents. TRAC will apply format conversion before the data is
     * processed and stored.
     * Tag updates can be supplied to tag the newly created dataset, they behave exactly
     * the same as tag updates in the createObject() call of TracMetadataApi.
     * This is a unary call, all the request fields and metadata (including schema specifier)
     * and dataset content encoded as per the "format" field are supplied in a single message.
     * It is intended for working with small datasets and for use in environments where client
     * streaming is not available (particularly in gRPC-Web clients).
     * This method returns the header of the newly created DATA object. Error conditions
     * include: Invalid request, unknown tenant, schema not found (if an external schema
     * ID is used), format not supported, data does not match schema, corrupt or invalid
     * data stream. Storage errors may also be reported if there is a problem communicating
     * with the underlying storage technology. In the event of an error, TRAC will do its
     * best to clean up any partially-written data in the storage layer.
     * 
*/
    public void createSmallDataset(org.finos.tracdap.api.DataWriteRequest request,
            io.grpc.stub.StreamObserver<org.finos.tracdap.api.TagHeader> responseObserver) {
        // Unary call: whole request (metadata + content) goes out in a single message
        io.grpc.stub.ClientCalls.asyncUnaryCall(
                getChannel().newCall(getCreateSmallDatasetMethod(), getCallOptions()), request, responseObserver);
    }

    /**
     *
     **
     * Update an existing dataset, supplying the schema and content as a data stream
     * This method updates an existing dataset and the corresponding DATA object
     * in the TRAC metadata store. As per the TRAC immutability guarantee, the original
     * version of the dataset is not altered. After an update, both the original version
     * and the new version are available to use as inputs into model runs and to read
     * and query using the data API. Data can be supplied in any format supported by the
     * platform.
     * *Although large datasets can be uploaded using this call, data import jobs
     * are normally used to bring in large volumes of data from external systems.*
     * To update a dataset, the priorVersion field must indicate the dataset being updated.
     * Only the latest version of a dataset can be updated.
     * The request must specify a schema for the new version of the dataset, incoming data
     * will be verified against the schema. The new schema must be compatible with the schema
     * of the previous version. Schemas can be specified using either:
     *    * A full schema definition - Datasets created using an embedded schema must supply
     *      a full schema for all subsequent versions and each schema version must be compatible
     *      with the version before. Fields may be added, but not removed or altered.
     *    * A schema ID - Datasets created using an external schema must use the same external
     *      schema ID for all subsequent versions. It is permitted for later versions of a
     *      dataset to use later versions of the external schema, but not earlier versions.
     * The "format" parameter describes the format used to upload data. For example,
     * to upload a CSV file the format would be set to "text/csv" and the file content
     * can be uploaded directly, or to upload the output of an editor grid in a web
     * client the format can be set to "text/json" to upload a JSON representation of
     * the editor contents. It is not necessary for different versions of the same dataset
     * to be uploaded using the same format. TRAC will apply format conversion before the
     * data is processed and stored.
     * Tag updates can be supplied to tag the new version of the dataset, they behave exactly
     * the same as tag updates in the updateObject() call of TracMetadataApi.
     * This is a client streaming method. The first message in the request stream
     * must contain all the request fields and metadata, including a schema specifier.
     * After the first message all metadata fields should be omitted. Subsequent messages
     * should contain the content of the dataset as a series of chunks, encoded as per
     * the "format" field of the first message. Clients may choose whether or not to
     * include a chunk of data in the first message and empty (i.e. zero-length) chunks
     * are permitted at any point in the stream.
     * This method returns the header of the version of the DATA object. Error conditions
     * include: Invalid request, unknown tenant, schema not found (if an external schema
     * ID is used), schema version not compatible, format not supported, data does not match
     * schema, corrupt or invalid data stream. Storage errors may also be reported if there is
     * a problem communicating with the underlying storage technology. In the event of an error,
     * TRAC will do its best to clean up any partially-written data in the storage layer.
     * 
*/
    public io.grpc.stub.StreamObserver<org.finos.tracdap.api.DataWriteRequest> updateDataset(
        io.grpc.stub.StreamObserver<org.finos.tracdap.metadata.TagHeader> responseObserver) {
      // NOTE(review): generic type parameters restored after extraction stripped them;
      // TagHeader response matches the blocking createSmallDataset/updateSmallDataset signatures.
      return io.grpc.stub.ClientCalls.asyncClientStreamingCall(
          getChannel().newCall(getUpdateDatasetMethod(), getCallOptions()), responseObserver);
    }

    /**
     *
     **
     * Update an existing dataset, supplying the schema and content as a single blob
     * This method updates an existing dataset and the corresponding DATA object
     * in the TRAC metadata store. As per the TRAC immutability guarantee, the original
     * version of the dataset is not altered. After an update, both the original version
     * and the new version are available to use as inputs into model runs and to read
     * and query using the data API. Data can be supplied in any format supported by the
     * platform.
     * To update a dataset, the priorVersion field must indicate the dataset being updated.
     * Only the latest version of a dataset can be updated.
     * The request must specify a schema for the new version of the dataset, incoming data
     * will be verified against the schema. The new schema must be compatible with the schema
     * of the previous version. Schemas can be specified using either:
     *    * A full schema definition - Datasets created using an embedded schema must supply
     *      a full schema for all subsequent versions and each schema version must be compatible
     *      with the version before. Fields may be added, but not removed or altered.
     *    * A schema ID - Datasets created using an external schema must use the same external
     *      schema ID for all subsequent versions. It is permitted for later versions of a
     *      dataset to use later versions of the external schema, but not earlier versions.
     * The "format" parameter describes the format used to upload data. For example,
     * to upload a CSV file the format would be set to "text/csv" and the file content
     * can be uploaded directly, or to upload the output of an editor grid in a web
     * client the format can be set to "text/json" to upload a JSON representation of
     * the editor contents. It is not necessary for different versions of the same dataset
     * to be uploaded using the same format. TRAC will apply format conversion before the
     * data is processed and stored.
     * Tag updates can be supplied to tag the new version of the dataset, they behave exactly
     * the same as tag updates in the updateObject() call of TracMetadataApi.
     * This is a unary call, all the request fields and metadata (including schema specifier)
     * and dataset content encoded as per the "format" field are supplied in a single message.
     * It is intended for working with small datasets and for use in environments where client
     * streaming is not available (particularly in gRPC-Web clients).
     * This method returns the header of the version of the DATA object. Error conditions
     * include: Invalid request, unknown tenant, schema not found (if an external schema
     * ID is used), schema version not compatible, format not supported, data does not match
     * schema, corrupt or invalid data stream. Storage errors may also be reported if there is
     * a problem communicating with the underlying storage technology. In the event of an error,
     * TRAC will do its best to clean up any partially-written data in the storage layer.
     * 
*/
    public void updateSmallDataset(org.finos.tracdap.api.DataWriteRequest request,
        io.grpc.stub.StreamObserver<org.finos.tracdap.metadata.TagHeader> responseObserver) {
      // NOTE(review): StreamObserver type parameter restored (TagHeader, per the blocking variant).
      io.grpc.stub.ClientCalls.asyncUnaryCall(
          getChannel().newCall(getUpdateSmallDatasetMethod(), getCallOptions()), request, responseObserver);
    }

    /**
     *
     **
     * Read an existing dataset, returning the content as a data stream
     * This method reads the contents of an existing dataset and returns it in the
     * requested format, along with a copy of the data schema. Data can be requested
     * in any format supported by the platform.
     * The request uses a regular TagSelector to indicate which dataset and version to read.
     * The format parameter is a mime type and must be a supported data format.
     * This is a server streaming method. The first message in the response stream will
     * contain a schema definition for the dataset (this may come from an embedded schema
     * or an external schema object). The second and subsequent messages will deliver the
     * content of the dataset in the requested format. TRAC guarantees that the first message
     * will always contain an empty chunk of content, which can be safely ignored.
     * Error conditions include: Invalid request, unknown tenant, object not found, format
     * not supported. Storage errors may also be reported if there is a problem communicating
     * with the underlying storage technology.
     * 
*/
    public void readDataset(org.finos.tracdap.api.DataReadRequest request,
        io.grpc.stub.StreamObserver<org.finos.tracdap.api.DataReadResponse> responseObserver) {
      // NOTE(review): response type restored (DataReadResponse, per the blocking readSmallDataset).
      io.grpc.stub.ClientCalls.asyncServerStreamingCall(
          getChannel().newCall(getReadDatasetMethod(), getCallOptions()), request, responseObserver);
    }

    /**
     *
     **
     * Read an existing dataset, returning the content as a single blob
     * This method reads the contents of an existing dataset and returns it in the
     * requested format, along with a copy of the data schema. Data can be requested
     * in any format supported by the platform.
     * The request uses a regular TagSelector to indicate which dataset and version to read.
     * The format parameter is a mime type and must be a supported data format.
     * This is a unary call, both the schema and the content of the dataset are returned
     * in a single response message. The content of the dataset will be encoded in the
     * requested format. Errors may occur if the content of the dataset is too large to
     * fit in a single message frame.
     * Error conditions include: Invalid request, unknown tenant, object not found, format
     * not supported. Storage errors may also be reported if there is a problem communicating
     * with the underlying storage technology.
     * 
*/
    public void readSmallDataset(org.finos.tracdap.api.DataReadRequest request,
        io.grpc.stub.StreamObserver<org.finos.tracdap.api.DataReadResponse> responseObserver) {
      // NOTE(review): response type restored (DataReadResponse, per the blocking variant).
      io.grpc.stub.ClientCalls.asyncUnaryCall(
          getChannel().newCall(getReadSmallDatasetMethod(), getCallOptions()), request, responseObserver);
    }

    /**
     *
     **
     * Upload a new file into TRAC, sending the content as a data stream
     * Calling this method will create a new FILE object in the metadata store.
     * Tag updates can be supplied when creating a FILE, they will be passed on to the
     * metadata service. The semantics for tag updates are identical to the createObject()
     * method in TracMetadataApi.
     * This is a client streaming method. The first message in the request stream
     * must contain all the request fields and required metadata. The second and subsequent
     * messages should contain the content of the file as a series of chunks (byte buffers).
     * (other fields that are set after the first message will be ignored).
     * Empty chunks can be included at any point in the stream and will be ignored.
     * Clients may choose to include the first chunk in the first message along with the
     * request metadata, or to put an empty chunk in the first message and start streaming
     * content in the second message. For very small files, it is possible to put the entire
     * content in one chunk in the first message, so there is only a single message in the stream.
     * All of these approaches are supported.
     * Clients may specify the size of the file being created. When a size is supplied, TRAC
     * will check the size against the number of bytes stored. If the stored file size does not
     * match the supplied value, the error will be reported with an error status of DATA_LOSS.
     * When no size is supplied the check cannot be performed.
     * The method returns the header of the newly created FILE object. Error conditions
     * include: Invalid request, unknown tenant and validation failure and data loss
     * (if the number of bytes stored does not match the number specified in the request).
     * Storage errors may also be reported if there is a problem communicating with the
     * underlying storage technology. In the event of an error, TRAC will do its best to
     * clean up any partially-written data in the storage layer.
     * 
*/
    public io.grpc.stub.StreamObserver<org.finos.tracdap.api.FileWriteRequest> createFile(
        io.grpc.stub.StreamObserver<org.finos.tracdap.metadata.TagHeader> responseObserver) {
      // NOTE(review): generic type parameters restored; TagHeader response matches
      // the blocking createSmallFile signature.
      return io.grpc.stub.ClientCalls.asyncClientStreamingCall(
          getChannel().newCall(getCreateFileMethod(), getCallOptions()), responseObserver);
    }

    /**
     *
     **
     * Upload a new file into TRAC, sending the content as a single blob
     * Calling this method will create a new FILE object in the metadata store.
     * Tag updates can be supplied when creating a FILE, they will be passed on to the
     * metadata service. The semantics for tag updates are identical to the createObject()
     * method in TracMetadataApi.
     * This is a unary method. The request must contain all the relevant fields and the
     * entire content of the file in a single message. Errors may occur if the file is
     * too large to fit in a single message frame.
     * Clients may specify the size of the file being created. When a size is supplied, TRAC
     * will check the size against the number of bytes stored. If the stored file size does not
     * match the supplied value, the error will be reported with an error status of DATA_LOSS.
     * When no size is supplied the check cannot be performed.
     * The method returns the header of the newly created FILE object. Error conditions
     * include: Invalid request, unknown tenant and validation failure, file too large and
     * data loss (if the number of bytes stored does not match the number specified in the
     * request). Storage errors may also be reported if there is a problem communicating with
     * the underlying storage technology. In the event of an error, TRAC will do its best to
     * clean up any partially-written data in the storage layer.
     * 
*/
    public void createSmallFile(org.finos.tracdap.api.FileWriteRequest request,
        io.grpc.stub.StreamObserver<org.finos.tracdap.metadata.TagHeader> responseObserver) {
      // NOTE(review): StreamObserver type parameter restored (TagHeader, per the blocking variant).
      io.grpc.stub.ClientCalls.asyncUnaryCall(
          getChannel().newCall(getCreateSmallFileMethod(), getCallOptions()), request, responseObserver);
    }

    /**
     *
     **
     * Upload a new version of an existing file into TRAC, sending the content as a data stream
     * Calling this method will update the relevant FILE object in the metadata store.
     * The latest version of the FILE must be supplied in the priorVersion field
     * of the request. For example if the latest version of a FILE object is version 2,
     * the priorVersion field should refer to version 2 and TRAC will create version 3
     * as a result of the update call. The metadata and content of prior versions
     * remain unaltered. The file name may be changed between versions, but the extension
     * and mime type must stay the same. Tag updates can be supplied when updating a FILE,
     * they will be passed on to the metadata service. The semantics for tag updates are
     * identical to the updateObject() method in TracMetadataApi.
     * This is a client streaming method. The first message in the request stream
     * must contain all the request fields and required metadata. The second and subsequent
     * messages should contain the content of the file as a series of byte buffers.
     * (other fields that are set after the first message will be ignored).
     * Empty chunks can be included at any point in the stream and will be ignored.
     * Clients may choose to include the first chunk in the first message along with the
     * request metadata, or to put an empty chunk in the first message and start streaming
     * content in the second message. For very small files, it is possible to put the entire
     * content in one chunk in the first message, so there is only a single message in the stream.
     * All of these approaches are supported.
     * Clients may specify the size of the file being updated. When a size is supplied, TRAC
     * will check the size against the number of bytes stored. If the stored file size does not
     * match the supplied value, the error will be reported with an error status of DATA_LOSS.
     * When no size is supplied the check cannot be performed.
     * The call returns the header for the new version of the FILE object. Error conditions
     * include: Invalid request, unknown tenant, validation failure, failed preconditions
     * (e.g. extension and mime type changes) and data loss (if the number of bytes stored
     * does not match the number specified in the request). Storage errors may also be reported
     * if there is a problem communicating with the underlying storage technology. In the event
     * of an error, TRAC will do its best to clean up any partially-written data in the storage layer.
     * 
*/
    public io.grpc.stub.StreamObserver<org.finos.tracdap.api.FileWriteRequest> updateFile(
        io.grpc.stub.StreamObserver<org.finos.tracdap.metadata.TagHeader> responseObserver) {
      // NOTE(review): generic type parameters restored after extraction stripped them.
      return io.grpc.stub.ClientCalls.asyncClientStreamingCall(
          getChannel().newCall(getUpdateFileMethod(), getCallOptions()), responseObserver);
    }

    /**
     *
     **
     * Upload a new version of an existing file into TRAC, sending the content as a single blob
     * Calling this method will update the relevant FILE object in the metadata store.
     * The latest version of the FILE must be supplied in the priorVersion field
     * of the request. For example if the latest version of a FILE object is version 2,
     * the priorVersion field should refer to version 2 and TRAC will create version 3
     * as a result of the update call. The metadata and content of prior versions
     * remain unaltered. The file name may be changed between versions, but the extension
     * and mime type must stay the same. Tag updates can be supplied when updating a FILE,
     * they will be passed on to the metadata service. The semantics for tag updates are
     * identical to the updateObject() method in TracMetadataApi.
     * This is a unary call. The request must contain all the relevant fields and the
     * entire content of the file in a single message. Errors may occur if the file is
     * too large to fit in a single message frame.
     * Clients may specify the size of the file being updated. When a size is supplied, TRAC
     * will check the size against the number of bytes stored. If the stored file size does not
     * match the supplied value, the error will be reported with an error status of DATA_LOSS.
     * When no size is supplied the check cannot be performed.
     * The call returns the header for the new version of the FILE object. Error conditions
     * include: Invalid request, unknown tenant, validation failure, failed preconditions
     * (e.g. extension and mime type changes) file too large and data loss (if the number of
     * bytes stored does not match the number specified in the request). Storage errors may also
     * be reported if there is a problem communicating with the underlying storage technology.
     * In the event of an error, TRAC will do its best to clean up any partially-written data in
     * the storage layer.
     * 
*/
    public void updateSmallFile(org.finos.tracdap.api.FileWriteRequest request,
        io.grpc.stub.StreamObserver<org.finos.tracdap.metadata.TagHeader> responseObserver) {
      // NOTE(review): StreamObserver type parameter restored (TagHeader — the Javadoc states the
      // call returns the header of the new FILE object version).
      io.grpc.stub.ClientCalls.asyncUnaryCall(
          getChannel().newCall(getUpdateSmallFileMethod(), getCallOptions()), request, responseObserver);
    }

    /**
     *
     **
     * Download a file that has been stored in TRAC and return it as a data stream
     * The request uses a regular TagSelector to indicate which file to read. The
     * semantics of the request are identical to the readObject() method in
     * TracMetadataApi.
     * This is a server streaming method. The first message in the response stream will
     * contain the response metadata (i.e. the file definition). The second
     * and subsequent messages will deliver the content of the file as a stream of chunks
     * (byte buffers). Empty chunks may be included at any point in the stream and
     * should be ignored. In particular, TRAC guarantees that the chunk in the first
     * message will always be an empty chunk. Clients are free to ignore this chunk,
     * for example if they have a separate function for processing the first message in
     * the response stream. Alternatively clients may process the empty chunk in the first
     * message in the same way as any other chunk. Both approaches are supported.
     * Error conditions include: Invalid request, unknown tenant, unknown object ID,
     * object type does not match ID, unknown object version, unknown tag version.
     * Storage errors may also be reported if there is a problem communicating with the
     * underlying storage technology.
     * 
*/
    public void readFile(org.finos.tracdap.api.FileReadRequest request,
        io.grpc.stub.StreamObserver<org.finos.tracdap.api.FileReadResponse> responseObserver) {
      // NOTE(review): response type restored as FileReadResponse to pair with FileReadRequest —
      // confirm against the upstream generated source.
      io.grpc.stub.ClientCalls.asyncServerStreamingCall(
          getChannel().newCall(getReadFileMethod(), getCallOptions()), request, responseObserver);
    }

    /**
     *
     **
     * Download a file that has been stored in TRAC and return it as a single blob
     * The request uses a regular TagSelector to indicate which file to read. The
     * semantics of the request are identical to the readObject() method in
     * TracMetadataApi.
     * This is a unary method, the response will contain the file definition and the
     * whole content of the file in a single message. Errors may occur if the file is
     * too large to fit in a single message frame.
     * Error conditions include: Invalid request, unknown tenant, unknown object ID,
     * object type does not match ID, unknown object version, unknown tag version,
     * file too large. Storage errors may also be reported if there is a problem
     * communicating with the underlying storage technology.
     * 
*/
    public void readSmallFile(org.finos.tracdap.api.FileReadRequest request,
        io.grpc.stub.StreamObserver<org.finos.tracdap.api.FileReadResponse> responseObserver) {
      // NOTE(review): response type restored as FileReadResponse to pair with FileReadRequest —
      // confirm against the upstream generated source.
      io.grpc.stub.ClientCalls.asyncUnaryCall(
          getChannel().newCall(getReadSmallFileMethod(), getCallOptions()), request, responseObserver);
    }

    /**
     *
     **
     * Download a file as a data stream
     * This method is intended for use by browsers and other HTTP clients
     * to download a file using an HTTP GET request.
     * 
*/
    public void downloadFile(org.finos.tracdap.api.DownloadRequest request,
        io.grpc.stub.StreamObserver<org.finos.tracdap.api.DownloadResponse> responseObserver) {
      // NOTE(review): response type restored as DownloadResponse to pair with DownloadRequest —
      // confirm against the upstream generated source.
      io.grpc.stub.ClientCalls.asyncServerStreamingCall(
          getChannel().newCall(getDownloadFileMethod(), getCallOptions()), request, responseObserver);
    }

    /**
     *
     **
     * Download the latest version of a file as a data stream
     * This method is intended for use by browsers and other HTTP clients
     * to download a file using an HTTP GET request.
     * 
*/
    public void downloadLatestFile(org.finos.tracdap.api.DownloadRequest request,
        io.grpc.stub.StreamObserver<org.finos.tracdap.api.DownloadResponse> responseObserver) {
      // NOTE(review): response type restored as DownloadResponse to pair with DownloadRequest —
      // confirm against the upstream generated source.
      io.grpc.stub.ClientCalls.asyncServerStreamingCall(
          getChannel().newCall(getDownloadLatestFileMethod(), getCallOptions()), request, responseObserver);
    }
  }

  /**
   * A stub to allow clients to do synchronous rpc calls to service TracDataApi.
   *
   **
   * Public API for creating, updating, reading and querying primary data stored in the TRAC platform.
   * The TRAC data API provides a standard mechanism for client applications to store and access data
   * in the TRAC platform. Calls are translated into the underlying storage mechanisms, using push-down
   * operations where possible for efficient queries on large datasets.
   * The data API includes format translation, so data can be uploaded and retrieved in any supported
   * format. The back-end storage format is controlled by the platform. For example if a user uploads
   * a CSV file TRAC will convert it to the default storage format (by default Arrow IPC file format).
   * Later a web application might ask for that data in JSON format and TRAC would again perform the
   * conversion. The platform uses Apache Arrow as an intermediate representation that other formats
   * are converted from and to.
   * The data API uses streaming operations to support transfer of large datasets and can be used for
   * both user-facing client applications and system-to-system integration. For regular, high-volume
   * data transfers there are other options for integration, including data import jobs and direct
   * back-end integration of the underlying storage technologies. These options can be faster and
   * reduce storage requirements, at the expense of tighter coupling between systems. A compromise is
   * to use direct integration and import jobs for a small number of critical feeds containing a
   * high volume of data, and use the data API for client access and for integration of secondary
   * and/or low-volume systems.
   * 
*/
  public static final class TracDataApiBlockingStub
      extends io.grpc.stub.AbstractBlockingStub<TracDataApiBlockingStub> {
    // NOTE(review): self-referential type parameter on AbstractBlockingStub restored after
    // extraction stripped it (standard protoc-gen-grpc-java stub pattern).

    private TracDataApiBlockingStub(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      super(channel, callOptions);
    }

    @java.lang.Override
    protected TracDataApiBlockingStub build(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      return new TracDataApiBlockingStub(channel, callOptions);
    }

    /**
     *
     **
     * Create a new dataset, supplying the schema and content as a single blob
     * This method creates a new dataset and a corresponding DATA object
     * in the TRAC metadata store. Once a dataset is created it can be used as an
     * input into a model run, it can also be read and queried using the data API.
     * Data can be supplied in any format supported by the platform.
     * The request must specify a schema for the dataset, incoming data will be
     * verified against the schema. Schemas can be specified using either:
     *    * A full schema definition - if a full schema is supplied, it will be embedded
     *      with the dataset and used for this dataset only
     *    * A schema ID - a tag selector for an existing SCHEMA object, which may be
     *      shared by multiple datasets
     * The "format" parameter describes the format used to upload data. For example,
     * to upload a CSV file the format would be set to "text/csv" and the file content
     * can be uploaded directly, or to upload the output of an editor grid in a web
     * client the format can be set to "text/json" to upload a JSON representation of
     * the editor contents. TRAC will apply format conversion before the data is
     * processed and stored.
     * Tag updates can be supplied to tag the newly created dataset, they behave exactly
     * the same as tag updates in the createObject() call of TracMetadataApi.
     * This is a unary call, all the request fields and metadata (including schema specifier)
     * and dataset content encoded as per the "format" field are supplied in a single message.
     * It is intended for working with small datasets and for use in environments where client
     * streaming is not available (particularly in gRPC-Web clients).
     * This method returns the header of the newly created DATA object. Error conditions
     * include: Invalid request, unknown tenant, schema not found (if an external schema
     * ID is used), format not supported, data does not match schema, corrupt or invalid
     * data stream. Storage errors may also be reported if there is a problem communicating
     * with the underlying storage technology. In the event of an error, TRAC will do its
     * best to clean up any partially-written data in the storage layer.
     * 
*/
    public org.finos.tracdap.metadata.TagHeader createSmallDataset(
        org.finos.tracdap.api.DataWriteRequest request) {
      // Blocking unary call; returns the header of the newly created DATA object.
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getCreateSmallDatasetMethod(), getCallOptions(), request);
    }

    /**
     *
     **
     * Update an existing dataset, supplying the schema and content as a single blob
     * This method updates an existing dataset and the corresponding DATA object
     * in the TRAC metadata store. As per the TRAC immutability guarantee, the original
     * version of the dataset is not altered. After an update, both the original version
     * and the new version are available to use as inputs into model runs and to read
     * and query using the data API. Data can be supplied in any format supported by the
     * platform.
     * To update a dataset, the priorVersion field must indicate the dataset being updated.
     * Only the latest version of a dataset can be updated.
     * The request must specify a schema for the new version of the dataset, incoming data
     * will be verified against the schema. The new schema must be compatible with the schema
     * of the previous version. Schemas can be specified using either:
     *    * A full schema definition - Datasets created using an embedded schema must supply
     *      a full schema for all subsequent versions and each schema version must be compatible
     *      with the version before. Fields may be added, but not removed or altered.
     *    * A schema ID - Datasets created using an external schema must use the same external
     *      schema ID for all subsequent versions. It is permitted for later versions of a
     *      dataset to use later versions of the external schema, but not earlier versions.
     * The "format" parameter describes the format used to upload data. For example,
     * to upload a CSV file the format would be set to "text/csv" and the file content
     * can be uploaded directly, or to upload the output of an editor grid in a web
     * client the format can be set to "text/json" to upload a JSON representation of
     * the editor contents. It is not necessary for different versions of the same dataset
     * to be uploaded using the same format. TRAC will apply format conversion before the
     * data is processed and stored.
     * Tag updates can be supplied to tag the new version of the dataset, they behave exactly
     * the same as tag updates in the updateObject() call of TracMetadataApi.
     * This is a unary call, all the request fields and metadata (including schema specifier)
     * and dataset content encoded as per the "format" field are supplied in a single message.
     * It is intended for working with small datasets and for use in environments where client
     * streaming is not available (particularly in gRPC-Web clients).
     * This method returns the header of the version of the DATA object. Error conditions
     * include: Invalid request, unknown tenant, schema not found (if an external schema
     * ID is used), schema version not compatible, format not supported, data does not match
     * schema, corrupt or invalid data stream. Storage errors may also be reported if there is
     * a problem communicating with the underlying storage technology. In the event of an error,
     * TRAC will do its best to clean up any partially-written data in the storage layer.
     * 
*/
    public org.finos.tracdap.metadata.TagHeader updateSmallDataset(
        org.finos.tracdap.api.DataWriteRequest request) {
      // Blocking unary call; returns the header of the updated DATA object version.
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getUpdateSmallDatasetMethod(), getCallOptions(), request);
    }

    /**
     *
     **
     * Read an existing dataset, returning the content as a data stream
     * This method reads the contents of an existing dataset and returns it in the
     * requested format, along with a copy of the data schema. Data can be requested
     * in any format supported by the platform.
     * The request uses a regular TagSelector to indicate which dataset and version to read.
     * The format parameter is a mime type and must be a supported data format.
     * This is a server streaming method. The first message in the response stream will
     * contain a schema definition for the dataset (this may come from an embedded schema
     * or an external schema object). The second and subsequent messages will deliver the
     * content of the dataset in the requested format. TRAC guarantees that the first message
     * will always contain an empty chunk of content, which can be safely ignored.
     * Error conditions include: Invalid request, unknown tenant, object not found, format
     * not supported. Storage errors may also be reported if there is a problem communicating
     * with the underlying storage technology.
     * 
*/
    public java.util.Iterator<org.finos.tracdap.api.DataReadResponse> readDataset(
        org.finos.tracdap.api.DataReadRequest request) {
      // NOTE(review): Iterator element type restored (DataReadResponse, per the readSmallDataset
      // return type) after extraction stripped the generic parameter.
      return io.grpc.stub.ClientCalls.blockingServerStreamingCall(
          getChannel(), getReadDatasetMethod(), getCallOptions(), request);
    }

    /**
     *
     **
     * Read an existing dataset, returning the content as a single blob
     * This method reads the contents of an existing dataset and returns it in the
     * requested format, along with a copy of the data schema. Data can be requested
     * in any format supported by the platform.
     * The request uses a regular TagSelector to indicate which dataset and version to read.
     * The format parameter is a mime type and must be a supported data format.
     * This is a unary call, both the schema and the content of the dataset are returned
     * in a single response message. The content of the dataset will be encoded in the
     * requested format. Errors may occur if the content of the dataset is too large to
     * fit in a single message frame.
     * Error conditions include: Invalid request, unknown tenant, object not found, format
     * not supported. Storage errors may also be reported if there is a problem communicating
     * with the underlying storage technology.
     * 
*/
    public org.finos.tracdap.api.DataReadResponse readSmallDataset(
        org.finos.tracdap.api.DataReadRequest request) {
      // Blocking unary call; schema and encoded content arrive in a single response message.
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getReadSmallDatasetMethod(), getCallOptions(), request);
    }

    /**
     *
     **
     * Upload a new file into TRAC, sending the content as a single blob
     * Calling this method will create a new FILE object in the metadata store.
     * Tag updates can be supplied when creating a FILE, they will be passed on to the
     * metadata service. The semantics for tag updates are identical to the createObject()
     * method in TracMetadataApi.
     * This is a unary method. The request must contain all the relevant fields and the
     * entire content of the file in a single message. Errors may occur if the file is
     * too large to fit in a single message frame.
     * Clients may specify the size of the file being created. When a size is supplied, TRAC
     * will check the size against the number of bytes stored. If the stored file size does not
     * match the supplied value, the error will be reported with an error status of DATA_LOSS.
     * When no size is supplied the check cannot be performed.
     * The method returns the header of the newly created FILE object. Error conditions
     * include: Invalid request, unknown tenant and validation failure, file too large and
     * data loss (if the number of bytes stored does not match the number specified in the
     * request). Storage errors may also be reported if there is a problem communicating with
     * the underlying storage technology. In the event of an error, TRAC will do its best to
     * clean up any partially-written data in the storage layer.
     */
    public org.finos.tracdap.metadata.TagHeader createSmallFile(org.finos.tracdap.api.FileWriteRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getCreateSmallFileMethod(), getCallOptions(), request);
    }

    /**
     **
     * Upload a new version of an existing file into TRAC, sending the content as a single blob
     * Calling this method will update the relevant FILE object in the metadata store.
     * The latest version of the FILE must be supplied in the priorVersion field
     * of the request. For example if the latest version of a FILE object is version 2,
     * the priorVersion field should refer to version 2 and TRAC will create version 3
     * as a result of the update call. The metadata and content of prior versions
     * remain unaltered. The file name may be changed between versions, but the extension
     * and mime type must stay the same. Tag updates can be supplied when updating a FILE,
     * they will be passed on to the metadata service. The semantics for tag updates are
     * identical to the updateObject() method in TracMetadataApi.
     * This is a unary call. The request must contain all the relevant fields and the
     * entire content of the file in a single message. Errors may occur if the file is
     * too large to fit in a single message frame.
     * Clients may specify the size of the file being updated. When a size is supplied, TRAC
     * will check the size against the number of bytes stored. If the stored file size does not
     * match the supplied value, the error will be reported with an error status of DATA_LOSS.
     * When no size is supplied the check cannot be performed.
     * The call returns the header for the new version of the FILE object. Error conditions
     * include: Invalid request, unknown tenant, validation failure, failed preconditions
     * (e.g. extension and mime type changes) file too large and data loss (if the number of
     * bytes stored does not match the number specified in the request). Storage errors may also
     * be reported if there is a problem communicating with the underlying storage technology.
     * In the event of an error, TRAC will do its best to clean up any partially-written data in
     * the storage layer.
     */
    public org.finos.tracdap.metadata.TagHeader updateSmallFile(org.finos.tracdap.api.FileWriteRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getUpdateSmallFileMethod(), getCallOptions(), request);
    }

    /**
     **
     * Download a file that has been stored in TRAC and return it as a data stream
     * The request uses a regular TagSelector to indicate which file to read. The
     * semantics of the request are identical to the readObject() method in
     * TracMetadataApi.
     * This is a server streaming method. The first message in the response stream will
     * contain the response metadata (i.e. the file definition). The second
     * and subsequent messages will deliver the content of the file as a stream of chunks
     * (byte buffers). Empty chunks may be included at any point in the stream and
     * should be ignored. In particular, TRAC guarantees that the chunk in the first
     * message will always be an empty chunk. Clients are free to ignore this chunk,
     * for example if they have a separate function for processing the first message in
     * the response stream. Alternatively clients may process the empty chunk in the first
     * message in the same way as any other chunk. Both approaches are supported.
     * Error conditions include: Invalid request, unknown tenant, unknown object ID,
     * object type does not match ID, unknown object version, unknown tag version.
     * Storage errors may also be reported if there is a problem communicating with the
     * underlying storage technology.
     */
    public java.util.Iterator<org.finos.tracdap.api.FileReadResponse> readFile(
        org.finos.tracdap.api.FileReadRequest request) {
      return io.grpc.stub.ClientCalls.blockingServerStreamingCall(
          getChannel(), getReadFileMethod(), getCallOptions(), request);
    }

    /**
     **
     * Download a file that has been stored in TRAC and return it as a single blob
     * The request uses a regular TagSelector to indicate which file to read. The
     * semantics of the request are identical to the readObject() method in
     * TracMetadataApi.
     * This is a unary method, the response will contain the file definition and the
     * whole content of the file in a single message. Errors may occur if the file is
     * too large to fit in a single message frame.
     * Error conditions include: Invalid request, unknown tenant, unknown object ID,
     * object type does not match ID, unknown object version, unknown tag version,
     * file too large. Storage errors may also be reported if there is a problem
     * communicating with the underlying storage technology.
     */
    public org.finos.tracdap.api.FileReadResponse readSmallFile(org.finos.tracdap.api.FileReadRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getReadSmallFileMethod(), getCallOptions(), request);
    }

    /**
     **
     * Download a file as a data stream
     * This method is intended for use by browsers and other HTTP clients
     * to download a file using an HTTP GET request.
     */
    public java.util.Iterator<org.finos.tracdap.api.DownloadResponse> downloadFile(
        org.finos.tracdap.api.DownloadRequest request) {
      return io.grpc.stub.ClientCalls.blockingServerStreamingCall(
          getChannel(), getDownloadFileMethod(), getCallOptions(), request);
    }

    /**
     **
     * Download the latest version of a file as a data stream
     * This method is intended for use by browsers and other HTTP clients
     * to download a file using an HTTP GET request.
     */
    public java.util.Iterator<org.finos.tracdap.api.DownloadResponse> downloadLatestFile(
        org.finos.tracdap.api.DownloadRequest request) {
      return io.grpc.stub.ClientCalls.blockingServerStreamingCall(
          getChannel(), getDownloadLatestFileMethod(), getCallOptions(), request);
    }
  }

  /**
   * A stub to allow clients to do ListenableFuture-style rpc calls to service TracDataApi.
   *
   **
   * Public API for creating, updating, reading and querying primary data stored in the TRAC platform.
   * The TRAC data API provides a standard mechanism for client applications to store and access data
   * in the TRAC platform. Calls are translated into the underlying storage mechanisms, using push-down
   * operations where possible for efficient queries on large datasets.
   * The data API includes format translation, so data can be uploaded and retrieved in any supported
   * format. The back-end storage format is controlled by the platform. For example if a user uploads
   * a CSV file TRAC will convert it to the default storage format (by default Arrow IPC file format).
   * Later a web application might ask for that data in JSON format and TRAC would again perform the
   * conversion. The platform uses Apache Arrow as an intermediate representation that other formats
   * are converted from and to.
   * The data API uses streaming operations to support transfer of large datasets and can be used for
   * both user-facing client applications and system-to-system integration. For regular, high-volume
   * data transfers there are other options for integration, including data import jobs and direct
   * back-end integration of the underlying storage technologies. These options can be faster and
   * reduce storage requirements, at the expense of tighter coupling between systems. A compromise is
   * to use direct integration and import jobs for a small number of critical feeds containing a
   * high volume of data, and use the data API for client access and for integration of secondary
   * and/or low-volume systems.
   */
  public static final class TracDataApiFutureStub
      extends io.grpc.stub.AbstractFutureStub<TracDataApiFutureStub> {
    private TracDataApiFutureStub(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      super(channel, callOptions);
    }

    @java.lang.Override
    protected TracDataApiFutureStub build(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      return new TracDataApiFutureStub(channel, callOptions);
    }

    /**
     **
     * Create a new dataset, supplying the schema and content as a single blob
     * This method creates a new dataset and a corresponding DATA object
     * in the TRAC metadata store. Once a dataset is created it can be used as an
     * input into a model run, it can also be read and queried using the data API.
     * Data can be supplied in any format supported by the platform.
     * The request must specify a schema for the dataset, incoming data will be
     * verified against the schema. Schemas can be specified using either:
     *    * A full schema definition - if a full schema is supplied, it will be embedded
     *      with the dataset and used for this dataset only
     *    * A schema ID - a tag selector for an existing SCHEMA object, which may be
     *      shared by multiple datasets
     * The "format" parameter describes the format used to upload data. For example,
     * to upload a CSV file the format would be set to "text/csv" and the file content
     * can be uploaded directly, or to upload the output of an editor grid in a web
     * client the format can be set to "text/json" to upload a JSON representation of
     * the editor contents. TRAC will apply format conversion before the data is
     * processed and stored.
     * Tag updates can be supplied to tag the newly created dataset, they behave exactly
     * the same as tag updates in the createObject() call of TracMetadataApi.
     * This is a unary call, all the request fields and metadata (including schema specifier)
     * and dataset content encoded as per the "format" field are supplied in a single message.
     * It is intended for working with small datasets and for use in environments where client
     * streaming is not available (particularly in gRPC-Web clients).
     * This method returns the header of the newly created DATA object. Error conditions
     * include: Invalid request, unknown tenant, schema not found (if an external schema
     * ID is used), format not supported, data does not match schema, corrupt or invalid
     * data stream. Storage errors may also be reported if there is a problem communicating
     * with the underlying storage technology. In the event of an error, TRAC will do its
     * best to clean up any partially-written data in the storage layer.
     */
    public com.google.common.util.concurrent.ListenableFuture<org.finos.tracdap.metadata.TagHeader> createSmallDataset(
        org.finos.tracdap.api.DataWriteRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getCreateSmallDatasetMethod(), getCallOptions()), request);
    }

    /**
     **
     * Update an existing dataset, supplying the schema and content as a single blob
     * This method updates an existing dataset and the corresponding DATA object
     * in the TRAC metadata store. As per the TRAC immutability guarantee, the original
     * version of the dataset is not altered. After an update, both the original version
     * and the new version are available to use as inputs into a model runs and to read
     * and query using the data API. Data can be supplied in any format supported by the
     * platform.
     * To update a dataset, the priorVersion field must indicate the dataset being updated.
     * Only the latest version of a dataset can be updated.
     * The request must specify a schema for the new version of the dataset, incoming data
     * will be verified against the schema. The new schema must be compatible with the schema
     * of the previous version. Schemas can be specified using either:
     *    * A full schema definition - Datasets created using an embedded schema must supply
     *      a full schema for all subsequent versions and each schema version must be compatible
     *      with the version before. Fields may be added, but not removed or altered.
     *    * A schema ID - Datasets created using an external schema must use the same external
     *      schema ID for all subsequent versions. It is permitted for later versions of a
     *      dataset to use later versions of the external schema, but not earlier versions.
     * The "format" parameter describes the format used to upload data. For example,
     * to upload a CSV file the format would be set to "text/csv" and the file content
     * can be uploaded directly, or to upload the output of an editor grid in a web
     * client the format can be set to "text/json" to upload a JSON representation of
     * the editor contents. It is not necessary for different versions of the same dataset
     * to be uploaded using the same format. TRAC will apply format conversion before the
     * data is processed and stored.
     * Tag updates can be supplied to tag the new version of the dataset, they behave exactly
     * the same as tag updates in the updateObject() call of TracMetadataApi.
     * This is a unary call, all the request fields and metadata (including schema specifier)
     * and dataset content encoded as per the "format" field are supplied in a single message.
     * It is intended for working with small datasets and for use in environments where client
     * streaming is not available (particularly in gRPC-Web clients).
     * This method returns the header of the version of the DATA object. Error conditions
     * include: Invalid request, unknown tenant, schema not found (if an external schema
     * ID is used), schema version not compatible, format not supported, data does not match
     * schema, corrupt or invalid data stream. Storage errors may also be reported if there is
     * a problem communicating with the underlying storage technology. In the event of an error,
     * TRAC will do its best to clean up any partially-written data in the storage layer.
     */
    public com.google.common.util.concurrent.ListenableFuture<org.finos.tracdap.metadata.TagHeader> updateSmallDataset(
        org.finos.tracdap.api.DataWriteRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getUpdateSmallDatasetMethod(), getCallOptions()), request);
    }

    /**
     **
     * Read an existing dataset, returning the content as a single blob
     * This method reads the contents of an existing dataset and returns it in the
     * requested format, along with a copy of the data schema. Data can be requested
     * in any format supported by the platform.
     * The request uses a regular TagSelector to indicate which dataset and version to read.
     * The format parameter is a mime type and must be a supported data format.
     * This is a unary call, both the schema and the content of the dataset are returned
     * in a single response message. The content of the dataset will be encoded in the
     * requested format. Errors may occur if the content of the dataset is too large to
     * fit in a single message frame.
     * Error conditions include: Invalid request, unknown tenant, object not found, format
     * not supported. Storage errors may also be reported if there is a problem communicating
     * with the underlying storage technology.
     */
    public com.google.common.util.concurrent.ListenableFuture<org.finos.tracdap.api.DataReadResponse> readSmallDataset(
        org.finos.tracdap.api.DataReadRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getReadSmallDatasetMethod(), getCallOptions()), request);
    }

    /**
     **
     * Upload a new file into TRAC, sending the content as a single blob
     * Calling this method will create a new FILE object in the metadata store.
     * Tag updates can be supplied when creating a FILE, they will be passed on to the
     * metadata service. The semantics for tag updates are identical to the createObject()
     * method in TracMetadataApi.
     * This is a unary method. The request must contain all the relevant fields and the
     * entire content of the file in a single message. Errors may occur if the file is
     * too large to fit in a single message frame.
     * Clients may specify the size of the file being created. When a size is supplied, TRAC
     * will check the size against the number of bytes stored. If the stored file size does not
     * match the supplied value, the error will be reported with an error status of DATA_LOSS.
     * When no size is supplied the check cannot be performed.
     * The method returns the header of the newly created FILE object. Error conditions
     * include: Invalid request, unknown tenant and validation failure, file too large and
     * data loss (if the number of bytes stored does not match the number specified in the
     * request). Storage errors may also be reported if there is a problem communicating with
     * the underlying storage technology. In the event of an error, TRAC will do its best to
     * clean up any partially-written data in the storage layer.
     */
    public com.google.common.util.concurrent.ListenableFuture<org.finos.tracdap.metadata.TagHeader> createSmallFile(
        org.finos.tracdap.api.FileWriteRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getCreateSmallFileMethod(), getCallOptions()), request);
    }

    /**
     **
     * Upload a new version of an existing file into TRAC, sending the content as a single blob
     * Calling this method will update the relevant FILE object in the metadata store.
     * The latest version of the FILE must be supplied in the priorVersion field
     * of the request. For example if the latest version of a FILE object is version 2,
     * the priorVersion field should refer to version 2 and TRAC will create version 3
     * as a result of the update call. The metadata and content of prior versions
     * remain unaltered. The file name may be changed between versions, but the extension
     * and mime type must stay the same. Tag updates can be supplied when updating a FILE,
     * they will be passed on to the metadata service. The semantics for tag updates are
     * identical to the updateObject() method in TracMetadataApi.
     * This is a unary call. The request must contain all the relevant fields and the
     * entire content of the file in a single message. Errors may occur if the file is
     * too large to fit in a single message frame.
     * Clients may specify the size of the file being updated. When a size is supplied, TRAC
     * will check the size against the number of bytes stored. If the stored file size does not
     * match the supplied value, the error will be reported with an error status of DATA_LOSS.
     * When no size is supplied the check cannot be performed.
     * The call returns the header for the new version of the FILE object. Error conditions
     * include: Invalid request, unknown tenant, validation failure, failed preconditions
     * (e.g. extension and mime type changes) file too large and data loss (if the number of
     * bytes stored does not match the number specified in the request). Storage errors may also
     * be reported if there is a problem communicating with the underlying storage technology.
     * In the event of an error, TRAC will do its best to clean up any partially-written data in
     * the storage layer.
     */
    public com.google.common.util.concurrent.ListenableFuture<org.finos.tracdap.metadata.TagHeader> updateSmallFile(
        org.finos.tracdap.api.FileWriteRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getUpdateSmallFileMethod(), getCallOptions()), request);
    }

    /**
     **
     * Download a file that has been stored in TRAC and return it as a single blob
     * The request uses a regular TagSelector to indicate which file to read. The
     * semantics of the request are identical to the readObject() method in
     * TracMetadataApi.
     * This is a unary method, the response will contain the file definition and the
     * whole content of the file in a single message. Errors may occur if the file is
     * too large to fit in a single message frame.
     * Error conditions include: Invalid request, unknown tenant, unknown object ID,
     * object type does not match ID, unknown object version, unknown tag version,
     * file too large. Storage errors may also be reported if there is a problem
     * communicating with the underlying storage technology.
     */
    public com.google.common.util.concurrent.ListenableFuture<org.finos.tracdap.api.FileReadResponse> readSmallFile(
        org.finos.tracdap.api.FileReadRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getReadSmallFileMethod(), getCallOptions()), request);
    }
  }

  // Method dispatch IDs used by MethodHandlers to route calls to the AsyncService implementation.
  private static final int METHODID_CREATE_SMALL_DATASET = 0;
  private static final int METHODID_UPDATE_SMALL_DATASET = 1;
  private static final int METHODID_READ_DATASET = 2;
  private static final int METHODID_READ_SMALL_DATASET = 3;
  private static final int METHODID_CREATE_SMALL_FILE = 4;
  private static final int METHODID_UPDATE_SMALL_FILE = 5;
  private static final int METHODID_READ_FILE = 6;
  private static final int METHODID_READ_SMALL_FILE = 7;
  private static final int METHODID_DOWNLOAD_FILE = 8;
  private static final int METHODID_DOWNLOAD_LATEST_FILE = 9;
  private static final int METHODID_CREATE_DATASET = 10;
  private static final int METHODID_UPDATE_DATASET = 11;
  private static final int METHODID_CREATE_FILE = 12;
  private static final int METHODID_UPDATE_FILE = 13;

  // Adapter that routes an incoming call, identified by methodId, to the matching
  // AsyncService method. Unary and server-streaming calls go through the first invoke();
  // client-streaming calls go through the second invoke(), which returns a request observer.
  private static final class MethodHandlers<Req, Resp> implements
      io.grpc.stub.ServerCalls.UnaryMethod<Req, Resp>,
      io.grpc.stub.ServerCalls.ServerStreamingMethod<Req, Resp>,
      io.grpc.stub.ServerCalls.ClientStreamingMethod<Req, Resp>,
      io.grpc.stub.ServerCalls.BidiStreamingMethod<Req, Resp> {
    private final AsyncService serviceImpl;
    private final int methodId;

    MethodHandlers(AsyncService serviceImpl, int methodId) {
      this.serviceImpl = serviceImpl;
      this.methodId = methodId;
    }

    @java.lang.Override
    @java.lang.SuppressWarnings("unchecked")
    public void invoke(Req request, io.grpc.stub.StreamObserver<Resp> responseObserver) {
      switch (methodId) {
        case METHODID_CREATE_SMALL_DATASET:
          serviceImpl.createSmallDataset((org.finos.tracdap.api.DataWriteRequest) request,
              (io.grpc.stub.StreamObserver<org.finos.tracdap.metadata.TagHeader>) responseObserver);
          break;
        case METHODID_UPDATE_SMALL_DATASET:
          serviceImpl.updateSmallDataset((org.finos.tracdap.api.DataWriteRequest) request,
              (io.grpc.stub.StreamObserver<org.finos.tracdap.metadata.TagHeader>) responseObserver);
          break;
        case METHODID_READ_DATASET:
          serviceImpl.readDataset((org.finos.tracdap.api.DataReadRequest) request,
              (io.grpc.stub.StreamObserver<org.finos.tracdap.api.DataReadResponse>) responseObserver);
          break;
        case METHODID_READ_SMALL_DATASET:
          serviceImpl.readSmallDataset((org.finos.tracdap.api.DataReadRequest) request,
              (io.grpc.stub.StreamObserver<org.finos.tracdap.api.DataReadResponse>) responseObserver);
          break;
        case METHODID_CREATE_SMALL_FILE:
          serviceImpl.createSmallFile((org.finos.tracdap.api.FileWriteRequest) request,
              (io.grpc.stub.StreamObserver<org.finos.tracdap.metadata.TagHeader>) responseObserver);
          break;
        case METHODID_UPDATE_SMALL_FILE:
          serviceImpl.updateSmallFile((org.finos.tracdap.api.FileWriteRequest) request,
              (io.grpc.stub.StreamObserver<org.finos.tracdap.metadata.TagHeader>) responseObserver);
          break;
        case METHODID_READ_FILE:
          serviceImpl.readFile((org.finos.tracdap.api.FileReadRequest) request,
              (io.grpc.stub.StreamObserver<org.finos.tracdap.api.FileReadResponse>) responseObserver);
          break;
        case METHODID_READ_SMALL_FILE:
          serviceImpl.readSmallFile((org.finos.tracdap.api.FileReadRequest) request,
              (io.grpc.stub.StreamObserver<org.finos.tracdap.api.FileReadResponse>) responseObserver);
          break;
        case METHODID_DOWNLOAD_FILE:
          serviceImpl.downloadFile((org.finos.tracdap.api.DownloadRequest) request,
              (io.grpc.stub.StreamObserver<org.finos.tracdap.api.DownloadResponse>) responseObserver);
          break;
        case METHODID_DOWNLOAD_LATEST_FILE:
          serviceImpl.downloadLatestFile((org.finos.tracdap.api.DownloadRequest) request,
              (io.grpc.stub.StreamObserver<org.finos.tracdap.api.DownloadResponse>) responseObserver);
          break;
        default:
          throw new AssertionError();
      }
    }

    @java.lang.Override
    @java.lang.SuppressWarnings("unchecked")
    public io.grpc.stub.StreamObserver<Req> invoke(
        io.grpc.stub.StreamObserver<Resp> responseObserver) {
      switch (methodId) {
        case METHODID_CREATE_DATASET:
          return (io.grpc.stub.StreamObserver<Req>) serviceImpl.createDataset(
              (io.grpc.stub.StreamObserver<org.finos.tracdap.metadata.TagHeader>) responseObserver);
        case METHODID_UPDATE_DATASET:
          return (io.grpc.stub.StreamObserver<Req>) serviceImpl.updateDataset(
              (io.grpc.stub.StreamObserver<org.finos.tracdap.metadata.TagHeader>) responseObserver);
        case METHODID_CREATE_FILE:
          return (io.grpc.stub.StreamObserver<Req>) serviceImpl.createFile(
              (io.grpc.stub.StreamObserver<org.finos.tracdap.metadata.TagHeader>) responseObserver);
        case METHODID_UPDATE_FILE:
          return (io.grpc.stub.StreamObserver<Req>) serviceImpl.updateFile(
              (io.grpc.stub.StreamObserver<org.finos.tracdap.metadata.TagHeader>) responseObserver);
        default:
          throw new AssertionError();
      }
    }
  }

  // Binds an AsyncService implementation to the service descriptor, registering one
  // handler per RPC method with the appropriate call type (unary / client / server streaming).
  public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) {
    return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor())
        .addMethod(
          getCreateDatasetMethod(),
          io.grpc.stub.ServerCalls.asyncClientStreamingCall(
            new MethodHandlers<
              org.finos.tracdap.api.DataWriteRequest,
              org.finos.tracdap.metadata.TagHeader>(
                service, METHODID_CREATE_DATASET)))
        .addMethod(
          getCreateSmallDatasetMethod(),
          io.grpc.stub.ServerCalls.asyncUnaryCall(
            new MethodHandlers<
              org.finos.tracdap.api.DataWriteRequest,
              org.finos.tracdap.metadata.TagHeader>(
                service, METHODID_CREATE_SMALL_DATASET)))
        .addMethod(
          getUpdateDatasetMethod(),
          io.grpc.stub.ServerCalls.asyncClientStreamingCall(
            new MethodHandlers<
              org.finos.tracdap.api.DataWriteRequest,
              org.finos.tracdap.metadata.TagHeader>(
                service, METHODID_UPDATE_DATASET)))
        .addMethod(
          getUpdateSmallDatasetMethod(),
          io.grpc.stub.ServerCalls.asyncUnaryCall(
            new MethodHandlers<
              org.finos.tracdap.api.DataWriteRequest,
              org.finos.tracdap.metadata.TagHeader>(
                service, METHODID_UPDATE_SMALL_DATASET)))
        .addMethod(
          getReadDatasetMethod(),
          io.grpc.stub.ServerCalls.asyncServerStreamingCall(
            new MethodHandlers<
              org.finos.tracdap.api.DataReadRequest,
              org.finos.tracdap.api.DataReadResponse>(
                service, METHODID_READ_DATASET)))
        .addMethod(
          getReadSmallDatasetMethod(),
          io.grpc.stub.ServerCalls.asyncUnaryCall(
            new MethodHandlers<
              org.finos.tracdap.api.DataReadRequest,
              org.finos.tracdap.api.DataReadResponse>(
                service, METHODID_READ_SMALL_DATASET)))
        .addMethod(
          getCreateFileMethod(),
          io.grpc.stub.ServerCalls.asyncClientStreamingCall(
            new MethodHandlers<
              org.finos.tracdap.api.FileWriteRequest,
              org.finos.tracdap.metadata.TagHeader>(
                service, METHODID_CREATE_FILE)))
        .addMethod(
          getCreateSmallFileMethod(),
          io.grpc.stub.ServerCalls.asyncUnaryCall(
            new MethodHandlers<
              org.finos.tracdap.api.FileWriteRequest,
              org.finos.tracdap.metadata.TagHeader>(
                service, METHODID_CREATE_SMALL_FILE)))
        .addMethod(
          getUpdateFileMethod(),
          io.grpc.stub.ServerCalls.asyncClientStreamingCall(
            new MethodHandlers<
              org.finos.tracdap.api.FileWriteRequest,
              org.finos.tracdap.metadata.TagHeader>(
                service, METHODID_UPDATE_FILE)))
        .addMethod(
          getUpdateSmallFileMethod(),
          io.grpc.stub.ServerCalls.asyncUnaryCall(
            new MethodHandlers<
              org.finos.tracdap.api.FileWriteRequest,
              org.finos.tracdap.metadata.TagHeader>(
                service, METHODID_UPDATE_SMALL_FILE)))
        .addMethod(
          getReadFileMethod(),
          io.grpc.stub.ServerCalls.asyncServerStreamingCall(
            new MethodHandlers<
              org.finos.tracdap.api.FileReadRequest,
              org.finos.tracdap.api.FileReadResponse>(
                service, METHODID_READ_FILE)))
        .addMethod(
          getReadSmallFileMethod(),
          io.grpc.stub.ServerCalls.asyncUnaryCall(
            new MethodHandlers<
              org.finos.tracdap.api.FileReadRequest,
              org.finos.tracdap.api.FileReadResponse>(
                service, METHODID_READ_SMALL_FILE)))
        .addMethod(
          getDownloadFileMethod(),
          io.grpc.stub.ServerCalls.asyncServerStreamingCall(
            new MethodHandlers<
              org.finos.tracdap.api.DownloadRequest,
              org.finos.tracdap.api.DownloadResponse>(
                service, METHODID_DOWNLOAD_FILE)))
        .addMethod(
          getDownloadLatestFileMethod(),
          io.grpc.stub.ServerCalls.asyncServerStreamingCall(
            new MethodHandlers<
              org.finos.tracdap.api.DownloadRequest,
              org.finos.tracdap.api.DownloadResponse>(
                service, METHODID_DOWNLOAD_LATEST_FILE)))
        .build();
  }

  // Supplies the proto file and service descriptors used for reflection and debug info.
  private static abstract class TracDataApiBaseDescriptorSupplier
      implements io.grpc.protobuf.ProtoFileDescriptorSupplier, io.grpc.protobuf.ProtoServiceDescriptorSupplier {
    TracDataApiBaseDescriptorSupplier() {}

    @java.lang.Override
    public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() {
      return org.finos.tracdap.api.Data.getDescriptor();
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() {
      return getFileDescriptor().findServiceByName("TracDataApi");
    }
  }

  private static final class TracDataApiFileDescriptorSupplier
      extends TracDataApiBaseDescriptorSupplier {
    TracDataApiFileDescriptorSupplier() {}
  }

  private static final class TracDataApiMethodDescriptorSupplier
      extends TracDataApiBaseDescriptorSupplier
      implements io.grpc.protobuf.ProtoMethodDescriptorSupplier {
    private final java.lang.String methodName;

    TracDataApiMethodDescriptorSupplier(java.lang.String methodName) {
      this.methodName = methodName;
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() {
      return getServiceDescriptor().findMethodByName(methodName);
    }
  }

  private static volatile io.grpc.ServiceDescriptor serviceDescriptor;

  // Lazily builds the service descriptor using double-checked locking on the
  // volatile serviceDescriptor field; method registration order mirrors bindService.
  public static io.grpc.ServiceDescriptor getServiceDescriptor() {
    io.grpc.ServiceDescriptor result = serviceDescriptor;
    if (result == null) {
      synchronized (TracDataApiGrpc.class) {
        result = serviceDescriptor;
        if (result == null) {
          serviceDescriptor = result = io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME)
              .setSchemaDescriptor(new TracDataApiFileDescriptorSupplier())
              .addMethod(getCreateDatasetMethod())
              .addMethod(getCreateSmallDatasetMethod())
              .addMethod(getUpdateDatasetMethod())
              .addMethod(getUpdateSmallDatasetMethod())
              .addMethod(getReadDatasetMethod())
              .addMethod(getReadSmallDatasetMethod())
              .addMethod(getCreateFileMethod())
              .addMethod(getCreateSmallFileMethod())
              .addMethod(getUpdateFileMethod())
              .addMethod(getUpdateSmallFileMethod())
              .addMethod(getReadFileMethod())
              .addMethod(getReadSmallFileMethod())
              .addMethod(getDownloadFileMethod())
              .addMethod(getDownloadLatestFileMethod())
              .build();
        }
      }
    }
    return result;
  }
}




© 2015 - 2024 Weber Informatics LLC | Privacy Policy