
com.google.cloud.aiplatform.v1.LlmUtilityServiceGrpc

/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.google.cloud.aiplatform.v1;

import static io.grpc.MethodDescriptor.generateFullMethodName;

/**
 *
 *
 * <pre>
 * Service for LLM related utility functions.
 * </pre>
 */
@javax.annotation.Generated(
    value = "by gRPC proto compiler",
    comments = "Source: google/cloud/aiplatform/v1/llm_utility_service.proto")
@io.grpc.stub.annotations.GrpcGenerated
public final class LlmUtilityServiceGrpc {

  private LlmUtilityServiceGrpc() {}

  public static final java.lang.String SERVICE_NAME =
      "google.cloud.aiplatform.v1.LlmUtilityService";

  // Static method descriptors that strictly reflect the proto.
  private static volatile io.grpc.MethodDescriptor<
          com.google.cloud.aiplatform.v1.CountTokensRequest,
          com.google.cloud.aiplatform.v1.CountTokensResponse>
      getCountTokensMethod;

  @io.grpc.stub.annotations.RpcMethod(
      fullMethodName = SERVICE_NAME + '/' + "CountTokens",
      requestType = com.google.cloud.aiplatform.v1.CountTokensRequest.class,
      responseType = com.google.cloud.aiplatform.v1.CountTokensResponse.class,
      methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
  public static io.grpc.MethodDescriptor<
          com.google.cloud.aiplatform.v1.CountTokensRequest,
          com.google.cloud.aiplatform.v1.CountTokensResponse>
      getCountTokensMethod() {
    io.grpc.MethodDescriptor<
            com.google.cloud.aiplatform.v1.CountTokensRequest,
            com.google.cloud.aiplatform.v1.CountTokensResponse>
        getCountTokensMethod;
    if ((getCountTokensMethod = LlmUtilityServiceGrpc.getCountTokensMethod) == null) {
      synchronized (LlmUtilityServiceGrpc.class) {
        if ((getCountTokensMethod = LlmUtilityServiceGrpc.getCountTokensMethod) == null) {
          LlmUtilityServiceGrpc.getCountTokensMethod =
              getCountTokensMethod =
                  io.grpc.MethodDescriptor
                      .<com.google.cloud.aiplatform.v1.CountTokensRequest,
                          com.google.cloud.aiplatform.v1.CountTokensResponse>
                          newBuilder()
                      .setType(io.grpc.MethodDescriptor.MethodType.UNARY)
                      .setFullMethodName(generateFullMethodName(SERVICE_NAME, "CountTokens"))
                      .setSampledToLocalTracing(true)
                      .setRequestMarshaller(
                          io.grpc.protobuf.ProtoUtils.marshaller(
                              com.google.cloud.aiplatform.v1.CountTokensRequest
                                  .getDefaultInstance()))
                      .setResponseMarshaller(
                          io.grpc.protobuf.ProtoUtils.marshaller(
                              com.google.cloud.aiplatform.v1.CountTokensResponse
                                  .getDefaultInstance()))
                      .setSchemaDescriptor(
                          new LlmUtilityServiceMethodDescriptorSupplier("CountTokens"))
                      .build();
        }
      }
    }
    return getCountTokensMethod;
  }

  private static volatile io.grpc.MethodDescriptor<
          com.google.cloud.aiplatform.v1.ComputeTokensRequest,
          com.google.cloud.aiplatform.v1.ComputeTokensResponse>
      getComputeTokensMethod;

  @io.grpc.stub.annotations.RpcMethod(
      fullMethodName = SERVICE_NAME + '/' + "ComputeTokens",
      requestType = com.google.cloud.aiplatform.v1.ComputeTokensRequest.class,
      responseType = com.google.cloud.aiplatform.v1.ComputeTokensResponse.class,
      methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
  public static io.grpc.MethodDescriptor<
          com.google.cloud.aiplatform.v1.ComputeTokensRequest,
          com.google.cloud.aiplatform.v1.ComputeTokensResponse>
      getComputeTokensMethod() {
    io.grpc.MethodDescriptor<
            com.google.cloud.aiplatform.v1.ComputeTokensRequest,
            com.google.cloud.aiplatform.v1.ComputeTokensResponse>
        getComputeTokensMethod;
    if ((getComputeTokensMethod = LlmUtilityServiceGrpc.getComputeTokensMethod) == null) {
      synchronized (LlmUtilityServiceGrpc.class) {
        if ((getComputeTokensMethod = LlmUtilityServiceGrpc.getComputeTokensMethod) == null) {
          LlmUtilityServiceGrpc.getComputeTokensMethod =
              getComputeTokensMethod =
                  io.grpc.MethodDescriptor
                      .<com.google.cloud.aiplatform.v1.ComputeTokensRequest,
                          com.google.cloud.aiplatform.v1.ComputeTokensResponse>
                          newBuilder()
                      .setType(io.grpc.MethodDescriptor.MethodType.UNARY)
                      .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ComputeTokens"))
                      .setSampledToLocalTracing(true)
                      .setRequestMarshaller(
                          io.grpc.protobuf.ProtoUtils.marshaller(
                              com.google.cloud.aiplatform.v1.ComputeTokensRequest
                                  .getDefaultInstance()))
                      .setResponseMarshaller(
                          io.grpc.protobuf.ProtoUtils.marshaller(
                              com.google.cloud.aiplatform.v1.ComputeTokensResponse
                                  .getDefaultInstance()))
                      .setSchemaDescriptor(
                          new LlmUtilityServiceMethodDescriptorSupplier("ComputeTokens"))
                      .build();
        }
      }
    }
    return getComputeTokensMethod;
  }

  /** Creates a new async stub that supports all call types for the service */
  public static LlmUtilityServiceStub newStub(io.grpc.Channel channel) {
    io.grpc.stub.AbstractStub.StubFactory<LlmUtilityServiceStub> factory =
        new io.grpc.stub.AbstractStub.StubFactory<LlmUtilityServiceStub>() {
          @java.lang.Override
          public LlmUtilityServiceStub newStub(
              io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
            return new LlmUtilityServiceStub(channel, callOptions);
          }
        };
    return LlmUtilityServiceStub.newStub(factory, channel);
  }

  /**
   * Creates a new blocking-style stub that supports unary and streaming output calls on the
   * service
   */
  public static LlmUtilityServiceBlockingStub newBlockingStub(io.grpc.Channel channel) {
    io.grpc.stub.AbstractStub.StubFactory<LlmUtilityServiceBlockingStub> factory =
        new io.grpc.stub.AbstractStub.StubFactory<LlmUtilityServiceBlockingStub>() {
          @java.lang.Override
          public LlmUtilityServiceBlockingStub newStub(
              io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
            return new LlmUtilityServiceBlockingStub(channel, callOptions);
          }
        };
    return LlmUtilityServiceBlockingStub.newStub(factory, channel);
  }

  /** Creates a new ListenableFuture-style stub that supports unary calls on the service */
  public static LlmUtilityServiceFutureStub newFutureStub(io.grpc.Channel channel) {
    io.grpc.stub.AbstractStub.StubFactory<LlmUtilityServiceFutureStub> factory =
        new io.grpc.stub.AbstractStub.StubFactory<LlmUtilityServiceFutureStub>() {
          @java.lang.Override
          public LlmUtilityServiceFutureStub newStub(
              io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
            return new LlmUtilityServiceFutureStub(channel, callOptions);
          }
        };
    return LlmUtilityServiceFutureStub.newStub(factory, channel);
  }

  /**
   *
   *
   * <pre>
   * Service for LLM related utility functions.
   * </pre>
   */
  public interface AsyncService {

    /**
     *
     *
     * <pre>
     * Perform a token counting.
     * </pre>
     */
    default void countTokens(
        com.google.cloud.aiplatform.v1.CountTokensRequest request,
        io.grpc.stub.StreamObserver<com.google.cloud.aiplatform.v1.CountTokensResponse>
            responseObserver) {
      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
          getCountTokensMethod(), responseObserver);
    }

    /**
     *
     *
     * <pre>
     * Return a list of tokens based on the input text.
     * </pre>
     */
    default void computeTokens(
        com.google.cloud.aiplatform.v1.ComputeTokensRequest request,
        io.grpc.stub.StreamObserver<com.google.cloud.aiplatform.v1.ComputeTokensResponse>
            responseObserver) {
      io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
          getComputeTokensMethod(), responseObserver);
    }
  }

  /**
   * Base class for the server implementation of the service LlmUtilityService.
   *
   * <pre>
   * Service for LLM related utility functions.
   * </pre>
   */
  public abstract static class LlmUtilityServiceImplBase
      implements io.grpc.BindableService, AsyncService {

    @java.lang.Override
    public final io.grpc.ServerServiceDefinition bindService() {
      return LlmUtilityServiceGrpc.bindService(this);
    }
  }

  /**
   * A stub to allow clients to do asynchronous rpc calls to service LlmUtilityService.
   *
   * <pre>
   * Service for LLM related utility functions.
   * </pre>
   */
  public static final class LlmUtilityServiceStub
      extends io.grpc.stub.AbstractAsyncStub<LlmUtilityServiceStub> {

    private LlmUtilityServiceStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      super(channel, callOptions);
    }

    @java.lang.Override
    protected LlmUtilityServiceStub build(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      return new LlmUtilityServiceStub(channel, callOptions);
    }

    /**
     *
     *
     * <pre>
     * Perform a token counting.
     * </pre>
     */
    public void countTokens(
        com.google.cloud.aiplatform.v1.CountTokensRequest request,
        io.grpc.stub.StreamObserver<com.google.cloud.aiplatform.v1.CountTokensResponse>
            responseObserver) {
      io.grpc.stub.ClientCalls.asyncUnaryCall(
          getChannel().newCall(getCountTokensMethod(), getCallOptions()),
          request,
          responseObserver);
    }

    /**
     *
     *
     * <pre>
     * Return a list of tokens based on the input text.
     * </pre>
     */
    public void computeTokens(
        com.google.cloud.aiplatform.v1.ComputeTokensRequest request,
        io.grpc.stub.StreamObserver<com.google.cloud.aiplatform.v1.ComputeTokensResponse>
            responseObserver) {
      io.grpc.stub.ClientCalls.asyncUnaryCall(
          getChannel().newCall(getComputeTokensMethod(), getCallOptions()),
          request,
          responseObserver);
    }
  }

  /**
   * A stub to allow clients to do synchronous rpc calls to service LlmUtilityService.
   *
   * <pre>
   * Service for LLM related utility functions.
   * </pre>
   */
  public static final class LlmUtilityServiceBlockingStub
      extends io.grpc.stub.AbstractBlockingStub<LlmUtilityServiceBlockingStub> {

    private LlmUtilityServiceBlockingStub(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      super(channel, callOptions);
    }

    @java.lang.Override
    protected LlmUtilityServiceBlockingStub build(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      return new LlmUtilityServiceBlockingStub(channel, callOptions);
    }

    /**
     *
     *
     * <pre>
     * Perform a token counting.
     * </pre>
     */
    public com.google.cloud.aiplatform.v1.CountTokensResponse countTokens(
        com.google.cloud.aiplatform.v1.CountTokensRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getCountTokensMethod(), getCallOptions(), request);
    }

    /**
     *
     *
     * <pre>
     * Return a list of tokens based on the input text.
     * </pre>
     */
    public com.google.cloud.aiplatform.v1.ComputeTokensResponse computeTokens(
        com.google.cloud.aiplatform.v1.ComputeTokensRequest request) {
      return io.grpc.stub.ClientCalls.blockingUnaryCall(
          getChannel(), getComputeTokensMethod(), getCallOptions(), request);
    }
  }

  /**
   * A stub to allow clients to do ListenableFuture-style rpc calls to service LlmUtilityService.
   *
   * <pre>
   * Service for LLM related utility functions.
   * </pre>
   */
  public static final class LlmUtilityServiceFutureStub
      extends io.grpc.stub.AbstractFutureStub<LlmUtilityServiceFutureStub> {

    private LlmUtilityServiceFutureStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      super(channel, callOptions);
    }

    @java.lang.Override
    protected LlmUtilityServiceFutureStub build(
        io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
      return new LlmUtilityServiceFutureStub(channel, callOptions);
    }

    /**
     *
     *
     * <pre>
     * Perform a token counting.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<
            com.google.cloud.aiplatform.v1.CountTokensResponse>
        countTokens(com.google.cloud.aiplatform.v1.CountTokensRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getCountTokensMethod(), getCallOptions()), request);
    }

    /**
     *
     *
     * <pre>
     * Return a list of tokens based on the input text.
     * </pre>
     */
    public com.google.common.util.concurrent.ListenableFuture<
            com.google.cloud.aiplatform.v1.ComputeTokensResponse>
        computeTokens(com.google.cloud.aiplatform.v1.ComputeTokensRequest request) {
      return io.grpc.stub.ClientCalls.futureUnaryCall(
          getChannel().newCall(getComputeTokensMethod(), getCallOptions()), request);
    }
  }

  private static final int METHODID_COUNT_TOKENS = 0;
  private static final int METHODID_COMPUTE_TOKENS = 1;

  private static final class MethodHandlers<Req, Resp>
      implements io.grpc.stub.ServerCalls.UnaryMethod<Req, Resp>,
          io.grpc.stub.ServerCalls.ServerStreamingMethod<Req, Resp>,
          io.grpc.stub.ServerCalls.ClientStreamingMethod<Req, Resp>,
          io.grpc.stub.ServerCalls.BidiStreamingMethod<Req, Resp> {
    private final AsyncService serviceImpl;
    private final int methodId;

    MethodHandlers(AsyncService serviceImpl, int methodId) {
      this.serviceImpl = serviceImpl;
      this.methodId = methodId;
    }

    @java.lang.Override
    @java.lang.SuppressWarnings("unchecked")
    public void invoke(Req request, io.grpc.stub.StreamObserver<Resp> responseObserver) {
      switch (methodId) {
        case METHODID_COUNT_TOKENS:
          serviceImpl.countTokens(
              (com.google.cloud.aiplatform.v1.CountTokensRequest) request,
              (io.grpc.stub.StreamObserver<com.google.cloud.aiplatform.v1.CountTokensResponse>)
                  responseObserver);
          break;
        case METHODID_COMPUTE_TOKENS:
          serviceImpl.computeTokens(
              (com.google.cloud.aiplatform.v1.ComputeTokensRequest) request,
              (io.grpc.stub.StreamObserver<com.google.cloud.aiplatform.v1.ComputeTokensResponse>)
                  responseObserver);
          break;
        default:
          throw new AssertionError();
      }
    }

    @java.lang.Override
    @java.lang.SuppressWarnings("unchecked")
    public io.grpc.stub.StreamObserver<Req> invoke(
        io.grpc.stub.StreamObserver<Resp> responseObserver) {
      switch (methodId) {
        default:
          throw new AssertionError();
      }
    }
  }

  public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) {
    return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor())
        .addMethod(
            getCountTokensMethod(),
            io.grpc.stub.ServerCalls.asyncUnaryCall(
                new MethodHandlers<
                    com.google.cloud.aiplatform.v1.CountTokensRequest,
                    com.google.cloud.aiplatform.v1.CountTokensResponse>(
                    service, METHODID_COUNT_TOKENS)))
        .addMethod(
            getComputeTokensMethod(),
            io.grpc.stub.ServerCalls.asyncUnaryCall(
                new MethodHandlers<
                    com.google.cloud.aiplatform.v1.ComputeTokensRequest,
                    com.google.cloud.aiplatform.v1.ComputeTokensResponse>(
                    service, METHODID_COMPUTE_TOKENS)))
        .build();
  }

  private abstract static class LlmUtilityServiceBaseDescriptorSupplier
      implements io.grpc.protobuf.ProtoFileDescriptorSupplier,
          io.grpc.protobuf.ProtoServiceDescriptorSupplier {
    LlmUtilityServiceBaseDescriptorSupplier() {}

    @java.lang.Override
    public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() {
      return com.google.cloud.aiplatform.v1.LlmUtilityServiceProto.getDescriptor();
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() {
      return getFileDescriptor().findServiceByName("LlmUtilityService");
    }
  }

  private static final class LlmUtilityServiceFileDescriptorSupplier
      extends LlmUtilityServiceBaseDescriptorSupplier {
    LlmUtilityServiceFileDescriptorSupplier() {}
  }

  private static final class LlmUtilityServiceMethodDescriptorSupplier
      extends LlmUtilityServiceBaseDescriptorSupplier
      implements io.grpc.protobuf.ProtoMethodDescriptorSupplier {
    private final java.lang.String methodName;

    LlmUtilityServiceMethodDescriptorSupplier(java.lang.String methodName) {
      this.methodName = methodName;
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() {
      return getServiceDescriptor().findMethodByName(methodName);
    }
  }

  private static volatile io.grpc.ServiceDescriptor serviceDescriptor;

  public static io.grpc.ServiceDescriptor getServiceDescriptor() {
    io.grpc.ServiceDescriptor result = serviceDescriptor;
    if (result == null) {
      synchronized (LlmUtilityServiceGrpc.class) {
        result = serviceDescriptor;
        if (result == null) {
          serviceDescriptor =
              result =
                  io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME)
                      .setSchemaDescriptor(new LlmUtilityServiceFileDescriptorSupplier())
                      .addMethod(getCountTokensMethod())
                      .addMethod(getComputeTokensMethod())
                      .build();
        }
      }
    }
    return result;
  }
}
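
The sketch below is not part of the generated file; it illustrates one way a client might call CountTokens through the blocking stub defined above. The regional host, endpoint resource name, model id, and the grpc-auth/google-auth-library credential wiring are placeholder assumptions to adapt to your own project, and the CountTokensRequest field names (setEndpoint, addContents) follow the v1 proto as published and should be checked against the library version you build with.

import com.google.auth.oauth2.GoogleCredentials;
import com.google.cloud.aiplatform.v1.Content;
import com.google.cloud.aiplatform.v1.CountTokensRequest;
import com.google.cloud.aiplatform.v1.CountTokensResponse;
import com.google.cloud.aiplatform.v1.LlmUtilityServiceGrpc;
import com.google.cloud.aiplatform.v1.Part;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import io.grpc.auth.MoreCallCredentials;

public class CountTokensExample {
  public static void main(String[] args) throws Exception {
    // Placeholder values; substitute your own project, location, and model.
    String location = "us-central1";
    String endpoint =
        "projects/my-project/locations/" + location
            + "/publishers/google/models/gemini-1.5-flash";

    // Channels built with forTarget() use TLS by default; auth is attached per stub below.
    ManagedChannel channel =
        ManagedChannelBuilder.forTarget(location + "-aiplatform.googleapis.com:443").build();
    try {
      // Blocking stub with Application Default Credentials (requires grpc-auth on the classpath).
      LlmUtilityServiceGrpc.LlmUtilityServiceBlockingStub stub =
          LlmUtilityServiceGrpc.newBlockingStub(channel)
              .withCallCredentials(
                  MoreCallCredentials.from(GoogleCredentials.getApplicationDefault()));

      CountTokensRequest request =
          CountTokensRequest.newBuilder()
              .setEndpoint(endpoint)
              .addContents(
                  Content.newBuilder()
                      .setRole("user")
                      .addParts(Part.newBuilder().setText("How many tokens is this sentence?")))
              .build();

      CountTokensResponse response = stub.countTokens(request);
      System.out.println("total tokens: " + response.getTotalTokens());
    } finally {
      channel.shutdownNow();
    }
  }
}

The async and future stubs are wired up the same way; only the call style differs (StreamObserver callback versus ListenableFuture).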
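On the server side, a minimal sketch of extending the generated LlmUtilityServiceImplBase, for example as an in-process fake for tests. The whitespace-based "token" count and the port are purely illustrative assumptions, not the real service behavior; computeTokens is left at the generated default, so it answers UNIMPLEMENTED.

import com.google.cloud.aiplatform.v1.Content;
import com.google.cloud.aiplatform.v1.CountTokensRequest;
import com.google.cloud.aiplatform.v1.CountTokensResponse;
import com.google.cloud.aiplatform.v1.LlmUtilityServiceGrpc;
import com.google.cloud.aiplatform.v1.Part;
import io.grpc.Server;
import io.grpc.ServerBuilder;
import io.grpc.stub.StreamObserver;

public class FakeLlmUtilityService extends LlmUtilityServiceGrpc.LlmUtilityServiceImplBase {

  @Override
  public void countTokens(
      CountTokensRequest request, StreamObserver<CountTokensResponse> responseObserver) {
    // Toy implementation: count whitespace-separated words across all request contents.
    int total = 0;
    for (Content content : request.getContentsList()) {
      for (Part part : content.getPartsList()) {
        total += part.getText().isEmpty() ? 0 : part.getText().trim().split("\\s+").length;
      }
    }
    responseObserver.onNext(CountTokensResponse.newBuilder().setTotalTokens(total).build());
    responseObserver.onCompleted();
  }

  public static void main(String[] args) throws Exception {
    // bindService() is provided by the generated base class, so the fake can be served directly.
    Server server =
        ServerBuilder.forPort(8080).addService(new FakeLlmUtilityService()).build().start();
    server.awaitTermination();
  }
}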



