
/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package software.amazon.awssdk.services.machinelearning;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ScheduledExecutorService;
import java.util.function.Consumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.annotations.Generated;
import software.amazon.awssdk.annotations.SdkInternalApi;
import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration;
import software.amazon.awssdk.awscore.client.handler.AwsAsyncClientHandler;
import software.amazon.awssdk.awscore.exception.AwsServiceException;
import software.amazon.awssdk.core.ApiName;
import software.amazon.awssdk.core.RequestOverrideConfiguration;
import software.amazon.awssdk.core.client.config.SdkClientConfiguration;
import software.amazon.awssdk.core.client.config.SdkClientOption;
import software.amazon.awssdk.core.client.handler.AsyncClientHandler;
import software.amazon.awssdk.core.client.handler.ClientExecutionParams;
import software.amazon.awssdk.core.http.HttpResponseHandler;
import software.amazon.awssdk.core.metrics.CoreMetric;
import software.amazon.awssdk.core.util.VersionInfo;
import software.amazon.awssdk.metrics.MetricCollector;
import software.amazon.awssdk.metrics.MetricPublisher;
import software.amazon.awssdk.metrics.NoOpMetricCollector;
import software.amazon.awssdk.protocols.core.ExceptionMetadata;
import software.amazon.awssdk.protocols.json.AwsJsonProtocol;
import software.amazon.awssdk.protocols.json.AwsJsonProtocolFactory;
import software.amazon.awssdk.protocols.json.BaseAwsJsonProtocolFactory;
import software.amazon.awssdk.protocols.json.JsonOperationMetadata;
import software.amazon.awssdk.services.machinelearning.model.AddTagsRequest;
import software.amazon.awssdk.services.machinelearning.model.AddTagsResponse;
import software.amazon.awssdk.services.machinelearning.model.CreateBatchPredictionRequest;
import software.amazon.awssdk.services.machinelearning.model.CreateBatchPredictionResponse;
import software.amazon.awssdk.services.machinelearning.model.CreateDataSourceFromRdsRequest;
import software.amazon.awssdk.services.machinelearning.model.CreateDataSourceFromRdsResponse;
import software.amazon.awssdk.services.machinelearning.model.CreateDataSourceFromRedshiftRequest;
import software.amazon.awssdk.services.machinelearning.model.CreateDataSourceFromRedshiftResponse;
import software.amazon.awssdk.services.machinelearning.model.CreateDataSourceFromS3Request;
import software.amazon.awssdk.services.machinelearning.model.CreateDataSourceFromS3Response;
import software.amazon.awssdk.services.machinelearning.model.CreateEvaluationRequest;
import software.amazon.awssdk.services.machinelearning.model.CreateEvaluationResponse;
import software.amazon.awssdk.services.machinelearning.model.CreateMlModelRequest;
import software.amazon.awssdk.services.machinelearning.model.CreateMlModelResponse;
import software.amazon.awssdk.services.machinelearning.model.CreateRealtimeEndpointRequest;
import software.amazon.awssdk.services.machinelearning.model.CreateRealtimeEndpointResponse;
import software.amazon.awssdk.services.machinelearning.model.DeleteBatchPredictionRequest;
import software.amazon.awssdk.services.machinelearning.model.DeleteBatchPredictionResponse;
import software.amazon.awssdk.services.machinelearning.model.DeleteDataSourceRequest;
import software.amazon.awssdk.services.machinelearning.model.DeleteDataSourceResponse;
import software.amazon.awssdk.services.machinelearning.model.DeleteEvaluationRequest;
import software.amazon.awssdk.services.machinelearning.model.DeleteEvaluationResponse;
import software.amazon.awssdk.services.machinelearning.model.DeleteMlModelRequest;
import software.amazon.awssdk.services.machinelearning.model.DeleteMlModelResponse;
import software.amazon.awssdk.services.machinelearning.model.DeleteRealtimeEndpointRequest;
import software.amazon.awssdk.services.machinelearning.model.DeleteRealtimeEndpointResponse;
import software.amazon.awssdk.services.machinelearning.model.DeleteTagsRequest;
import software.amazon.awssdk.services.machinelearning.model.DeleteTagsResponse;
import software.amazon.awssdk.services.machinelearning.model.DescribeBatchPredictionsRequest;
import software.amazon.awssdk.services.machinelearning.model.DescribeBatchPredictionsResponse;
import software.amazon.awssdk.services.machinelearning.model.DescribeDataSourcesRequest;
import software.amazon.awssdk.services.machinelearning.model.DescribeDataSourcesResponse;
import software.amazon.awssdk.services.machinelearning.model.DescribeEvaluationsRequest;
import software.amazon.awssdk.services.machinelearning.model.DescribeEvaluationsResponse;
import software.amazon.awssdk.services.machinelearning.model.DescribeMlModelsRequest;
import software.amazon.awssdk.services.machinelearning.model.DescribeMlModelsResponse;
import software.amazon.awssdk.services.machinelearning.model.DescribeTagsRequest;
import software.amazon.awssdk.services.machinelearning.model.DescribeTagsResponse;
import software.amazon.awssdk.services.machinelearning.model.GetBatchPredictionRequest;
import software.amazon.awssdk.services.machinelearning.model.GetBatchPredictionResponse;
import software.amazon.awssdk.services.machinelearning.model.GetDataSourceRequest;
import software.amazon.awssdk.services.machinelearning.model.GetDataSourceResponse;
import software.amazon.awssdk.services.machinelearning.model.GetEvaluationRequest;
import software.amazon.awssdk.services.machinelearning.model.GetEvaluationResponse;
import software.amazon.awssdk.services.machinelearning.model.GetMlModelRequest;
import software.amazon.awssdk.services.machinelearning.model.GetMlModelResponse;
import software.amazon.awssdk.services.machinelearning.model.IdempotentParameterMismatchException;
import software.amazon.awssdk.services.machinelearning.model.InternalServerException;
import software.amazon.awssdk.services.machinelearning.model.InvalidInputException;
import software.amazon.awssdk.services.machinelearning.model.InvalidTagException;
import software.amazon.awssdk.services.machinelearning.model.LimitExceededException;
import software.amazon.awssdk.services.machinelearning.model.MachineLearningException;
import software.amazon.awssdk.services.machinelearning.model.MachineLearningRequest;
import software.amazon.awssdk.services.machinelearning.model.PredictRequest;
import software.amazon.awssdk.services.machinelearning.model.PredictResponse;
import software.amazon.awssdk.services.machinelearning.model.PredictorNotMountedException;
import software.amazon.awssdk.services.machinelearning.model.ResourceNotFoundException;
import software.amazon.awssdk.services.machinelearning.model.TagLimitExceededException;
import software.amazon.awssdk.services.machinelearning.model.UpdateBatchPredictionRequest;
import software.amazon.awssdk.services.machinelearning.model.UpdateBatchPredictionResponse;
import software.amazon.awssdk.services.machinelearning.model.UpdateDataSourceRequest;
import software.amazon.awssdk.services.machinelearning.model.UpdateDataSourceResponse;
import software.amazon.awssdk.services.machinelearning.model.UpdateEvaluationRequest;
import software.amazon.awssdk.services.machinelearning.model.UpdateEvaluationResponse;
import software.amazon.awssdk.services.machinelearning.model.UpdateMlModelRequest;
import software.amazon.awssdk.services.machinelearning.model.UpdateMlModelResponse;
import software.amazon.awssdk.services.machinelearning.paginators.DescribeBatchPredictionsPublisher;
import software.amazon.awssdk.services.machinelearning.paginators.DescribeDataSourcesPublisher;
import software.amazon.awssdk.services.machinelearning.paginators.DescribeEvaluationsPublisher;
import software.amazon.awssdk.services.machinelearning.paginators.DescribeMLModelsPublisher;
import software.amazon.awssdk.services.machinelearning.transform.AddTagsRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.CreateBatchPredictionRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.CreateDataSourceFromRdsRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.CreateDataSourceFromRedshiftRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.CreateDataSourceFromS3RequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.CreateEvaluationRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.CreateMlModelRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.CreateRealtimeEndpointRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.DeleteBatchPredictionRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.DeleteDataSourceRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.DeleteEvaluationRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.DeleteMlModelRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.DeleteRealtimeEndpointRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.DeleteTagsRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.DescribeBatchPredictionsRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.DescribeDataSourcesRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.DescribeEvaluationsRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.DescribeMlModelsRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.DescribeTagsRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.GetBatchPredictionRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.GetDataSourceRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.GetEvaluationRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.GetMlModelRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.PredictRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.UpdateBatchPredictionRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.UpdateDataSourceRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.UpdateEvaluationRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.transform.UpdateMlModelRequestMarshaller;
import software.amazon.awssdk.services.machinelearning.waiters.MachineLearningAsyncWaiter;
import software.amazon.awssdk.utils.CompletableFutureUtils;
/**
* Internal implementation of {@link MachineLearningAsyncClient}.
*
* @see MachineLearningAsyncClient#builder()
*/
@Generated("software.amazon.awssdk:codegen")
@SdkInternalApi
final class DefaultMachineLearningAsyncClient implements MachineLearningAsyncClient {
private static final Logger log = LoggerFactory.getLogger(DefaultMachineLearningAsyncClient.class);
private final AsyncClientHandler clientHandler;
private final AwsJsonProtocolFactory protocolFactory;
private final SdkClientConfiguration clientConfiguration;
private final ScheduledExecutorService executorService;
protected DefaultMachineLearningAsyncClient(SdkClientConfiguration clientConfiguration) {
this.clientHandler = new AwsAsyncClientHandler(clientConfiguration);
this.clientConfiguration = clientConfiguration;
this.protocolFactory = init(AwsJsonProtocolFactory.builder()).build();
this.executorService = clientConfiguration.option(SdkClientOption.SCHEDULED_EXECUTOR_SERVICE);
}
@Override
public final String serviceName() {
return SERVICE_NAME;
}
/**
*
* Adds one or more tags to an object, up to a limit of 10. Each tag consists of a key and an optional value. If you
* add a tag using a key that is already associated with the ML object, AddTags updates the tag's value.
*
*
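* A minimal usage sketch, not taken from the service documentation; the resource id, tag key, and tag value below
* are placeholders, and the call is made through the public MachineLearningAsyncClient interface:
*
* <pre>{@code
* MachineLearningAsyncClient client = MachineLearningAsyncClient.create();
* AddTagsRequest request = AddTagsRequest.builder()
*         .resourceId("ml-example-model-id")   // placeholder id of the ML object being tagged
*         .resourceType("MLModel")             // type of the tagged object (BatchPrediction, DataSource, Evaluation, MLModel)
*         .tags(Tag.builder().key("team").value("forecasting").build())
*         .build();
* client.addTags(request).join();              // join() only for illustration; prefer composing the returned future
* }</pre>
*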
* @param addTagsRequest
* @return A Java Future containing the result of the AddTags operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - InvalidTagException
* - TagLimitExceededException
* - ResourceNotFoundException A specified resource cannot be located.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.AddTags
*/
@Override
public CompletableFuture<AddTagsResponse> addTags(AddTagsRequest addTagsRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, addTagsRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "AddTags");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<AddTagsResponse> responseHandler = protocolFactory.createResponseHandler(operationMetadata,
AddTagsResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<AddTagsResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<AddTagsRequest, AddTagsResponse>().withOperationName("AddTags")
.withMarshaller(new AddTagsRequestMarshaller(protocolFactory)).withResponseHandler(responseHandler)
.withErrorResponseHandler(errorResponseHandler).withMetricCollector(apiCallMetricCollector)
.withInput(addTagsRequest));
CompletableFuture<AddTagsResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Generates predictions for a group of observations. The observations to process exist in one or more data files
* referenced by a DataSource. This operation creates a new BatchPrediction, and uses an MLModel and the data files
* referenced by the DataSource as information sources.
*
*
* CreateBatchPrediction is an asynchronous operation. In response to CreateBatchPrediction, Amazon Machine Learning
* (Amazon ML) immediately returns and sets the BatchPrediction status to PENDING. After the BatchPrediction
* completes, Amazon ML sets the status to COMPLETED.
*
*
* You can poll for status updates by using the GetBatchPrediction operation and checking the Status parameter of
* the result. After the COMPLETED status appears, the results are available in the location specified by the
* OutputUri parameter.
*
*
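* A minimal usage sketch, assuming an existing MachineLearningAsyncClient named client and placeholder ids; it
* chains a single GetBatchPrediction status check onto the create call (a real caller would poll, or use the SDK
* waiters, until the status reaches COMPLETED):
*
* <pre>{@code
* CreateBatchPredictionRequest create = CreateBatchPredictionRequest.builder()
*         .batchPredictionId("example-bp-id")             // placeholder id chosen by the caller
*         .mlModelId("example-model-id")                  // placeholder id of an existing MLModel
*         .batchPredictionDataSourceId("example-ds-id")   // placeholder id of the input DataSource
*         .outputUri("s3://example-bucket/batch-output/") // placeholder S3 location for the results
*         .build();
* client.createBatchPrediction(create)
*       .thenCompose(r -> client.getBatchPrediction(
*               GetBatchPredictionRequest.builder().batchPredictionId("example-bp-id").build()))
*       .thenAccept(r -> System.out.println("Status: " + r.status()));
* }</pre>
*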
* @param createBatchPredictionRequest
* @return A Java Future containing the result of the CreateBatchPrediction operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - InternalServerException An error on the server occurred when trying to process a request.
* - IdempotentParameterMismatchException A second request to use or change an object was not allowed.
* This can result from retrying a request using a parameter that was not present in the original request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.CreateBatchPrediction
*/
@Override
public CompletableFuture<CreateBatchPredictionResponse> createBatchPrediction(
CreateBatchPredictionRequest createBatchPredictionRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, createBatchPredictionRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "CreateBatchPrediction");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<CreateBatchPredictionResponse> responseHandler = protocolFactory.createResponseHandler(
operationMetadata, CreateBatchPredictionResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<CreateBatchPredictionResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<CreateBatchPredictionRequest, CreateBatchPredictionResponse>()
.withOperationName("CreateBatchPrediction")
.withMarshaller(new CreateBatchPredictionRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(createBatchPredictionRequest));
CompletableFuture<CreateBatchPredictionResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Creates a DataSource object from an Amazon Relational Database Service (Amazon RDS). A DataSource references data
* that can be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.
*
*
* CreateDataSourceFromRDS is an asynchronous operation. In response to CreateDataSourceFromRDS, Amazon Machine
* Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource is
* created and ready for use, Amazon ML sets the Status parameter to COMPLETED. DataSource in the COMPLETED or
* PENDING state can be used only to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.
*
*
* If Amazon ML cannot accept the input source, it sets the Status parameter to FAILED and includes an error message
* in the Message attribute of the GetDataSource operation response.
*
*
* @param createDataSourceFromRdsRequest
* @return A Java Future containing the result of the CreateDataSourceFromRDS operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - InternalServerException An error on the server occurred when trying to process a request.
* - IdempotentParameterMismatchException A second request to use or change an object was not allowed.
* This can result from retrying a request using a parameter that was not present in the original request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.CreateDataSourceFromRDS
*/
@Override
public CompletableFuture<CreateDataSourceFromRdsResponse> createDataSourceFromRDS(
CreateDataSourceFromRdsRequest createDataSourceFromRdsRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, createDataSourceFromRdsRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "CreateDataSourceFromRDS");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<CreateDataSourceFromRdsResponse> responseHandler = protocolFactory.createResponseHandler(
operationMetadata, CreateDataSourceFromRdsResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<CreateDataSourceFromRdsResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<CreateDataSourceFromRdsRequest, CreateDataSourceFromRdsResponse>()
.withOperationName("CreateDataSourceFromRDS")
.withMarshaller(new CreateDataSourceFromRdsRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(createDataSourceFromRdsRequest));
CompletableFuture<CreateDataSourceFromRdsResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Creates a DataSource from a database hosted on an Amazon Redshift cluster. A DataSource references data that can
* be used to perform either CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.
*
*
* CreateDataSourceFromRedshift is an asynchronous operation. In response to CreateDataSourceFromRedshift, Amazon
* Machine Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource
* is created and ready for use, Amazon ML sets the Status parameter to COMPLETED. DataSource in COMPLETED or
* PENDING states can be used to perform only CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.
*
*
* If Amazon ML can't accept the input source, it sets the Status parameter to FAILED and includes an error message
* in the Message attribute of the GetDataSource operation response.
*
*
* The observations should be contained in the database hosted on an Amazon Redshift cluster and should be specified
* by a SelectSqlQuery query. Amazon ML executes an Unload command in Amazon Redshift to transfer the result set of
* the SelectSqlQuery query to S3StagingLocation.
*
*
* After the DataSource has been created, it's ready for use in evaluations and batch predictions. If you plan to
* use the DataSource to train an MLModel, the DataSource also requires a recipe. A recipe describes how each input
* variable will be used in training an MLModel. Will the variable be included or excluded from training? Will the
* variable be manipulated; for example, will it be combined with another variable or will it be split apart into
* word combinations? The recipe provides answers to these questions.
*
*
* You can't change an existing datasource, but you can copy and modify the settings from an existing Amazon
* Redshift datasource to create a new datasource. To do so, call GetDataSource for an existing datasource and copy
* the values to a CreateDataSource call. Change the settings that you want to change and make sure that all
* required fields have the appropriate values.
*
*
* @param createDataSourceFromRedshiftRequest
* @return A Java Future containing the result of the CreateDataSourceFromRedshift operation returned by the
* service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - InternalServerException An error on the server occurred when trying to process a request.
* - IdempotentParameterMismatchException A second request to use or change an object was not allowed.
* This can result from retrying a request using a parameter that was not present in the original request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.CreateDataSourceFromRedshift
*/
@Override
public CompletableFuture<CreateDataSourceFromRedshiftResponse> createDataSourceFromRedshift(
CreateDataSourceFromRedshiftRequest createDataSourceFromRedshiftRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, createDataSourceFromRedshiftRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "CreateDataSourceFromRedshift");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<CreateDataSourceFromRedshiftResponse> responseHandler = protocolFactory.createResponseHandler(
operationMetadata, CreateDataSourceFromRedshiftResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<CreateDataSourceFromRedshiftResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<CreateDataSourceFromRedshiftRequest, CreateDataSourceFromRedshiftResponse>()
.withOperationName("CreateDataSourceFromRedshift")
.withMarshaller(new CreateDataSourceFromRedshiftRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(createDataSourceFromRedshiftRequest));
CompletableFuture<CreateDataSourceFromRedshiftResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Creates a DataSource object. A DataSource references data that can be used to perform CreateMLModel,
* CreateEvaluation, or CreateBatchPrediction operations.
*
*
* CreateDataSourceFromS3 is an asynchronous operation. In response to CreateDataSourceFromS3, Amazon Machine
* Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource has been
* created and is ready for use, Amazon ML sets the Status parameter to COMPLETED. DataSource in the COMPLETED or
* PENDING state can be used to perform only CreateMLModel, CreateEvaluation or CreateBatchPrediction operations.
*
*
* If Amazon ML can't accept the input source, it sets the Status parameter to FAILED and includes an error message
* in the Message attribute of the GetDataSource operation response.
*
*
* The observation data used in a DataSource should be ready to use; that is, it should have a consistent structure,
* and missing data values should be kept to a minimum. The observation data must reside in one or more .csv files
* in an Amazon Simple Storage Service (Amazon S3) location, along with a schema that describes the data items by
* name and type. The same schema must be used for all of the data files referenced by the DataSource.
*
*
* After the DataSource has been created, it's ready to use in evaluations and batch predictions. If you plan to use
* the DataSource to train an MLModel, the DataSource also needs a recipe. A recipe describes how each input
* variable will be used in training an MLModel. Will the variable be included or excluded from training? Will the
* variable be manipulated; for example, will it be combined with another variable or will it be split apart into
* word combinations? The recipe provides answers to these questions.
*
*
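* A minimal usage sketch, assuming an existing MachineLearningAsyncClient named client and placeholder ids and S3
* locations; the Redshift and RDS variants follow the same shape with their respective data specs:
*
* <pre>{@code
* CreateDataSourceFromS3Request request = CreateDataSourceFromS3Request.builder()
*         .dataSourceId("example-ds-id")              // placeholder id chosen by the caller
*         .dataSourceName("example-training-data")
*         .dataSpec(S3DataSpec.builder()
*                 .dataLocationS3("s3://example-bucket/observations.csv")               // placeholder .csv location
*                 .dataSchemaLocationS3("s3://example-bucket/observations.csv.schema")  // placeholder schema location
*                 .build())
*         .computeStatistics(true)                    // needed if this DataSource will later train an MLModel
*         .build();
* client.createDataSourceFromS3(request).join();
* }</pre>
*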
* @param createDataSourceFromS3Request
* @return A Java Future containing the result of the CreateDataSourceFromS3 operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - InternalServerException An error on the server occurred when trying to process a request.
* - IdempotentParameterMismatchException A second request to use or change an object was not allowed.
* This can result from retrying a request using a parameter that was not present in the original request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.CreateDataSourceFromS3
*/
@Override
public CompletableFuture<CreateDataSourceFromS3Response> createDataSourceFromS3(
CreateDataSourceFromS3Request createDataSourceFromS3Request) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, createDataSourceFromS3Request
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "CreateDataSourceFromS3");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<CreateDataSourceFromS3Response> responseHandler = protocolFactory.createResponseHandler(
operationMetadata, CreateDataSourceFromS3Response::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<CreateDataSourceFromS3Response> executeFuture = clientHandler
.execute(new ClientExecutionParams<CreateDataSourceFromS3Request, CreateDataSourceFromS3Response>()
.withOperationName("CreateDataSourceFromS3")
.withMarshaller(new CreateDataSourceFromS3RequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(createDataSourceFromS3Request));
CompletableFuture<CreateDataSourceFromS3Response> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Creates a new Evaluation of an MLModel. An MLModel is evaluated on a set of observations associated to a
* DataSource. Like a DataSource for an MLModel, the DataSource for an Evaluation contains values for the Target
* Variable. The Evaluation compares the predicted result for each observation to the actual outcome and provides a
* summary so that you know how effective the MLModel functions on the test data. Evaluation generates a relevant
* performance metric, such as BinaryAUC, RegressionRMSE or MulticlassAvgFScore based on the corresponding
* MLModelType: BINARY, REGRESSION or MULTICLASS.
*
*
* CreateEvaluation is an asynchronous operation. In response to CreateEvaluation, Amazon Machine Learning (Amazon
* ML) immediately returns and sets the evaluation status to PENDING. After the Evaluation is created and ready for
* use, Amazon ML sets the status to COMPLETED.
*
*
* You can use the GetEvaluation operation to check progress of the evaluation during the creation operation.
*
*
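* A minimal usage sketch, assuming an existing MachineLearningAsyncClient named client and placeholder ids:
*
* <pre>{@code
* CreateEvaluationRequest request = CreateEvaluationRequest.builder()
*         .evaluationId("example-eval-id")               // placeholder id chosen by the caller
*         .mlModelId("example-model-id")                 // placeholder id of the MLModel to evaluate
*         .evaluationDataSourceId("example-test-ds-id")  // placeholder id of the evaluation DataSource
*         .build();
* client.createEvaluation(request).join();
* }</pre>
*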
* @param createEvaluationRequest
* @return A Java Future containing the result of the CreateEvaluation operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - InternalServerException An error on the server occurred when trying to process a request.
* - IdempotentParameterMismatchException A second request to use or change an object was not allowed.
* This can result from retrying a request using a parameter that was not present in the original request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.CreateEvaluation
*/
@Override
public CompletableFuture<CreateEvaluationResponse> createEvaluation(CreateEvaluationRequest createEvaluationRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, createEvaluationRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "CreateEvaluation");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<CreateEvaluationResponse> responseHandler = protocolFactory.createResponseHandler(
operationMetadata, CreateEvaluationResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<CreateEvaluationResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<CreateEvaluationRequest, CreateEvaluationResponse>()
.withOperationName("CreateEvaluation")
.withMarshaller(new CreateEvaluationRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(createEvaluationRequest));
CompletableFuture<CreateEvaluationResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Creates a new MLModel using the DataSource and the recipe as information sources.
*
*
* An MLModel is nearly immutable. Users can update only the MLModelName and the ScoreThreshold in an MLModel
* without creating a new MLModel.
*
*
* CreateMLModel is an asynchronous operation. In response to CreateMLModel, Amazon Machine Learning (Amazon ML)
* immediately returns and sets the MLModel status to PENDING. After the MLModel has been created and is ready for
* use, Amazon ML sets the status to COMPLETED.
*
*
* You can use the GetMLModel operation to check the progress of the MLModel during the creation operation.
*
*
* CreateMLModel requires a DataSource with computed statistics, which can be created by setting ComputeStatistics
* to true in CreateDataSourceFromRDS, CreateDataSourceFromS3, or CreateDataSourceFromRedshift operations.
*
*
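* A minimal usage sketch, assuming an existing MachineLearningAsyncClient named client and placeholder ids; the
* training DataSource must have been created with ComputeStatistics set to true, as noted above:
*
* <pre>{@code
* CreateMlModelRequest request = CreateMlModelRequest.builder()
*         .mlModelId("example-model-id")           // placeholder id chosen by the caller
*         .mlModelType("BINARY")                   // BINARY, REGRESSION, or MULTICLASS
*         .trainingDataSourceId("example-ds-id")   // placeholder id of a DataSource with computed statistics
*         .build();
* client.createMLModel(request).join();
* }</pre>
*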
* @param createMlModelRequest
* @return A Java Future containing the result of the CreateMLModel operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - InternalServerException An error on the server occurred when trying to process a request.
* - IdempotentParameterMismatchException A second request to use or change an object was not allowed.
* This can result from retrying a request using a parameter that was not present in the original request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.CreateMLModel
*/
@Override
public CompletableFuture<CreateMlModelResponse> createMLModel(CreateMlModelRequest createMlModelRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, createMlModelRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "CreateMLModel");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<CreateMlModelResponse> responseHandler = protocolFactory.createResponseHandler(operationMetadata,
CreateMlModelResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<CreateMlModelResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<CreateMlModelRequest, CreateMlModelResponse>()
.withOperationName("CreateMLModel")
.withMarshaller(new CreateMlModelRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(createMlModelRequest));
CompletableFuture<CreateMlModelResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Creates a real-time endpoint for the MLModel. The endpoint contains the URI of the MLModel; that is, the location
* to send real-time prediction requests for the specified MLModel.
*
*
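* A minimal usage sketch, assuming an existing MachineLearningAsyncClient named client and a placeholder model id;
* the returned endpoint information is where subsequent Predict requests are sent:
*
* <pre>{@code
* CreateRealtimeEndpointRequest request = CreateRealtimeEndpointRequest.builder()
*         .mlModelId("example-model-id")   // placeholder id of an existing MLModel
*         .build();
* client.createRealtimeEndpoint(request).join();
* }</pre>
*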
* @param createRealtimeEndpointRequest
* @return A Java Future containing the result of the CreateRealtimeEndpoint operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - ResourceNotFoundException A specified resource cannot be located.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.CreateRealtimeEndpoint
*/
@Override
public CompletableFuture<CreateRealtimeEndpointResponse> createRealtimeEndpoint(
CreateRealtimeEndpointRequest createRealtimeEndpointRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, createRealtimeEndpointRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "CreateRealtimeEndpoint");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<CreateRealtimeEndpointResponse> responseHandler = protocolFactory.createResponseHandler(
operationMetadata, CreateRealtimeEndpointResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<CreateRealtimeEndpointResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<CreateRealtimeEndpointRequest, CreateRealtimeEndpointResponse>()
.withOperationName("CreateRealtimeEndpoint")
.withMarshaller(new CreateRealtimeEndpointRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(createRealtimeEndpointRequest));
CompletableFuture<CreateRealtimeEndpointResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Assigns the DELETED status to a BatchPrediction, rendering it unusable.
*
*
* After using the DeleteBatchPrediction operation, you can use the GetBatchPrediction operation to verify that the
* status of the BatchPrediction changed to DELETED.
*
*
* Caution: The result of the DeleteBatchPrediction operation is irreversible.
*
*
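* A minimal usage sketch, assuming an existing MachineLearningAsyncClient named client and a placeholder id; the
* other Delete* operations in this client follow the same single-id pattern:
*
* <pre>{@code
* DeleteBatchPredictionRequest request = DeleteBatchPredictionRequest.builder()
*         .batchPredictionId("example-bp-id")    // placeholder id of the BatchPrediction to delete
*         .build();
* client.deleteBatchPrediction(request).join(); // irreversible, as noted above
* }</pre>
*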
* @param deleteBatchPredictionRequest
* @return A Java Future containing the result of the DeleteBatchPrediction operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - ResourceNotFoundException A specified resource cannot be located.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.DeleteBatchPrediction
*/
@Override
public CompletableFuture<DeleteBatchPredictionResponse> deleteBatchPrediction(
DeleteBatchPredictionRequest deleteBatchPredictionRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, deleteBatchPredictionRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeleteBatchPrediction");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<DeleteBatchPredictionResponse> responseHandler = protocolFactory.createResponseHandler(
operationMetadata, DeleteBatchPredictionResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<DeleteBatchPredictionResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<DeleteBatchPredictionRequest, DeleteBatchPredictionResponse>()
.withOperationName("DeleteBatchPrediction")
.withMarshaller(new DeleteBatchPredictionRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(deleteBatchPredictionRequest));
CompletableFuture<DeleteBatchPredictionResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Assigns the DELETED status to a DataSource, rendering it unusable.
*
*
* After using the DeleteDataSource operation, you can use the GetDataSource operation to verify that the status of
* the DataSource changed to DELETED.
*
*
* Caution: The results of the DeleteDataSource operation are irreversible.
*
*
* @param deleteDataSourceRequest
* @return A Java Future containing the result of the DeleteDataSource operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - ResourceNotFoundException A specified resource cannot be located.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.DeleteDataSource
*/
@Override
public CompletableFuture<DeleteDataSourceResponse> deleteDataSource(DeleteDataSourceRequest deleteDataSourceRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, deleteDataSourceRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeleteDataSource");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<DeleteDataSourceResponse> responseHandler = protocolFactory.createResponseHandler(
operationMetadata, DeleteDataSourceResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<DeleteDataSourceResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<DeleteDataSourceRequest, DeleteDataSourceResponse>()
.withOperationName("DeleteDataSource")
.withMarshaller(new DeleteDataSourceRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(deleteDataSourceRequest));
CompletableFuture<DeleteDataSourceResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Assigns the DELETED status to an Evaluation, rendering it unusable.
*
*
* After invoking the DeleteEvaluation operation, you can use the GetEvaluation operation to verify that the status
* of the Evaluation changed to DELETED.
*
*
* Caution: The results of the DeleteEvaluation operation are irreversible.
*
*
* @param deleteEvaluationRequest
* @return A Java Future containing the result of the DeleteEvaluation operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - ResourceNotFoundException A specified resource cannot be located.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.DeleteEvaluation
*/
@Override
public CompletableFuture<DeleteEvaluationResponse> deleteEvaluation(DeleteEvaluationRequest deleteEvaluationRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, deleteEvaluationRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeleteEvaluation");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<DeleteEvaluationResponse> responseHandler = protocolFactory.createResponseHandler(
operationMetadata, DeleteEvaluationResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<DeleteEvaluationResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<DeleteEvaluationRequest, DeleteEvaluationResponse>()
.withOperationName("DeleteEvaluation")
.withMarshaller(new DeleteEvaluationRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(deleteEvaluationRequest));
CompletableFuture<DeleteEvaluationResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Assigns the DELETED status to an MLModel, rendering it unusable.
*
*
* After using the DeleteMLModel operation, you can use the GetMLModel operation to verify that the status of the
* MLModel changed to DELETED.
*
*
* Caution: The result of the DeleteMLModel operation is irreversible.
*
*
* @param deleteMlModelRequest
* @return A Java Future containing the result of the DeleteMLModel operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - ResourceNotFoundException A specified resource cannot be located.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.DeleteMLModel
*/
@Override
public CompletableFuture<DeleteMlModelResponse> deleteMLModel(DeleteMlModelRequest deleteMlModelRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, deleteMlModelRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeleteMLModel");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<DeleteMlModelResponse> responseHandler = protocolFactory.createResponseHandler(operationMetadata,
DeleteMlModelResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<DeleteMlModelResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<DeleteMlModelRequest, DeleteMlModelResponse>()
.withOperationName("DeleteMLModel")
.withMarshaller(new DeleteMlModelRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(deleteMlModelRequest));
CompletableFuture<DeleteMlModelResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Deletes a real-time endpoint of an MLModel.
*
*
* @param deleteRealtimeEndpointRequest
* @return A Java Future containing the result of the DeleteRealtimeEndpoint operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - ResourceNotFoundException A specified resource cannot be located.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.DeleteRealtimeEndpoint
*/
@Override
public CompletableFuture<DeleteRealtimeEndpointResponse> deleteRealtimeEndpoint(
DeleteRealtimeEndpointRequest deleteRealtimeEndpointRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, deleteRealtimeEndpointRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeleteRealtimeEndpoint");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<DeleteRealtimeEndpointResponse> responseHandler = protocolFactory.createResponseHandler(
operationMetadata, DeleteRealtimeEndpointResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<DeleteRealtimeEndpointResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<DeleteRealtimeEndpointRequest, DeleteRealtimeEndpointResponse>()
.withOperationName("DeleteRealtimeEndpoint")
.withMarshaller(new DeleteRealtimeEndpointRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(deleteRealtimeEndpointRequest));
CompletableFuture<DeleteRealtimeEndpointResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Deletes the specified tags associated with an ML object. After this operation is complete, you can't recover
* deleted tags.
*
*
* If you specify a tag that doesn't exist, Amazon ML ignores it.
*
*
* @param deleteTagsRequest
* @return A Java Future containing the result of the DeleteTags operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - InvalidTagException
* - ResourceNotFoundException A specified resource cannot be located.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.DeleteTags
*/
@Override
public CompletableFuture<DeleteTagsResponse> deleteTags(DeleteTagsRequest deleteTagsRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, deleteTagsRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeleteTags");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<DeleteTagsResponse> responseHandler = protocolFactory.createResponseHandler(operationMetadata,
DeleteTagsResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<DeleteTagsResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<DeleteTagsRequest, DeleteTagsResponse>().withOperationName("DeleteTags")
.withMarshaller(new DeleteTagsRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(deleteTagsRequest));
CompletableFuture<DeleteTagsResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Returns a list of BatchPrediction operations that match the search criteria in the request.
*
*
* @param describeBatchPredictionsRequest
* @return A Java Future containing the result of the DescribeBatchPredictions operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.DescribeBatchPredictions
*/
@Override
public CompletableFuture<DescribeBatchPredictionsResponse> describeBatchPredictions(
DescribeBatchPredictionsRequest describeBatchPredictionsRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, describeBatchPredictionsRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DescribeBatchPredictions");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<DescribeBatchPredictionsResponse> responseHandler = protocolFactory.createResponseHandler(
operationMetadata, DescribeBatchPredictionsResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<DescribeBatchPredictionsResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<DescribeBatchPredictionsRequest, DescribeBatchPredictionsResponse>()
.withOperationName("DescribeBatchPredictions")
.withMarshaller(new DescribeBatchPredictionsRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(describeBatchPredictionsRequest));
CompletableFuture<DescribeBatchPredictionsResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Returns a list of BatchPrediction operations that match the search criteria in the request.
*
*
*
* This is a variant of
* {@link #describeBatchPredictions(software.amazon.awssdk.services.machinelearning.model.DescribeBatchPredictionsRequest)}
* operation. The return type is a custom publisher that can be subscribed to request a stream of response pages.
* SDK will internally handle making service calls for you.
*
*
* When the operation is called, an instance of this class is returned. At this point, no service calls are made yet
* and so there is no guarantee that the request is valid. If there are errors in your request, you will see the
* failures only after you start streaming the data. The subscribe method should be called as a request to start
* streaming data. For more info, see
* {@link org.reactivestreams.Publisher#subscribe(org.reactivestreams.Subscriber)}. Each call to the subscribe
* method will result in a new {@link org.reactivestreams.Subscription} i.e., a new contract to stream data from the
* starting request.
*
*
*
* The following are a few ways to use the response class:
*
* 1) Using the subscribe helper method
*
*
* {@code
* software.amazon.awssdk.services.machinelearning.paginators.DescribeBatchPredictionsPublisher publisher = client.describeBatchPredictionsPaginator(request);
* CompletableFuture<Void> future = publisher.subscribe(res -> { // Do something with the response });
* future.get();
* }
*
*
* 2) Using a custom subscriber
*
*
* {@code
* software.amazon.awssdk.services.machinelearning.paginators.DescribeBatchPredictionsPublisher publisher = client.describeBatchPredictionsPaginator(request);
* publisher.subscribe(new Subscriber<DescribeBatchPredictionsResponse>() {
*
* public void onSubscribe(org.reactivestreams.Subscription subscription) { //... };
*
*
* public void onNext(software.amazon.awssdk.services.machinelearning.model.DescribeBatchPredictionsResponse response) { //... };
* });}
*
*
* As the response is a publisher, it can work well with third party reactive streams implementations like RxJava2.
*
* Please notice that the configuration of Limit won't limit the number of results you get with the paginator. It
* only limits the number of results in each page.
*
*
* Note: If you prefer to have control on service calls, use the
* {@link #describeBatchPredictions(software.amazon.awssdk.services.machinelearning.model.DescribeBatchPredictionsRequest)}
* operation.
*
*
* @param describeBatchPredictionsRequest
* @return A custom publisher that can be subscribed to request a stream of response pages.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.DescribeBatchPredictions
*/
public DescribeBatchPredictionsPublisher describeBatchPredictionsPaginator(
DescribeBatchPredictionsRequest describeBatchPredictionsRequest) {
return new DescribeBatchPredictionsPublisher(this, applyPaginatorUserAgent(describeBatchPredictionsRequest));
}
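/*
 * Usage sketch (illustrative): the publisher returned above can be drained with the subscribe helper shown in
 * the Javadoc. Assuming the response pages expose their contents via results(), iterating every page might
 * look like this:
 *
 *   DescribeBatchPredictionsPublisher publisher =
 *           client.describeBatchPredictionsPaginator(DescribeBatchPredictionsRequest.builder().build());
 *   CompletableFuture<Void> done = publisher.subscribe(page ->
 *           page.results().forEach(bp -> System.out.println(bp.batchPredictionId())));
 *   done.join();
 */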
/**
*
* Returns a list of DataSource that match the search criteria in the request.
*
*
* @param describeDataSourcesRequest
* @return A Java Future containing the result of the DescribeDataSources operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.DescribeDataSources
*/
@Override
public CompletableFuture<DescribeDataSourcesResponse> describeDataSources(
DescribeDataSourcesRequest describeDataSourcesRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, describeDataSourcesRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DescribeDataSources");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<DescribeDataSourcesResponse> responseHandler = protocolFactory.createResponseHandler(
operationMetadata, DescribeDataSourcesResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<DescribeDataSourcesResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<DescribeDataSourcesRequest, DescribeDataSourcesResponse>()
.withOperationName("DescribeDataSources")
.withMarshaller(new DescribeDataSourcesRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(describeDataSourcesRequest));
CompletableFuture<DescribeDataSourcesResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Returns a list of DataSource that match the search criteria in the request.
*
*
*
* This is a variant of
* {@link #describeDataSources(software.amazon.awssdk.services.machinelearning.model.DescribeDataSourcesRequest)}
* operation. The return type is a custom publisher that can be subscribed to request a stream of response pages.
* SDK will internally handle making service calls for you.
*
*
* When the operation is called, an instance of this class is returned. At this point, no service calls are made yet
* and so there is no guarantee that the request is valid. If there are errors in your request, you will see the
* failures only after you start streaming the data. The subscribe method should be called as a request to start
* streaming data. For more info, see
* {@link org.reactivestreams.Publisher#subscribe(org.reactivestreams.Subscriber)}. Each call to the subscribe
* method will result in a new {@link org.reactivestreams.Subscription} i.e., a new contract to stream data from the
* starting request.
*
*
*
* The following are a few ways to use the response class:
*
* 1) Using the subscribe helper method
*
*
* {@code
* software.amazon.awssdk.services.machinelearning.paginators.DescribeDataSourcesPublisher publisher = client.describeDataSourcesPaginator(request);
* CompletableFuture<Void> future = publisher.subscribe(res -> { // Do something with the response });
* future.get();
* }
*
*
* 2) Using a custom subscriber
*
*
* {@code
* software.amazon.awssdk.services.machinelearning.paginators.DescribeDataSourcesPublisher publisher = client.describeDataSourcesPaginator(request);
* publisher.subscribe(new Subscriber<DescribeDataSourcesResponse>() {
*
* public void onSubscribe(org.reactivestreams.Subscription subscription) { //... };
*
*
* public void onNext(software.amazon.awssdk.services.machinelearning.model.DescribeDataSourcesResponse response) { //... };
* });}
*
*
* As the response is a publisher, it can work well with third party reactive streams implementations like RxJava2.
*
* Please notice that the configuration of Limit won't limit the number of results you get with the paginator. It
* only limits the number of results in each page.
*
*
* Note: If you prefer to have control on service calls, use the
* {@link #describeDataSources(software.amazon.awssdk.services.machinelearning.model.DescribeDataSourcesRequest)}
* operation.
*
*
* @param describeDataSourcesRequest
* @return A custom publisher that can be subscribed to request a stream of response pages.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.DescribeDataSources
*/
public DescribeDataSourcesPublisher describeDataSourcesPaginator(DescribeDataSourcesRequest describeDataSourcesRequest) {
return new DescribeDataSourcesPublisher(this, applyPaginatorUserAgent(describeDataSourcesRequest));
}
/**
*
* Returns a list of DescribeEvaluations that match the search criteria in the request.
*
*
* @param describeEvaluationsRequest
* @return A Java Future containing the result of the DescribeEvaluations operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.DescribeEvaluations
*/
@Override
public CompletableFuture<DescribeEvaluationsResponse> describeEvaluations(
DescribeEvaluationsRequest describeEvaluationsRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, describeEvaluationsRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DescribeEvaluations");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<DescribeEvaluationsResponse> responseHandler = protocolFactory.createResponseHandler(
operationMetadata, DescribeEvaluationsResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<DescribeEvaluationsResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<DescribeEvaluationsRequest, DescribeEvaluationsResponse>()
.withOperationName("DescribeEvaluations")
.withMarshaller(new DescribeEvaluationsRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(describeEvaluationsRequest));
CompletableFuture<DescribeEvaluationsResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Returns a list of DescribeEvaluations that match the search criteria in the request.
*
*
*
* This is a variant of
* {@link #describeEvaluations(software.amazon.awssdk.services.machinelearning.model.DescribeEvaluationsRequest)}
* operation. The return type is a custom publisher that can be subscribed to request a stream of response pages.
* SDK will internally handle making service calls for you.
*
*
* When the operation is called, an instance of this class is returned. At this point, no service calls are made yet
* and so there is no guarantee that the request is valid. If there are errors in your request, you will see the
* failures only after you start streaming the data. The subscribe method should be called as a request to start
* streaming data. For more info, see
* {@link org.reactivestreams.Publisher#subscribe(org.reactivestreams.Subscriber)}. Each call to the subscribe
* method will result in a new {@link org.reactivestreams.Subscription} i.e., a new contract to stream data from the
* starting request.
*
*
*
* The following are a few ways to use the response class:
*
* 1) Using the subscribe helper method
*
*
* {@code
* software.amazon.awssdk.services.machinelearning.paginators.DescribeEvaluationsPublisher publisher = client.describeEvaluationsPaginator(request);
* CompletableFuture<Void> future = publisher.subscribe(res -> { // Do something with the response });
* future.get();
* }
*
*
* 2) Using a custom subscriber
*
*
* {@code
* software.amazon.awssdk.services.machinelearning.paginators.DescribeEvaluationsPublisher publisher = client.describeEvaluationsPaginator(request);
* publisher.subscribe(new Subscriber<DescribeEvaluationsResponse>() {
*
* public void onSubscribe(org.reactivestreams.Subscription subscription) { //... };
*
*
* public void onNext(software.amazon.awssdk.services.machinelearning.model.DescribeEvaluationsResponse response) { //... };
* });}
*
*
* As the response is a publisher, it can work well with third party reactive streams implementations like RxJava2.
*
* Please notice that the configuration of Limit won't limit the number of results you get with the paginator. It
* only limits the number of results in each page.
*
*
* Note: If you prefer to have control on service calls, use the
* {@link #describeEvaluations(software.amazon.awssdk.services.machinelearning.model.DescribeEvaluationsRequest)}
* operation.
*
*
* @param describeEvaluationsRequest
* @return A custom publisher that can be subscribed to request a stream of response pages.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.DescribeEvaluations
*/
public DescribeEvaluationsPublisher describeEvaluationsPaginator(DescribeEvaluationsRequest describeEvaluationsRequest) {
return new DescribeEvaluationsPublisher(this, applyPaginatorUserAgent(describeEvaluationsRequest));
}
/**
*
* Returns a list of MLModel that match the search criteria in the request.
*
*
* @param describeMlModelsRequest
* @return A Java Future containing the result of the DescribeMLModels operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.DescribeMLModels
*/
@Override
public CompletableFuture<DescribeMlModelsResponse> describeMLModels(DescribeMlModelsRequest describeMlModelsRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, describeMlModelsRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DescribeMLModels");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<DescribeMlModelsResponse> responseHandler = protocolFactory.createResponseHandler(
operationMetadata, DescribeMlModelsResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<DescribeMlModelsResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<DescribeMlModelsRequest, DescribeMlModelsResponse>()
.withOperationName("DescribeMLModels")
.withMarshaller(new DescribeMlModelsRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(describeMlModelsRequest));
CompletableFuture<DescribeMlModelsResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Returns a list of MLModel that match the search criteria in the request.
*
*
*
* This is a variant of
* {@link #describeMLModels(software.amazon.awssdk.services.machinelearning.model.DescribeMlModelsRequest)}
* operation. The return type is a custom publisher that can be subscribed to request a stream of response pages.
* SDK will internally handle making service calls for you.
*
*
* When the operation is called, an instance of this class is returned. At this point, no service calls are made yet
* and so there is no guarantee that the request is valid. If there are errors in your request, you will see the
* failures only after you start streaming the data. The subscribe method should be called as a request to start
* streaming data. For more info, see
* {@link org.reactivestreams.Publisher#subscribe(org.reactivestreams.Subscriber)}. Each call to the subscribe
* method will result in a new {@link org.reactivestreams.Subscription} i.e., a new contract to stream data from the
* starting request.
*
*
*
* The following are a few ways to use the response class:
*
* 1) Using the subscribe helper method
*
*
* {@code
* software.amazon.awssdk.services.machinelearning.paginators.DescribeMLModelsPublisher publisher = client.describeMLModelsPaginator(request);
* CompletableFuture<Void> future = publisher.subscribe(res -> { // Do something with the response });
* future.get();
* }
*
*
* 2) Using a custom subscriber
*
*
* {@code
* software.amazon.awssdk.services.machinelearning.paginators.DescribeMLModelsPublisher publisher = client.describeMLModelsPaginator(request);
* publisher.subscribe(new Subscriber<DescribeMlModelsResponse>() {
*
* public void onSubscribe(org.reactivestreams.Subscription subscription) { //... };
*
*
* public void onNext(software.amazon.awssdk.services.machinelearning.model.DescribeMlModelsResponse response) { //... };
* });}
*
*
* As the response is a publisher, it can work well with third party reactive streams implementations like RxJava2.
*
* Please notice that the configuration of Limit won't limit the number of results you get with the paginator. It
* only limits the number of results in each page.
*
*
* Note: If you prefer to have control on service calls, use the
* {@link #describeMLModels(software.amazon.awssdk.services.machinelearning.model.DescribeMlModelsRequest)}
* operation.
*
*
* @param describeMlModelsRequest
* @return A custom publisher that can be subscribed to request a stream of response pages.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.DescribeMLModels
*/
public DescribeMLModelsPublisher describeMLModelsPaginator(DescribeMlModelsRequest describeMlModelsRequest) {
return new DescribeMLModelsPublisher(this, applyPaginatorUserAgent(describeMlModelsRequest));
}
/**
*
* Describes one or more of the tags for your Amazon ML object.
*
*
* @param describeTagsRequest
* @return A Java Future containing the result of the DescribeTags operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - ResourceNotFoundException A specified resource cannot be located.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.DescribeTags
*/
@Override
public CompletableFuture<DescribeTagsResponse> describeTags(DescribeTagsRequest describeTagsRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, describeTagsRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DescribeTags");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<DescribeTagsResponse> responseHandler = protocolFactory.createResponseHandler(operationMetadata,
DescribeTagsResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<DescribeTagsResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<DescribeTagsRequest, DescribeTagsResponse>()
.withOperationName("DescribeTags").withMarshaller(new DescribeTagsRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(describeTagsRequest));
CompletableFuture<DescribeTagsResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Returns a BatchPrediction that includes detailed metadata, status, and data file information for a
* Batch Prediction request.
*
*
* @param getBatchPredictionRequest
* @return A Java Future containing the result of the GetBatchPrediction operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - ResourceNotFoundException A specified resource cannot be located.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.GetBatchPrediction
*/
@Override
public CompletableFuture<GetBatchPredictionResponse> getBatchPrediction(GetBatchPredictionRequest getBatchPredictionRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getBatchPredictionRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetBatchPrediction");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<GetBatchPredictionResponse> responseHandler = protocolFactory.createResponseHandler(
operationMetadata, GetBatchPredictionResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<GetBatchPredictionResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<GetBatchPredictionRequest, GetBatchPredictionResponse>()
.withOperationName("GetBatchPrediction")
.withMarshaller(new GetBatchPredictionRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getBatchPredictionRequest));
CompletableFuture<GetBatchPredictionResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Returns a DataSource that includes metadata and data file information, as well as the current status
* of the DataSource.
*
*
* GetDataSource provides results in normal or verbose format. The verbose format adds the schema
* description and the list of files pointed to by the DataSource to the normal format.
*
*
* @param getDataSourceRequest
* @return A Java Future containing the result of the GetDataSource operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - ResourceNotFoundException A specified resource cannot be located.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.GetDataSource
*/
@Override
public CompletableFuture<GetDataSourceResponse> getDataSource(GetDataSourceRequest getDataSourceRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getDataSourceRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetDataSource");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<GetDataSourceResponse> responseHandler = protocolFactory.createResponseHandler(operationMetadata,
GetDataSourceResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<GetDataSourceResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<GetDataSourceRequest, GetDataSourceResponse>()
.withOperationName("GetDataSource")
.withMarshaller(new GetDataSourceRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getDataSourceRequest));
CompletableFuture<GetDataSourceResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Returns an Evaluation that includes metadata as well as the current status of the
* Evaluation.
*
*
* @param getEvaluationRequest
* @return A Java Future containing the result of the GetEvaluation operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - ResourceNotFoundException A specified resource cannot be located.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.GetEvaluation
*/
@Override
public CompletableFuture<GetEvaluationResponse> getEvaluation(GetEvaluationRequest getEvaluationRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getEvaluationRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetEvaluation");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<GetEvaluationResponse> responseHandler = protocolFactory.createResponseHandler(operationMetadata,
GetEvaluationResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<GetEvaluationResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<GetEvaluationRequest, GetEvaluationResponse>()
.withOperationName("GetEvaluation")
.withMarshaller(new GetEvaluationRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getEvaluationRequest));
CompletableFuture<GetEvaluationResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Returns an MLModel that includes detailed metadata, data source information, and the current status
* of the MLModel.
*
*
* GetMLModel provides results in normal or verbose format.
*
*
* @param getMlModelRequest
* @return A Java Future containing the result of the GetMLModel operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - ResourceNotFoundException A specified resource cannot be located.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.GetMLModel
*/
@Override
public CompletableFuture<GetMlModelResponse> getMLModel(GetMlModelRequest getMlModelRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getMlModelRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetMLModel");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<GetMlModelResponse> responseHandler = protocolFactory.createResponseHandler(operationMetadata,
GetMlModelResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<GetMlModelResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<GetMlModelRequest, GetMlModelResponse>().withOperationName("GetMLModel")
.withMarshaller(new GetMlModelRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getMlModelRequest));
CompletableFuture<GetMlModelResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Generates a prediction for the observation using the specified ML Model.
*
*
* Note: Not all response parameters will be populated. Whether a response parameter is populated depends on
* the type of model requested.
*
*
* @param predictRequest
* @return A Java Future containing the result of the Predict operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - ResourceNotFoundException A specified resource cannot be located.
* - LimitExceededException The subscriber exceeded the maximum number of operations. This exception can
* occur when listing objects such as DataSource.
* - InternalServerException An error on the server occurred when trying to process a request.
* - PredictorNotMountedException The exception is thrown when a predict request is made to an unmounted
* MLModel.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.Predict
*/
@Override
public CompletableFuture<PredictResponse> predict(PredictRequest predictRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, predictRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "Predict");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<PredictResponse> responseHandler = protocolFactory.createResponseHandler(operationMetadata,
PredictResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<PredictResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<PredictRequest, PredictResponse>().withOperationName("Predict")
.withMarshaller(new PredictRequestMarshaller(protocolFactory)).withResponseHandler(responseHandler)
.withErrorResponseHandler(errorResponseHandler).withMetricCollector(apiCallMetricCollector)
.withInput(predictRequest));
CompletableFuture<PredictResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
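/*
 * Usage sketch (illustrative): a real-time prediction needs the model id, the record of observation features,
 * and the model's real-time endpoint. The values below are hypothetical, and the builder member names
 * (mlModelId, record, predictEndpoint) are assumed to follow the service model:
 *
 *   PredictRequest request = PredictRequest.builder()
 *           .mlModelId("ml-example-id")
 *           .record(java.util.Collections.singletonMap("feature1", "value1"))
 *           .predictEndpoint("https://realtime.example.amazonaws.com") // endpoint reported for the model
 *           .build();
 *   client.predict(request).thenAccept(response -> System.out.println(response.prediction()));
 */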
/**
*
* Updates the BatchPredictionName of a BatchPrediction.
*
*
* You can use the GetBatchPrediction operation to view the contents of the updated data element.
*
*
* @param updateBatchPredictionRequest
* @return A Java Future containing the result of the UpdateBatchPrediction operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - ResourceNotFoundException A specified resource cannot be located.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.UpdateBatchPrediction
*/
@Override
public CompletableFuture<UpdateBatchPredictionResponse> updateBatchPrediction(
UpdateBatchPredictionRequest updateBatchPredictionRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, updateBatchPredictionRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "UpdateBatchPrediction");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<UpdateBatchPredictionResponse> responseHandler = protocolFactory.createResponseHandler(
operationMetadata, UpdateBatchPredictionResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<UpdateBatchPredictionResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<UpdateBatchPredictionRequest, UpdateBatchPredictionResponse>()
.withOperationName("UpdateBatchPrediction")
.withMarshaller(new UpdateBatchPredictionRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(updateBatchPredictionRequest));
CompletableFuture<UpdateBatchPredictionResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Updates the DataSourceName of a DataSource.
*
*
* You can use the GetDataSource operation to view the contents of the updated data element.
*
*
* @param updateDataSourceRequest
* @return A Java Future containing the result of the UpdateDataSource operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - ResourceNotFoundException A specified resource cannot be located.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.UpdateDataSource
*/
@Override
public CompletableFuture<UpdateDataSourceResponse> updateDataSource(UpdateDataSourceRequest updateDataSourceRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, updateDataSourceRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "UpdateDataSource");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<UpdateDataSourceResponse> responseHandler = protocolFactory.createResponseHandler(
operationMetadata, UpdateDataSourceResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<UpdateDataSourceResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<UpdateDataSourceRequest, UpdateDataSourceResponse>()
.withOperationName("UpdateDataSource")
.withMarshaller(new UpdateDataSourceRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(updateDataSourceRequest));
CompletableFuture<UpdateDataSourceResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Updates the EvaluationName of an Evaluation.
*
*
* You can use the GetEvaluation operation to view the contents of the updated data element.
*
*
* @param updateEvaluationRequest
* @return A Java Future containing the result of the UpdateEvaluation operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - ResourceNotFoundException A specified resource cannot be located.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.UpdateEvaluation
*/
@Override
public CompletableFuture<UpdateEvaluationResponse> updateEvaluation(UpdateEvaluationRequest updateEvaluationRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, updateEvaluationRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "UpdateEvaluation");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<UpdateEvaluationResponse> responseHandler = protocolFactory.createResponseHandler(
operationMetadata, UpdateEvaluationResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<UpdateEvaluationResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<UpdateEvaluationRequest, UpdateEvaluationResponse>()
.withOperationName("UpdateEvaluation")
.withMarshaller(new UpdateEvaluationRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(updateEvaluationRequest));
CompletableFuture<UpdateEvaluationResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
/**
*
* Updates the MLModelName and the ScoreThreshold of an MLModel.
*
*
* You can use the GetMLModel operation to view the contents of the updated data element.
*
*
* @param updateMlModelRequest
* @return A Java Future containing the result of the UpdateMLModel operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following
* exceptions.
*
* - InvalidInputException An error on the client occurred. Typically, the cause is an invalid input
* value.
* - ResourceNotFoundException A specified resource cannot be located.
* - InternalServerException An error on the server occurred when trying to process a request.
* - SdkException Base class for all exceptions that can be thrown by the SDK (both service and client).
* Can be used for catch all scenarios.
* - SdkClientException If any client side error occurs such as an IO related failure, failure to get
* credentials, etc.
* - MachineLearningException Base class for all service exceptions. Unknown exceptions will be thrown as
* an instance of this type.
*
* @sample MachineLearningAsyncClient.UpdateMLModel
*/
@Override
public CompletableFuture<UpdateMlModelResponse> updateMLModel(UpdateMlModelRequest updateMlModelRequest) {
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, updateMlModelRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Machine Learning");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "UpdateMLModel");
JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
.isPayloadJson(true).build();
HttpResponseHandler<UpdateMlModelResponse> responseHandler = protocolFactory.createResponseHandler(operationMetadata,
UpdateMlModelResponse::builder);
HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
operationMetadata);
CompletableFuture<UpdateMlModelResponse> executeFuture = clientHandler
.execute(new ClientExecutionParams<UpdateMlModelRequest, UpdateMlModelResponse>()
.withOperationName("UpdateMLModel")
.withMarshaller(new UpdateMlModelRequestMarshaller(protocolFactory))
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(updateMlModelRequest));
CompletableFuture<UpdateMlModelResponse> whenCompleted = executeFuture.whenComplete((r, e) -> {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
});
executeFuture = CompletableFutureUtils.forwardExceptionTo(whenCompleted, executeFuture);
return executeFuture;
} catch (Throwable t) {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
return CompletableFutureUtils.failedFuture(t);
}
}
@Override
public void close() {
clientHandler.close();
}
private <T extends BaseAwsJsonProtocolFactory.Builder<T>> T init(T builder) {
return builder
.clientConfiguration(clientConfiguration)
.defaultServiceExceptionSupplier(MachineLearningException::builder)
.protocol(AwsJsonProtocol.AWS_JSON)
.protocolVersion("1.1")
.registerModeledException(
ExceptionMetadata.builder().errorCode("InvalidTagException")
.exceptionBuilderSupplier(InvalidTagException::builder).httpStatusCode(400).build())
.registerModeledException(
ExceptionMetadata.builder().errorCode("ResourceNotFoundException")
.exceptionBuilderSupplier(ResourceNotFoundException::builder).httpStatusCode(400).build())
.registerModeledException(
ExceptionMetadata.builder().errorCode("InvalidInputException")
.exceptionBuilderSupplier(InvalidInputException::builder).httpStatusCode(400).build())
.registerModeledException(
ExceptionMetadata.builder().errorCode("IdempotentParameterMismatchException")
.exceptionBuilderSupplier(IdempotentParameterMismatchException::builder).httpStatusCode(400)
.build())
.registerModeledException(
ExceptionMetadata.builder().errorCode("TagLimitExceededException")
.exceptionBuilderSupplier(TagLimitExceededException::builder).httpStatusCode(400).build())
.registerModeledException(
ExceptionMetadata.builder().errorCode("InternalServerException")
.exceptionBuilderSupplier(InternalServerException::builder).httpStatusCode(500).build())
.registerModeledException(
ExceptionMetadata.builder().errorCode("LimitExceededException")
.exceptionBuilderSupplier(LimitExceededException::builder).httpStatusCode(400).build())
.registerModeledException(
ExceptionMetadata.builder().errorCode("PredictorNotMountedException")
.exceptionBuilderSupplier(PredictorNotMountedException::builder).httpStatusCode(400).build());
}
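/*
 * Usage sketch (illustrative): the error codes registered above are unmarshalled into the corresponding
 * modeled exception types, so a caller can branch on them when a future completes exceptionally, e.g.:
 *
 *   client.getMLModel(GetMlModelRequest.builder().mlModelId("ml-example-id").build())
 *         .exceptionally(t -> {
 *             Throwable cause = t.getCause() != null ? t.getCause() : t;
 *             if (cause instanceof ResourceNotFoundException) {
 *                 // no MLModel with that id
 *             } else if (cause instanceof MachineLearningException) {
 *                 // any other modeled service error
 *             }
 *             return null;
 *         });
 */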
private static List<MetricPublisher> resolveMetricPublishers(SdkClientConfiguration clientConfiguration,
RequestOverrideConfiguration requestOverrideConfiguration) {
List<MetricPublisher> publishers = null;
if (requestOverrideConfiguration != null) {
publishers = requestOverrideConfiguration.metricPublishers();
}
if (publishers == null || publishers.isEmpty()) {
publishers = clientConfiguration.option(SdkClientOption.METRIC_PUBLISHERS);
}
if (publishers == null) {
publishers = Collections.emptyList();
}
return publishers;
}
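/*
 * Usage sketch (illustrative): the publishers resolved here come either from the per-request override
 * configuration or from the client configuration. Attaching one to a single request could look like this
 * (the MetricPublisher implementation is the caller's choice and is assumed to exist):
 *
 *   MetricPublisher myPublisher = ...; // e.g. a CloudWatch or logging publisher
 *   DescribeTagsRequest request = DescribeTagsRequest.builder()
 *           .overrideConfiguration(c -> c.addMetricPublisher(myPublisher))
 *           .build();
 */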
private <T extends MachineLearningRequest> T applyPaginatorUserAgent(T request) {
Consumer<AwsRequestOverrideConfiguration.Builder> userAgentApplier = b -> b.addApiName(ApiName.builder()
.version(VersionInfo.SDK_VERSION).name("PAGINATED").build());
AwsRequestOverrideConfiguration overrideConfiguration = request.overrideConfiguration()
.map(c -> c.toBuilder().applyMutation(userAgentApplier).build())
.orElse((AwsRequestOverrideConfiguration.builder().applyMutation(userAgentApplier).build()));
return (T) request.toBuilder().overrideConfiguration(overrideConfiguration).build();
}
private HttpResponseHandler<AwsServiceException> createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory,
JsonOperationMetadata operationMetadata) {
return protocolFactory.createErrorResponseHandler(operationMetadata);
}
@Override
public MachineLearningAsyncWaiter waiter() {
return MachineLearningAsyncWaiter.builder().client(this).scheduledExecutorService(executorService).build();
}
}