
com.amazonaws.services.machinelearning.AmazonMachineLearningClient Maven / Gradle / Ivy


The AWS SDK for Java with support for OSGi. The AWS SDK for Java provides Java APIs for building software on AWS' cost-effective, scalable, and reliable infrastructure products. The AWS Java SDK allows developers to code against APIs for all of Amazon's infrastructure web services (Amazon S3, Amazon EC2, Amazon SQS, Amazon Relational Database Service, Amazon AutoScaling, etc.).

/*
 * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights
 * Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.machinelearning;

import org.w3c.dom.*;

import java.net.*;
import java.util.*;
import java.util.Map.Entry;

import org.apache.commons.logging.*;

import com.amazonaws.*;
import com.amazonaws.auth.*;
import com.amazonaws.handlers.*;
import com.amazonaws.http.*;
import com.amazonaws.internal.*;
import com.amazonaws.metrics.*;
import com.amazonaws.regions.*;
import com.amazonaws.transform.*;
import com.amazonaws.util.*;
import com.amazonaws.protocol.json.*;
import com.amazonaws.util.AWSRequestMetrics.Field;
import com.amazonaws.annotation.ThreadSafe;

import com.amazonaws.services.machinelearning.model.*;
import com.amazonaws.services.machinelearning.model.transform.*;

/**
 * Client for accessing Amazon Machine Learning. All service calls made using
 * this client are blocking, and will not return until the service call
 * completes.
 * <p>
 * Definition of the public APIs exposed by Amazon Machine Learning
 */
@ThreadSafe
public class AmazonMachineLearningClient extends AmazonWebServiceClient implements AmazonMachineLearning {

    /** Provider for AWS credentials. */
    private AWSCredentialsProvider awsCredentialsProvider;

    private static final Log log = LogFactory.getLog(AmazonMachineLearning.class);

    /** Default signing name for the service. */
    private static final String DEFAULT_SIGNING_NAME = "machinelearning";

    /** The region metadata service name for computing region endpoints. */
    private static final String DEFAULT_ENDPOINT_PREFIX = "machinelearning";

    /**
     * Client configuration factory providing ClientConfigurations tailored to this client
     */
    protected static final ClientConfigurationFactory configFactory = new ClientConfigurationFactory();

    private final SdkJsonProtocolFactory protocolFactory = new SdkJsonProtocolFactory(
            new JsonClientMetadata()
                    .withProtocolVersion("1.1")
                    .withSupportsCbor(false)
                    .addErrorMetadata(new JsonErrorShapeMetadata()
                            .withErrorCode("InternalServerException")
                            .withModeledClass(com.amazonaws.services.machinelearning.model.InternalServerException.class))
                    .addErrorMetadata(new JsonErrorShapeMetadata()
                            .withErrorCode("LimitExceededException")
                            .withModeledClass(com.amazonaws.services.machinelearning.model.LimitExceededException.class))
                    .addErrorMetadata(new JsonErrorShapeMetadata()
                            .withErrorCode("InvalidInputException")
                            .withModeledClass(com.amazonaws.services.machinelearning.model.InvalidInputException.class))
                    .addErrorMetadata(new JsonErrorShapeMetadata()
                            .withErrorCode("IdempotentParameterMismatchException")
                            .withModeledClass(com.amazonaws.services.machinelearning.model.IdempotentParameterMismatchException.class))
                    .addErrorMetadata(new JsonErrorShapeMetadata()
                            .withErrorCode("PredictorNotMountedException")
                            .withModeledClass(com.amazonaws.services.machinelearning.model.PredictorNotMountedException.class))
                    .addErrorMetadata(new JsonErrorShapeMetadata()
                            .withErrorCode("ResourceNotFoundException")
                            .withModeledClass(com.amazonaws.services.machinelearning.model.ResourceNotFoundException.class)));

    /**
     * Constructs a new client to invoke service methods on Amazon Machine Learning. A credentials provider chain will
     * be used that searches for credentials in this order:
     * <ul>
     * <li>Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY</li>
     * <li>Java System Properties - aws.accessKeyId and aws.secretKey</li>
     * <li>Instance profile credentials delivered through the Amazon EC2 metadata service</li>
     * </ul>
     * <p>
     * All service calls made using this new client object are blocking, and will not return until the service call
     * completes.
     *
     * @see DefaultAWSCredentialsProviderChain
     */
    public AmazonMachineLearningClient() {
        this(new DefaultAWSCredentialsProviderChain(), configFactory.getConfig());
    }

    /**
     * Constructs a new client to invoke service methods on Amazon Machine Learning. A credentials provider chain will
     * be used that searches for credentials in this order:
     * <ul>
     * <li>Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY</li>
     * <li>Java System Properties - aws.accessKeyId and aws.secretKey</li>
     * <li>Instance profile credentials delivered through the Amazon EC2 metadata service</li>
     * </ul>
     * <p>
     * All service calls made using this new client object are blocking, and will not return until the service call
     * completes.
     *
     * @param clientConfiguration
     *        The client configuration options controlling how this client connects to Amazon Machine Learning (ex:
     *        proxy settings, retry counts, etc.).
     *
     * @see DefaultAWSCredentialsProviderChain
     */
    public AmazonMachineLearningClient(ClientConfiguration clientConfiguration) {
        this(new DefaultAWSCredentialsProviderChain(), clientConfiguration);
    }

    /**
     * Constructs a new client to invoke service methods on Amazon Machine Learning using the specified AWS account
     * credentials.
     * <p>
     * All service calls made using this new client object are blocking, and will not return until the service call
     * completes.
     *
     * @param awsCredentials
     *        The AWS credentials (access key ID and secret key) to use when authenticating with AWS services.
     */
    public AmazonMachineLearningClient(AWSCredentials awsCredentials) {
        this(awsCredentials, configFactory.getConfig());
    }

    /**
     * Constructs a new client to invoke service methods on Amazon Machine Learning using the specified AWS account
     * credentials and client configuration options.
     * <p>
     * All service calls made using this new client object are blocking, and will not return until the service call
     * completes.
     *
     * @param awsCredentials
     *        The AWS credentials (access key ID and secret key) to use when authenticating with AWS services.
     * @param clientConfiguration
     *        The client configuration options controlling how this client connects to Amazon Machine Learning (ex:
     *        proxy settings, retry counts, etc.).
     */
    public AmazonMachineLearningClient(AWSCredentials awsCredentials, ClientConfiguration clientConfiguration) {
        super(clientConfiguration);
        this.awsCredentialsProvider = new StaticCredentialsProvider(awsCredentials);
        init();
    }

    /**
     * Constructs a new client to invoke service methods on Amazon Machine Learning using the specified AWS account
     * credentials provider.
     * <p>
     * All service calls made using this new client object are blocking, and will not return until the service call
     * completes.
     *
     * @param awsCredentialsProvider
     *        The AWS credentials provider which will provide credentials to authenticate requests with AWS services.
     */
    public AmazonMachineLearningClient(AWSCredentialsProvider awsCredentialsProvider) {
        this(awsCredentialsProvider, configFactory.getConfig());
    }

    /**
     * Constructs a new client to invoke service methods on Amazon Machine Learning using the specified AWS account
     * credentials provider and client configuration options.
     * <p>
     * All service calls made using this new client object are blocking, and will not return until the service call
     * completes.
     *
     * @param awsCredentialsProvider
     *        The AWS credentials provider which will provide credentials to authenticate requests with AWS services.
     * @param clientConfiguration
     *        The client configuration options controlling how this client connects to Amazon Machine Learning (ex:
     *        proxy settings, retry counts, etc.).
     */
    public AmazonMachineLearningClient(AWSCredentialsProvider awsCredentialsProvider, ClientConfiguration clientConfiguration) {
        this(awsCredentialsProvider, clientConfiguration, null);
    }

    /**
     * Constructs a new client to invoke service methods on Amazon Machine Learning using the specified AWS account
     * credentials provider, client configuration options, and request metric collector.
     * <p>
     * All service calls made using this new client object are blocking, and will not return until the service call
     * completes.
     *
     * @param awsCredentialsProvider
     *        The AWS credentials provider which will provide credentials to authenticate requests with AWS services.
     * @param clientConfiguration
     *        The client configuration options controlling how this client connects to Amazon Machine Learning (ex:
     *        proxy settings, retry counts, etc.).
     * @param requestMetricCollector
     *        optional request metric collector
     */
    public AmazonMachineLearningClient(AWSCredentialsProvider awsCredentialsProvider, ClientConfiguration clientConfiguration,
            RequestMetricCollector requestMetricCollector) {
        super(clientConfiguration, requestMetricCollector);
        this.awsCredentialsProvider = awsCredentialsProvider;
        init();
    }

    private void init() {
        setServiceNameIntern(DEFAULT_SIGNING_NAME);
        setEndpointPrefix(DEFAULT_ENDPOINT_PREFIX);
        // calling this.setEndPoint(...) will also modify the signer accordingly
        setEndpoint("https://machinelearning.us-east-1.amazonaws.com");
        HandlerChainFactory chainFactory = new HandlerChainFactory();
        requestHandler2s.addAll(chainFactory.newRequestHandlerChain("/com/amazonaws/services/machinelearning/request.handlers"));
        requestHandler2s.addAll(chainFactory.newRequestHandler2Chain("/com/amazonaws/services/machinelearning/request.handler2s"));
    }

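    /*
     * Illustrative usage sketch, not part of the generated client: a common construction path. The timeout value is
     * an arbitrary example; at this SDK version the client starts out pointed at the us-east-1 endpoint set in
     * init().
     */
    private static AmazonMachineLearning exampleCreateClient() {
        // Tune the transport if desired, e.g. a longer socket timeout for long-running calls.
        ClientConfiguration config = new ClientConfiguration().withSocketTimeout(70 * 1000);
        // Credentials come from the DefaultAWSCredentialsProviderChain (environment
        // variables, system properties, then the EC2 instance profile).
        return new AmazonMachineLearningClient(new DefaultAWSCredentialsProviderChain(), config);
    }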
    /**
     * <p>
     * Generates predictions for a group of observations. The observations to process exist in one or more data files
     * referenced by a DataSource. This operation creates a new BatchPrediction, and uses an MLModel and the data
     * files referenced by the DataSource as information sources.
     * <p>
     * CreateBatchPrediction is an asynchronous operation. In response to CreateBatchPrediction, Amazon Machine
     * Learning (Amazon ML) immediately returns and sets the BatchPrediction status to PENDING. After the
     * BatchPrediction completes, Amazon ML sets the status to COMPLETED.
     * <p>
     * You can poll for status updates by using the GetBatchPrediction operation and checking the Status parameter of
     * the result. After the COMPLETED status appears, the results are available in the location specified by the
     * OutputUri parameter.
     *
     * @param createBatchPredictionRequest
     * @return Result of the CreateBatchPrediction operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an invalid input value.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a request.
     * @throws IdempotentParameterMismatchException
     *         A second request to use or change an object was not allowed. This can result from retrying a request
     *         using a parameter that was not present in the original request.
     * @sample AmazonMachineLearning.CreateBatchPrediction
     */
    @Override
    public CreateBatchPredictionResult createBatchPrediction(CreateBatchPredictionRequest createBatchPredictionRequest) {
        ExecutionContext executionContext = createExecutionContext(createBatchPredictionRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateBatchPredictionRequest> request = null;
        Response<CreateBatchPredictionResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new CreateBatchPredictionRequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(createBatchPredictionRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<CreateBatchPredictionResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                            new CreateBatchPredictionResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

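    /*
     * Illustrative usage sketch, not part of the generated client: submits a batch prediction and polls
     * GetBatchPrediction until it leaves the in-flight states. All IDs and the S3 URI are hypothetical placeholders.
     */
    private static void exampleCreateBatchPrediction(AmazonMachineLearning client) throws InterruptedException {
        CreateBatchPredictionResult created = client.createBatchPrediction(new CreateBatchPredictionRequest()
                .withBatchPredictionId("example-bp-id")               // caller-chosen, must be unique
                .withMLModelId("example-ml-id")                       // an existing, trained MLModel
                .withBatchPredictionDataSourceId("example-ds-id")     // DataSource holding the observations
                .withOutputUri("s3://example-bucket/batch-output/")); // where Amazon ML writes the results
        // The call returns immediately with status PENDING; poll for COMPLETED or FAILED.
        String status;
        do {
            Thread.sleep(30 * 1000);
            status = client.getBatchPrediction(
                    new GetBatchPredictionRequest().withBatchPredictionId(created.getBatchPredictionId())).getStatus();
        } while (!"COMPLETED".equals(status) && !"FAILED".equals(status));
    }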
    /**
     * <p>
     * Creates a DataSource object from an Amazon Relational Database Service (Amazon RDS). A DataSource references
     * data that can be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.
     * <p>
     * CreateDataSourceFromRDS is an asynchronous operation. In response to CreateDataSourceFromRDS, Amazon Machine
     * Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource is
     * created and ready for use, Amazon ML sets the Status parameter to COMPLETED. A DataSource in COMPLETED or
     * PENDING status can only be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction
     * operations.
     * <p>
     * If Amazon ML cannot accept the input source, it sets the Status parameter to FAILED and includes an error
     * message in the Message attribute of the GetDataSource operation response.
     *
     * @param createDataSourceFromRDSRequest
     * @return Result of the CreateDataSourceFromRDS operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an invalid input value.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a request.
     * @throws IdempotentParameterMismatchException
     *         A second request to use or change an object was not allowed. This can result from retrying a request
     *         using a parameter that was not present in the original request.
     * @sample AmazonMachineLearning.CreateDataSourceFromRDS
     */
    @Override
    public CreateDataSourceFromRDSResult createDataSourceFromRDS(CreateDataSourceFromRDSRequest createDataSourceFromRDSRequest) {
        ExecutionContext executionContext = createExecutionContext(createDataSourceFromRDSRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateDataSourceFromRDSRequest> request = null;
        Response<CreateDataSourceFromRDSResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new CreateDataSourceFromRDSRequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(createDataSourceFromRDSRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<CreateDataSourceFromRDSResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                            new CreateDataSourceFromRDSResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

    /**
     * <p>
     * Creates a DataSource from Amazon Redshift. A DataSource references data that can be used to perform either
     * CreateMLModel, CreateEvaluation, or CreateBatchPrediction operations.
     * <p>
     * CreateDataSourceFromRedshift is an asynchronous operation. In response to CreateDataSourceFromRedshift, Amazon
     * Machine Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the
     * DataSource is created and ready for use, Amazon ML sets the Status parameter to COMPLETED. A DataSource in
     * COMPLETED or PENDING status can only be used to perform CreateMLModel, CreateEvaluation, or
     * CreateBatchPrediction operations.
     * <p>
     * If Amazon ML cannot accept the input source, it sets the Status parameter to FAILED and includes an error
     * message in the Message attribute of the GetDataSource operation response.
     * <p>
     * The observations should exist in the database hosted on an Amazon Redshift cluster and should be specified by a
     * SelectSqlQuery. Amazon ML executes an Unload command in Amazon Redshift to transfer the result set of the
     * SelectSqlQuery to the S3StagingLocation.
     * <p>
     * After the DataSource is created, it's ready for use in evaluations and batch predictions. If you plan to use
     * the DataSource to train an MLModel, the DataSource requires another item: a recipe. A recipe describes the
     * observation variables that participate in training an MLModel, and how each input variable will be used in
     * training. Will the variable be included or excluded from training? Will the variable be manipulated, for
     * example, combined with another variable or split apart into word combinations? The recipe provides answers to
     * these questions. For more information, see the Amazon Machine Learning Developer Guide.
     *
     * @param createDataSourceFromRedshiftRequest
     * @return Result of the CreateDataSourceFromRedshift operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an invalid input value.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a request.
     * @throws IdempotentParameterMismatchException
     *         A second request to use or change an object was not allowed. This can result from retrying a request
     *         using a parameter that was not present in the original request.
     * @sample AmazonMachineLearning.CreateDataSourceFromRedshift
     */
    @Override
    public CreateDataSourceFromRedshiftResult createDataSourceFromRedshift(
            CreateDataSourceFromRedshiftRequest createDataSourceFromRedshiftRequest) {
        ExecutionContext executionContext = createExecutionContext(createDataSourceFromRedshiftRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateDataSourceFromRedshiftRequest> request = null;
        Response<CreateDataSourceFromRedshiftResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new CreateDataSourceFromRedshiftRequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(createDataSourceFromRedshiftRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<CreateDataSourceFromRedshiftResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                            new CreateDataSourceFromRedshiftResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

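    /*
     * Illustrative usage sketch, not part of the generated client: builds the Redshift data spec described above.
     * The cluster, credentials, query, role ARN, and S3 locations are hypothetical placeholders.
     */
    private static void exampleCreateDataSourceFromRedshift(AmazonMachineLearning client) {
        RedshiftDataSpec dataSpec = new RedshiftDataSpec()
                .withDatabaseInformation(new RedshiftDatabase()
                        .withClusterIdentifier("example-cluster")
                        .withDatabaseName("exampledb"))
                .withDatabaseCredentials(new RedshiftDatabaseCredentials()
                        .withUsername("example_user")
                        .withPassword("example-password"))
                .withSelectSqlQuery("SELECT * FROM observations")      // defines the observations
                .withS3StagingLocation("s3://example-bucket/staging/") // Unload target for the result set
                .withDataSchemaUri("s3://example-bucket/observations.schema");
        client.createDataSourceFromRedshift(new CreateDataSourceFromRedshiftRequest()
                .withDataSourceId("example-redshift-ds-id")
                .withDataSpec(dataSpec)
                .withRoleARN("arn:aws:iam::123456789012:role/example-ml-role")
                .withComputeStatistics(true)); // required if this DataSource will train an MLModel
    }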
    /**
     * <p>
     * Creates a DataSource object. A DataSource references data that can be used to perform CreateMLModel,
     * CreateEvaluation, or CreateBatchPrediction operations.
     * <p>
     * CreateDataSourceFromS3 is an asynchronous operation. In response to CreateDataSourceFromS3, Amazon Machine
     * Learning (Amazon ML) immediately returns and sets the DataSource status to PENDING. After the DataSource is
     * created and ready for use, Amazon ML sets the Status parameter to COMPLETED. A DataSource in COMPLETED or
     * PENDING status can only be used to perform CreateMLModel, CreateEvaluation, or CreateBatchPrediction
     * operations.
     * <p>
     * If Amazon ML cannot accept the input source, it sets the Status parameter to FAILED and includes an error
     * message in the Message attribute of the GetDataSource operation response.
     * <p>
     * The observation data used in a DataSource should be ready to use; that is, it should have a consistent
     * structure, and missing data values should be kept to a minimum. The observation data must reside in one or more
     * CSV files in an Amazon Simple Storage Service (Amazon S3) bucket, along with a schema that describes the data
     * items by name and type. The same schema must be used for all of the data files referenced by the DataSource.
     * <p>
     * After the DataSource has been created, it's ready to use in evaluations and batch predictions. If you plan to
     * use the DataSource to train an MLModel, the DataSource requires another item: a recipe. A recipe describes the
     * observation variables that participate in training an MLModel, and how each input variable will be used in
     * training. Will the variable be included or excluded from training? Will the variable be manipulated, for
     * example, combined with another variable, or split apart into word combinations? The recipe provides answers to
     * these questions. For more information, see the Amazon Machine Learning Developer Guide.
     *
     * @param createDataSourceFromS3Request
     * @return Result of the CreateDataSourceFromS3 operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an invalid input value.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a request.
     * @throws IdempotentParameterMismatchException
     *         A second request to use or change an object was not allowed. This can result from retrying a request
     *         using a parameter that was not present in the original request.
     * @sample AmazonMachineLearning.CreateDataSourceFromS3
     */
    @Override
    public CreateDataSourceFromS3Result createDataSourceFromS3(CreateDataSourceFromS3Request createDataSourceFromS3Request) {
        ExecutionContext executionContext = createExecutionContext(createDataSourceFromS3Request);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateDataSourceFromS3Request> request = null;
        Response<CreateDataSourceFromS3Result> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new CreateDataSourceFromS3RequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(createDataSourceFromS3Request));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<CreateDataSourceFromS3Result>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                            new CreateDataSourceFromS3ResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

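    /*
     * Illustrative usage sketch, not part of the generated client: creates a DataSource from CSV observations in
     * Amazon S3. The bucket, keys, and IDs are hypothetical placeholders.
     */
    private static void exampleCreateDataSourceFromS3(AmazonMachineLearning client) {
        S3DataSpec dataSpec = new S3DataSpec()
                .withDataLocationS3("s3://example-bucket/training/banking.csv")
                .withDataSchemaLocationS3("s3://example-bucket/training/banking.csv.schema");
        client.createDataSourceFromS3(new CreateDataSourceFromS3Request()
                .withDataSourceId("example-s3-ds-id")
                .withDataSourceName("Example S3 data source")
                .withDataSpec(dataSpec)
                .withComputeStatistics(true)); // compute statistics so CreateMLModel can use this DataSource
    }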
    /**
     * <p>
     * Creates a new Evaluation of an MLModel. An MLModel is evaluated on a set of observations associated with a
     * DataSource. Like a DataSource for an MLModel, the DataSource for an Evaluation contains values for the Target
     * Variable. The Evaluation compares the predicted result for each observation to the actual outcome and provides
     * a summary so that you know how effective the MLModel functions on the test data. Evaluation generates a
     * relevant performance metric such as BinaryAUC, RegressionRMSE, or MulticlassAvgFScore based on the
     * corresponding MLModelType: BINARY, REGRESSION, or MULTICLASS.
     * <p>
     * CreateEvaluation is an asynchronous operation. In response to CreateEvaluation, Amazon Machine Learning (Amazon
     * ML) immediately returns and sets the evaluation status to PENDING. After the Evaluation is created and ready
     * for use, Amazon ML sets the status to COMPLETED.
     * <p>
     * You can use the GetEvaluation operation to check progress of the evaluation during the creation operation.
     *
     * @param createEvaluationRequest
     * @return Result of the CreateEvaluation operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an invalid input value.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a request.
     * @throws IdempotentParameterMismatchException
     *         A second request to use or change an object was not allowed. This can result from retrying a request
     *         using a parameter that was not present in the original request.
     * @sample AmazonMachineLearning.CreateEvaluation
     */
    @Override
    public CreateEvaluationResult createEvaluation(CreateEvaluationRequest createEvaluationRequest) {
        ExecutionContext executionContext = createExecutionContext(createEvaluationRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateEvaluationRequest> request = null;
        Response<CreateEvaluationResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new CreateEvaluationRequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(createEvaluationRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<CreateEvaluationResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                            new CreateEvaluationResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

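    /*
     * Illustrative usage sketch, not part of the generated client: evaluates an existing MLModel against a held-out
     * DataSource. IDs are hypothetical placeholders.
     */
    private static void exampleCreateEvaluation(AmazonMachineLearning client) {
        client.createEvaluation(new CreateEvaluationRequest()
                .withEvaluationId("example-eval-id")
                .withEvaluationName("Example evaluation")
                .withMLModelId("example-ml-id")
                .withEvaluationDataSourceId("example-test-ds-id"));
        // The call returns immediately; getEvaluation(...) reports when the status reaches COMPLETED.
    }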
    /**
     * <p>
     * Creates a new MLModel using the data files and the recipe as information sources.
     * <p>
     * An MLModel is nearly immutable. Users can only update the MLModelName and the ScoreThreshold in an MLModel
     * without creating a new MLModel.
     * <p>
     * CreateMLModel is an asynchronous operation. In response to CreateMLModel, Amazon Machine Learning (Amazon ML)
     * immediately returns and sets the MLModel status to PENDING. After the MLModel is created and ready for use,
     * Amazon ML sets the status to COMPLETED.
     * <p>
     * You can use the GetMLModel operation to check progress of the MLModel during the creation operation.
     * <p>
     * CreateMLModel requires a DataSource with computed statistics, which can be created by setting
     * ComputeStatistics to true in CreateDataSourceFromRDS, CreateDataSourceFromS3, or CreateDataSourceFromRedshift
     * operations.
     *
     * @param createMLModelRequest
     * @return Result of the CreateMLModel operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an invalid input value.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a request.
     * @throws IdempotentParameterMismatchException
     *         A second request to use or change an object was not allowed. This can result from retrying a request
     *         using a parameter that was not present in the original request.
     * @sample AmazonMachineLearning.CreateMLModel
     */
    @Override
    public CreateMLModelResult createMLModel(CreateMLModelRequest createMLModelRequest) {
        ExecutionContext executionContext = createExecutionContext(createMLModelRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateMLModelRequest> request = null;
        Response<CreateMLModelResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new CreateMLModelRequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(createMLModelRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<CreateMLModelResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                            new CreateMLModelResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

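    /*
     * Illustrative usage sketch, not part of the generated client: trains a binary model from a DataSource that was
     * created with ComputeStatistics set to true. IDs are hypothetical placeholders.
     */
    private static void exampleCreateMLModel(AmazonMachineLearning client) {
        client.createMLModel(new CreateMLModelRequest()
                .withMLModelId("example-ml-id")
                .withMLModelName("Example binary model")
                .withMLModelType(MLModelType.BINARY) // BINARY, REGRESSION, or MULTICLASS
                .withTrainingDataSourceId("example-s3-ds-id"));
    }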
    /**
     * <p>
     * Creates a real-time endpoint for the MLModel. The endpoint contains the URI of the MLModel; that is, the
     * location to send real-time prediction requests for the specified MLModel.
     *
     * @param createRealtimeEndpointRequest
     * @return Result of the CreateRealtimeEndpoint operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an invalid input value.
     * @throws ResourceNotFoundException
     *         A specified resource cannot be located.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a request.
     * @sample AmazonMachineLearning.CreateRealtimeEndpoint
     */
    @Override
    public CreateRealtimeEndpointResult createRealtimeEndpoint(CreateRealtimeEndpointRequest createRealtimeEndpointRequest) {
        ExecutionContext executionContext = createExecutionContext(createRealtimeEndpointRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateRealtimeEndpointRequest> request = null;
        Response<CreateRealtimeEndpointResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new CreateRealtimeEndpointRequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(createRealtimeEndpointRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<CreateRealtimeEndpointResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                            new CreateRealtimeEndpointResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

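    /*
     * Illustrative usage sketch, not part of the generated client: creates a real-time endpoint and reads back the
     * URI that Predict requests should be sent to. The model ID is a hypothetical placeholder.
     */
    private static String exampleCreateRealtimeEndpoint(AmazonMachineLearning client) {
        CreateRealtimeEndpointResult result = client.createRealtimeEndpoint(
                new CreateRealtimeEndpointRequest().withMLModelId("example-ml-id"));
        // The endpoint is usable once its status becomes READY.
        return result.getRealtimeEndpointInfo().getEndpointUrl();
    }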
    /**
     * <p>
     * Assigns the DELETED status to a BatchPrediction, rendering it unusable.
     * <p>
     * After using the DeleteBatchPrediction operation, you can use the GetBatchPrediction operation to verify that
     * the status of the BatchPrediction changed to DELETED.
     * <p>
     * Caution: The result of the DeleteBatchPrediction operation is irreversible.
     *
     * @param deleteBatchPredictionRequest
     * @return Result of the DeleteBatchPrediction operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an invalid input value.
     * @throws ResourceNotFoundException
     *         A specified resource cannot be located.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a request.
     * @sample AmazonMachineLearning.DeleteBatchPrediction
     */
    @Override
    public DeleteBatchPredictionResult deleteBatchPrediction(DeleteBatchPredictionRequest deleteBatchPredictionRequest) {
        ExecutionContext executionContext = createExecutionContext(deleteBatchPredictionRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DeleteBatchPredictionRequest> request = null;
        Response<DeleteBatchPredictionResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DeleteBatchPredictionRequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(deleteBatchPredictionRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<DeleteBatchPredictionResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                            new DeleteBatchPredictionResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

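    /*
     * Illustrative usage sketch, not part of the generated client: deletes a BatchPrediction and confirms the status
     * change, as the documentation above suggests. The ID is a hypothetical placeholder.
     */
    private static void exampleDeleteBatchPrediction(AmazonMachineLearning client) {
        client.deleteBatchPrediction(new DeleteBatchPredictionRequest().withBatchPredictionId("example-bp-id"));
        // Deletion is irreversible; the object remains readable with status DELETED.
        String status = client.getBatchPrediction(
                new GetBatchPredictionRequest().withBatchPredictionId("example-bp-id")).getStatus();
        log.debug("status after delete: " + status); // expected: DELETED
    }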
    /**
     * <p>
     * Assigns the DELETED status to a DataSource, rendering it unusable.
     * <p>
     * After using the DeleteDataSource operation, you can use the GetDataSource operation to verify that the status
     * of the DataSource changed to DELETED.
     * <p>
     * Caution: The results of the DeleteDataSource operation are irreversible.
     *
     * @param deleteDataSourceRequest
     * @return Result of the DeleteDataSource operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an invalid input value.
     * @throws ResourceNotFoundException
     *         A specified resource cannot be located.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a request.
     * @sample AmazonMachineLearning.DeleteDataSource
     */
    @Override
    public DeleteDataSourceResult deleteDataSource(DeleteDataSourceRequest deleteDataSourceRequest) {
        ExecutionContext executionContext = createExecutionContext(deleteDataSourceRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DeleteDataSourceRequest> request = null;
        Response<DeleteDataSourceResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DeleteDataSourceRequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(deleteDataSourceRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<DeleteDataSourceResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                            new DeleteDataSourceResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

    /**
     * <p>
     * Assigns the DELETED status to an Evaluation, rendering it unusable.
     * <p>
     * After invoking the DeleteEvaluation operation, you can use the GetEvaluation operation to verify that the
     * status of the Evaluation changed to DELETED.
     * <p>
     * Caution: The results of the DeleteEvaluation operation are irreversible.
     *
     * @param deleteEvaluationRequest
     * @return Result of the DeleteEvaluation operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an invalid input value.
     * @throws ResourceNotFoundException
     *         A specified resource cannot be located.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a request.
     * @sample AmazonMachineLearning.DeleteEvaluation
     */
    @Override
    public DeleteEvaluationResult deleteEvaluation(DeleteEvaluationRequest deleteEvaluationRequest) {
        ExecutionContext executionContext = createExecutionContext(deleteEvaluationRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DeleteEvaluationRequest> request = null;
        Response<DeleteEvaluationResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DeleteEvaluationRequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(deleteEvaluationRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<DeleteEvaluationResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                            new DeleteEvaluationResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

    /**
     * <p>
     * Assigns the DELETED status to an MLModel, rendering it unusable.
     * <p>
     * After using the DeleteMLModel operation, you can use the GetMLModel operation to verify that the status of the
     * MLModel changed to DELETED.
     * <p>
     * Caution: The result of the DeleteMLModel operation is irreversible.
     *
     * @param deleteMLModelRequest
     * @return Result of the DeleteMLModel operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an invalid input value.
     * @throws ResourceNotFoundException
     *         A specified resource cannot be located.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a request.
     * @sample AmazonMachineLearning.DeleteMLModel
     */
    @Override
    public DeleteMLModelResult deleteMLModel(DeleteMLModelRequest deleteMLModelRequest) {
        ExecutionContext executionContext = createExecutionContext(deleteMLModelRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DeleteMLModelRequest> request = null;
        Response<DeleteMLModelResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DeleteMLModelRequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(deleteMLModelRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<DeleteMLModelResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                            new DeleteMLModelResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

    /**
     * <p>
     * Deletes a real-time endpoint of an MLModel.
     *
     * @param deleteRealtimeEndpointRequest
     * @return Result of the DeleteRealtimeEndpoint operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an invalid input value.
     * @throws ResourceNotFoundException
     *         A specified resource cannot be located.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a request.
     * @sample AmazonMachineLearning.DeleteRealtimeEndpoint
     */
    @Override
    public DeleteRealtimeEndpointResult deleteRealtimeEndpoint(DeleteRealtimeEndpointRequest deleteRealtimeEndpointRequest) {
        ExecutionContext executionContext = createExecutionContext(deleteRealtimeEndpointRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DeleteRealtimeEndpointRequest> request = null;
        Response<DeleteRealtimeEndpointResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DeleteRealtimeEndpointRequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(deleteRealtimeEndpointRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<DeleteRealtimeEndpointResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                            new DeleteRealtimeEndpointResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

    /**
     * <p>
     * Returns a list of BatchPrediction operations that match the search criteria in the request.
     *
     * @param describeBatchPredictionsRequest
     * @return Result of the DescribeBatchPredictions operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an invalid input value.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a request.
     * @sample AmazonMachineLearning.DescribeBatchPredictions
     */
    @Override
    public DescribeBatchPredictionsResult describeBatchPredictions(DescribeBatchPredictionsRequest describeBatchPredictionsRequest) {
        ExecutionContext executionContext = createExecutionContext(describeBatchPredictionsRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeBatchPredictionsRequest> request = null;
        Response<DescribeBatchPredictionsResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DescribeBatchPredictionsRequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(describeBatchPredictionsRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<DescribeBatchPredictionsResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                            new DescribeBatchPredictionsResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

    @Override
    public DescribeBatchPredictionsResult describeBatchPredictions() {
        return describeBatchPredictions(new DescribeBatchPredictionsRequest());
    }

    /**
     * <p>
     * Returns a list of DataSource objects that match the search criteria in the request.
     *
     * @param describeDataSourcesRequest
     * @return Result of the DescribeDataSources operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an invalid input value.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a request.
     * @sample AmazonMachineLearning.DescribeDataSources
     */
    @Override
    public DescribeDataSourcesResult describeDataSources(DescribeDataSourcesRequest describeDataSourcesRequest) {
        ExecutionContext executionContext = createExecutionContext(describeDataSourcesRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeDataSourcesRequest> request = null;
        Response<DescribeDataSourcesResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DescribeDataSourcesRequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(describeDataSourcesRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<DescribeDataSourcesResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                            new DescribeDataSourcesResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

    @Override
    public DescribeDataSourcesResult describeDataSources() {
        return describeDataSources(new DescribeDataSourcesRequest());
    }

    /**
     * <p>
     * Returns a list of Evaluation objects that match the search criteria in the request.
     *
     * @param describeEvaluationsRequest
     * @return Result of the DescribeEvaluations operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an invalid input value.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a request.
     * @sample AmazonMachineLearning.DescribeEvaluations
     */
    @Override
    public DescribeEvaluationsResult describeEvaluations(DescribeEvaluationsRequest describeEvaluationsRequest) {
        ExecutionContext executionContext = createExecutionContext(describeEvaluationsRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeEvaluationsRequest> request = null;
        Response<DescribeEvaluationsResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DescribeEvaluationsRequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(describeEvaluationsRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<DescribeEvaluationsResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                            new DescribeEvaluationsResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

    @Override
    public DescribeEvaluationsResult describeEvaluations() {
        return describeEvaluations(new DescribeEvaluationsRequest());
    }

    /**
     * <p>
     * Returns a list of MLModel objects that match the search criteria in the request.
     *
     * @param describeMLModelsRequest
     * @return Result of the DescribeMLModels operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an invalid input value.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a request.
     * @sample AmazonMachineLearning.DescribeMLModels
     */
    @Override
    public DescribeMLModelsResult describeMLModels(DescribeMLModelsRequest describeMLModelsRequest) {
        ExecutionContext executionContext = createExecutionContext(describeMLModelsRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<DescribeMLModelsRequest> request = null;
        Response<DescribeMLModelsResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new DescribeMLModelsRequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(describeMLModelsRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<DescribeMLModelsResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                            new DescribeMLModelsResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

    @Override
    public DescribeMLModelsResult describeMLModels() {
        return describeMLModels(new DescribeMLModelsRequest());
    }

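    /*
     * Illustrative usage sketch, not part of the generated client: lists models whose name starts with a given
     * prefix. The filter variable and prefix values are hypothetical placeholders.
     */
    private static void exampleDescribeMLModels(AmazonMachineLearning client) {
        DescribeMLModelsResult result = client.describeMLModels(new DescribeMLModelsRequest()
                .withFilterVariable("Name")
                .withPrefix("Example")
                .withLimit(25));
        for (MLModel model : result.getResults()) {
            log.debug(model.getMLModelId() + " " + model.getStatus());
        }
    }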
    /**
     * <p>
     * Returns a BatchPrediction that includes detailed metadata, status, and data file information for a
     * BatchPrediction request.
     *
     * @param getBatchPredictionRequest
     * @return Result of the GetBatchPrediction operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an invalid input value.
     * @throws ResourceNotFoundException
     *         A specified resource cannot be located.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a request.
     * @sample AmazonMachineLearning.GetBatchPrediction
     */
    @Override
    public GetBatchPredictionResult getBatchPrediction(GetBatchPredictionRequest getBatchPredictionRequest) {
        ExecutionContext executionContext = createExecutionContext(getBatchPredictionRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<GetBatchPredictionRequest> request = null;
        Response<GetBatchPredictionResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new GetBatchPredictionRequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(getBatchPredictionRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<GetBatchPredictionResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                            new GetBatchPredictionResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

    /**
     * <p>
     * Returns a DataSource that includes metadata and data file information, as well as the current status of the
     * DataSource.
     * <p>
     * GetDataSource provides results in normal or verbose format. The verbose format adds the schema description and
     * the list of files pointed to by the DataSource to the normal format.
     *
     * @param getDataSourceRequest
     * @return Result of the GetDataSource operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an invalid input value.
     * @throws ResourceNotFoundException
     *         A specified resource cannot be located.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a request.
     * @sample AmazonMachineLearning.GetDataSource
     */
    @Override
    public GetDataSourceResult getDataSource(GetDataSourceRequest getDataSourceRequest) {
        ExecutionContext executionContext = createExecutionContext(getDataSourceRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<GetDataSourceRequest> request = null;
        Response<GetDataSourceResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new GetDataSourceRequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(getDataSourceRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<GetDataSourceResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                            new GetDataSourceResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

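    /*
     * Illustrative usage sketch, not part of the generated client: fetches a DataSource in verbose format, which
     * adds the schema description to the normal output. The ID is a hypothetical placeholder.
     */
    private static void exampleGetDataSource(AmazonMachineLearning client) {
        GetDataSourceResult dataSource = client.getDataSource(new GetDataSourceRequest()
                .withDataSourceId("example-s3-ds-id")
                .withVerbose(true)); // verbose adds the DataSchema to the response
        log.debug(dataSource.getStatus() + " " + dataSource.getDataLocationS3());
    }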
    /**
     * <p>
     * Returns an Evaluation that includes metadata as well as the current status of the Evaluation.
     *
     * @param getEvaluationRequest
     * @return Result of the GetEvaluation operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an invalid input value.
     * @throws ResourceNotFoundException
     *         A specified resource cannot be located.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a request.
     * @sample AmazonMachineLearning.GetEvaluation
     */
    @Override
    public GetEvaluationResult getEvaluation(GetEvaluationRequest getEvaluationRequest) {
        ExecutionContext executionContext = createExecutionContext(getEvaluationRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<GetEvaluationRequest> request = null;
        Response<GetEvaluationResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new GetEvaluationRequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(getEvaluationRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<GetEvaluationResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                            new GetEvaluationResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

    /**
     * <p>
     * Returns an MLModel that includes detailed metadata, data source information, and the current status of the
     * MLModel.
     * <p>
     * GetMLModel provides results in normal or verbose format.
     *
     * @param getMLModelRequest
     * @return Result of the GetMLModel operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an invalid input value.
     * @throws ResourceNotFoundException
     *         A specified resource cannot be located.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a request.
     * @sample AmazonMachineLearning.GetMLModel
     */
    @Override
    public GetMLModelResult getMLModel(GetMLModelRequest getMLModelRequest) {
        ExecutionContext executionContext = createExecutionContext(getMLModelRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<GetMLModelRequest> request = null;
        Response<GetMLModelResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new GetMLModelRequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(getMLModelRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<GetMLModelResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                            new GetMLModelResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

    /**
     * <p>
     * Generates a prediction for the observation using the specified MLModel.
     * <p>
     * Note: Not all response parameters will be populated. Whether a response parameter is populated depends on the
     * type of model requested.
     *
     * @param predictRequest
     * @return Result of the Predict operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an invalid input value.
     * @throws ResourceNotFoundException
     *         A specified resource cannot be located.
     * @throws LimitExceededException
     *         The subscriber exceeded the maximum number of operations. This exception can occur when listing objects
     *         such as DataSource.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a request.
     * @throws PredictorNotMountedException
     *         The exception is thrown when a predict request is made to an unmounted MLModel.
     * @sample AmazonMachineLearning.Predict
     */
    @Override
    public PredictResult predict(PredictRequest predictRequest) {
        ExecutionContext executionContext = createExecutionContext(predictRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<PredictRequest> request = null;
        Response<PredictResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new PredictRequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(predictRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<PredictResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                            new PredictResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

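    /*
     * Illustrative usage sketch, not part of the generated client: sends one observation to a real-time endpoint.
     * The variable names must match the model's schema; all names and IDs here are hypothetical placeholders.
     */
    private static void examplePredict(AmazonMachineLearning client, String endpointUrl) {
        Map<String, String> record = new HashMap<String, String>();
        record.put("age", "41");
        record.put("job", "technician");
        PredictResult result = client.predict(new PredictRequest()
                .withMLModelId("example-ml-id")
                .withPredictEndpoint(endpointUrl) // obtained from CreateRealtimeEndpoint
                .withRecord(record));
        // For a BINARY model the predicted label and scores are populated; other
        // model types fill in different fields of the Prediction.
        Prediction prediction = result.getPrediction();
        log.debug(prediction.getPredictedLabel() + " " + prediction.getPredictedScores());
    }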
    /**
     * <p>
     * Updates the BatchPredictionName of a BatchPrediction.
     * <p>
     * You can use the GetBatchPrediction operation to view the contents of the updated data element.
     *
     * @param updateBatchPredictionRequest
     * @return Result of the UpdateBatchPrediction operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an invalid input value.
     * @throws ResourceNotFoundException
     *         A specified resource cannot be located.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a request.
     * @sample AmazonMachineLearning.UpdateBatchPrediction
     */
    @Override
    public UpdateBatchPredictionResult updateBatchPrediction(UpdateBatchPredictionRequest updateBatchPredictionRequest) {
        ExecutionContext executionContext = createExecutionContext(updateBatchPredictionRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<UpdateBatchPredictionRequest> request = null;
        Response<UpdateBatchPredictionResult> response = null;
        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new UpdateBatchPredictionRequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(updateBatchPredictionRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<UpdateBatchPredictionResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                            new UpdateBatchPredictionResultJsonUnmarshaller());

            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();
        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }

    /**
     * <p>
     * Updates the <code>DataSourceName</code> of a <code>DataSource</code>.
     * </p>
     * <p>
     * You can use the <code>GetDataSource</code> operation to view the
     * contents of the updated data element.
     * </p>
     *
     * @param updateDataSourceRequest
     * @return Result of the UpdateDataSource operation returned by the
     *         service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an
     *         invalid input value.
     * @throws ResourceNotFoundException
     *         A specified resource cannot be located.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a
     *         request.
     * @sample AmazonMachineLearning.UpdateDataSource
     */
    @Override
    public UpdateDataSourceResult updateDataSource(
            UpdateDataSourceRequest updateDataSourceRequest) {
        ExecutionContext executionContext = createExecutionContext(updateDataSourceRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<UpdateDataSourceRequest> request = null;
        Response<UpdateDataSourceResult> response = null;

        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new UpdateDataSourceRequestMarshaller(protocolFactory)
                        .marshall(super
                                .beforeMarshalling(updateDataSourceRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<UpdateDataSourceResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata()
                            .withPayloadJson(true)
                            .withHasStreamingSuccessResponse(false),
                            new UpdateDataSourceResultJsonUnmarshaller());
            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();

        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
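    // Editor's note: illustrative sketch, not part of the generated client.
    // Renames a DataSource, then reads the change back with GetDataSource.
    // The ID and name are hypothetical placeholders.
    private static GetDataSourceResult exampleUpdateDataSourceUsage(
            AmazonMachineLearning client) {
        client.updateDataSource(new UpdateDataSourceRequest()
                .withDataSourceId("ds-exampleId")
                .withDataSourceName("ExampleDataSourceName"));
        return client.getDataSource(new GetDataSourceRequest()
                .withDataSourceId("ds-exampleId"));
    }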

    /**
     * <p>
     * Updates the <code>EvaluationName</code> of an <code>Evaluation</code>.
     * </p>
     * <p>
     * You can use the <code>GetEvaluation</code> operation to view the
     * contents of the updated data element.
     * </p>
     *
     * @param updateEvaluationRequest
     * @return Result of the UpdateEvaluation operation returned by the
     *         service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an
     *         invalid input value.
     * @throws ResourceNotFoundException
     *         A specified resource cannot be located.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a
     *         request.
     * @sample AmazonMachineLearning.UpdateEvaluation
     */
    @Override
    public UpdateEvaluationResult updateEvaluation(
            UpdateEvaluationRequest updateEvaluationRequest) {
        ExecutionContext executionContext = createExecutionContext(updateEvaluationRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<UpdateEvaluationRequest> request = null;
        Response<UpdateEvaluationResult> response = null;

        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new UpdateEvaluationRequestMarshaller(protocolFactory)
                        .marshall(super
                                .beforeMarshalling(updateEvaluationRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<UpdateEvaluationResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata()
                            .withPayloadJson(true)
                            .withHasStreamingSuccessResponse(false),
                            new UpdateEvaluationResultJsonUnmarshaller());
            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();

        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
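    // Editor's note: illustrative sketch, not part of the generated client.
    // Renames an Evaluation, then reads the change back with GetEvaluation.
    // The ID and name are hypothetical placeholders.
    private static GetEvaluationResult exampleUpdateEvaluationUsage(
            AmazonMachineLearning client) {
        client.updateEvaluation(new UpdateEvaluationRequest()
                .withEvaluationId("ev-exampleId")
                .withEvaluationName("ExampleEvaluationName"));
        return client.getEvaluation(new GetEvaluationRequest()
                .withEvaluationId("ev-exampleId"));
    }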

    /**
     * <p>
     * Updates the <code>MLModelName</code> and the <code>ScoreThreshold</code>
     * of an <code>MLModel</code>.
     * </p>
     * <p>
     * You can use the <code>GetMLModel</code> operation to view the contents
     * of the updated data element.
     * </p>
     *
     * @param updateMLModelRequest
     * @return Result of the UpdateMLModel operation returned by the service.
     * @throws InvalidInputException
     *         An error on the client occurred. Typically, the cause is an
     *         invalid input value.
     * @throws ResourceNotFoundException
     *         A specified resource cannot be located.
     * @throws InternalServerException
     *         An error on the server occurred when trying to process a
     *         request.
     * @sample AmazonMachineLearning.UpdateMLModel
     */
    @Override
    public UpdateMLModelResult updateMLModel(
            UpdateMLModelRequest updateMLModelRequest) {
        ExecutionContext executionContext = createExecutionContext(updateMLModelRequest);
        AWSRequestMetrics awsRequestMetrics = executionContext
                .getAwsRequestMetrics();
        awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<UpdateMLModelRequest> request = null;
        Response<UpdateMLModelResult> response = null;

        try {
            awsRequestMetrics.startEvent(Field.RequestMarshallTime);
            try {
                request = new UpdateMLModelRequestMarshaller(protocolFactory)
                        .marshall(super.beforeMarshalling(updateMLModelRequest));
                // Binds the request metrics to the current request.
                request.setAWSRequestMetrics(awsRequestMetrics);
            } finally {
                awsRequestMetrics.endEvent(Field.RequestMarshallTime);
            }

            HttpResponseHandler<AmazonWebServiceResponse<UpdateMLModelResult>> responseHandler = protocolFactory
                    .createResponseHandler(new JsonOperationMetadata()
                            .withPayloadJson(true)
                            .withHasStreamingSuccessResponse(false),
                            new UpdateMLModelResultJsonUnmarshaller());
            response = invoke(request, responseHandler, executionContext);

            return response.getAwsResponse();

        } finally {
            endClientExecution(awsRequestMetrics, request, response);
        }
    }
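    // Editor's note: illustrative sketch, not part of the generated client.
    // Renames an MLModel and adjusts its score threshold (score thresholds
    // apply to binary MLModels), then reads the model back with GetMLModel.
    // The ID, name, and threshold value are hypothetical placeholders.
    private static GetMLModelResult exampleUpdateMLModelUsage(
            AmazonMachineLearning client) {
        client.updateMLModel(new UpdateMLModelRequest()
                .withMLModelId("ml-exampleModelId")
                .withMLModelName("ExampleMLModelName")
                .withScoreThreshold(0.5f));
        return client.getMLModel(new GetMLModelRequest()
                .withMLModelId("ml-exampleModelId"));
    }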

    /**
     * Returns additional metadata for a previously executed successful
     * request, typically used for debugging issues where a service isn't
     * acting as expected. This data isn't considered part of the result data
     * returned by an operation, so it's available through this separate,
     * diagnostic interface.
     * <p>
     * Response metadata is only cached for a limited period of time, so if
     * you need to access this extra diagnostic information for an executed
     * request, you should use this method to retrieve it as soon as possible
     * after executing the request.
     *
     * @param request
     *        The originally executed request
     *
     * @return The response metadata for the specified request, or null if
     *         none is available.
     */
    public ResponseMetadata getCachedResponseMetadata(
            AmazonWebServiceRequest request) {
        return client.getResponseMetadataForRequest(request);
    }
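    // Editor's note: illustrative sketch, not part of the generated client.
    // Pulls the AWS request ID of a just-executed request from the metadata
    // cache for debugging; per the Javadoc above, do this promptly after
    // execution because the cache is time-limited.
    private static String exampleResponseMetadataUsage(
            AmazonMachineLearningClient client,
            AmazonWebServiceRequest justExecutedRequest) {
        ResponseMetadata metadata = client
                .getCachedResponseMetadata(justExecutedRequest);
        return metadata == null ? null : metadata.getRequestId();
    }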
    /**
     * Normal invoke with authentication. Credentials are required and may be
     * overridden at the request level.
     **/
    private <X, Y extends AmazonWebServiceRequest> Response<X> invoke(
            Request<Y> request,
            HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler,
            ExecutionContext executionContext) {
        executionContext.setCredentialsProvider(CredentialUtils
                .getCredentialsProvider(request.getOriginalRequest(),
                        awsCredentialsProvider));

        return doInvoke(request, responseHandler, executionContext);
    }

    /**
     * Invoke with no authentication. Credentials are not required and any
     * credentials set on the client or request will be ignored for this
     * operation.
     **/
    private <X, Y extends AmazonWebServiceRequest> Response<X> anonymousInvoke(
            Request<Y> request,
            HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler,
            ExecutionContext executionContext) {

        return doInvoke(request, responseHandler, executionContext);
    }

    /**
     * Invoke the request using the http client. Assumes credentials (or lack
     * thereof) have been configured in the ExecutionContext beforehand.
     **/
    private <X, Y extends AmazonWebServiceRequest> Response<X> doInvoke(
            Request<Y> request,
            HttpResponseHandler<AmazonWebServiceResponse<X>> responseHandler,
            ExecutionContext executionContext) {
        request.setEndpoint(endpoint);
        request.setTimeOffset(timeOffset);

        HttpResponseHandler<AmazonServiceException> errorResponseHandler = protocolFactory
                .createErrorResponseHandler(new JsonErrorResponseMetadata());

        return client.execute(request, responseHandler, errorResponseHandler,
                executionContext);
    }

}