/*
* Copyright 2019-2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.sagemaker;
import org.w3c.dom.*;
import java.net.*;
import java.util.*;
import javax.annotation.Generated;
import org.apache.commons.logging.*;
import com.amazonaws.*;
import com.amazonaws.annotation.SdkInternalApi;
import com.amazonaws.auth.*;
import com.amazonaws.handlers.*;
import com.amazonaws.http.*;
import com.amazonaws.internal.*;
import com.amazonaws.internal.auth.*;
import com.amazonaws.metrics.*;
import com.amazonaws.regions.*;
import com.amazonaws.transform.*;
import com.amazonaws.util.*;
import com.amazonaws.protocol.json.*;
import com.amazonaws.util.AWSRequestMetrics.Field;
import com.amazonaws.annotation.ThreadSafe;
import com.amazonaws.client.AwsSyncClientParams;
import com.amazonaws.client.builder.AdvancedConfig;
import com.amazonaws.services.sagemaker.AmazonSageMakerClientBuilder;
import com.amazonaws.services.sagemaker.waiters.AmazonSageMakerWaiters;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.sagemaker.model.*;
import com.amazonaws.services.sagemaker.model.transform.*;
/**
* Client for accessing SageMaker. All service calls made using this client are blocking, and will not return until the
* service call completes.
*
*
* Provides APIs for creating and managing SageMaker resources.
*
*
 * Other Resources:
 *
 * - SageMaker Developer Guide
 *
 * - Amazon Augmented AI Runtime API Reference
 *
*/
@ThreadSafe
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class AmazonSageMakerClient extends AmazonWebServiceClient implements AmazonSageMaker {
/** Provider for AWS credentials. */
private final AWSCredentialsProvider awsCredentialsProvider;
private static final Log log = LogFactory.getLog(AmazonSageMaker.class);
/** Default signing name for the service. */
private static final String DEFAULT_SIGNING_NAME = "sagemaker";
private volatile AmazonSageMakerWaiters waiters;
/** Client configuration factory providing ClientConfigurations tailored to this client */
protected static final ClientConfigurationFactory configFactory = new ClientConfigurationFactory();
private final AdvancedConfig advancedConfig;
private static final com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory = new com.amazonaws.protocol.json.SdkJsonProtocolFactory(
new JsonClientMetadata()
.withProtocolVersion("1.1")
.withSupportsCbor(false)
.withSupportsIon(false)
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("ResourceInUse").withExceptionUnmarshaller(
com.amazonaws.services.sagemaker.model.transform.ResourceInUseExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("ConflictException").withExceptionUnmarshaller(
com.amazonaws.services.sagemaker.model.transform.ConflictExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("ResourceNotFound").withExceptionUnmarshaller(
com.amazonaws.services.sagemaker.model.transform.ResourceNotFoundExceptionUnmarshaller.getInstance()))
.addErrorMetadata(
new JsonErrorShapeMetadata().withErrorCode("ResourceLimitExceeded").withExceptionUnmarshaller(
com.amazonaws.services.sagemaker.model.transform.ResourceLimitExceededExceptionUnmarshaller.getInstance()))
.withBaseServiceExceptionClass(com.amazonaws.services.sagemaker.model.AmazonSageMakerException.class));
public static AmazonSageMakerClientBuilder builder() {
return AmazonSageMakerClientBuilder.standard();
}
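    /*
     * Illustrative sketch (editor's addition, not part of the generated client): constructing a client
     * through the builder. The region value is an example only.
     *
     *   AmazonSageMaker sageMaker = AmazonSageMakerClient.builder()
     *           .withRegion("us-east-1")
     *           .build();
     */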
/**
* Constructs a new client to invoke service methods on SageMaker using the specified parameters.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param clientParams
* Object providing client parameters.
*/
AmazonSageMakerClient(AwsSyncClientParams clientParams) {
this(clientParams, false);
}
/**
* Constructs a new client to invoke service methods on SageMaker using the specified parameters.
*
*
* All service calls made using this new client object are blocking, and will not return until the service call
* completes.
*
* @param clientParams
* Object providing client parameters.
*/
AmazonSageMakerClient(AwsSyncClientParams clientParams, boolean endpointDiscoveryEnabled) {
super(clientParams);
this.awsCredentialsProvider = clientParams.getCredentialsProvider();
this.advancedConfig = clientParams.getAdvancedConfig();
init();
}
private void init() {
setServiceNameIntern(DEFAULT_SIGNING_NAME);
setEndpointPrefix(ENDPOINT_PREFIX);
// calling this.setEndPoint(...) will also modify the signer accordingly
setEndpoint("sagemaker.us-east-1.amazonaws.com");
HandlerChainFactory chainFactory = new HandlerChainFactory();
requestHandler2s.addAll(chainFactory.newRequestHandlerChain("/com/amazonaws/services/sagemaker/request.handlers"));
requestHandler2s.addAll(chainFactory.newRequestHandler2Chain("/com/amazonaws/services/sagemaker/request.handler2s"));
requestHandler2s.addAll(chainFactory.getGlobalHandlers());
}
/**
*
* Creates an association between the source and the destination. A source can be associated with multiple
* destinations, and a destination can be associated with multiple sources. An association is a lineage tracking
* entity. For more information, see Amazon SageMaker ML Lineage
* Tracking.
*
*
* @param addAssociationRequest
* @return Result of the AddAssociation operation returned by the service.
* @throws ResourceNotFoundException
 * Resource being accessed is not found.
 * @throws ResourceLimitExceededException
 * You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.AddAssociation
* @see AWS API
* Documentation
*/
@Override
public AddAssociationResult addAssociation(AddAssociationRequest request) {
request = beforeClientExecution(request);
return executeAddAssociation(request);
}
@SdkInternalApi
final AddAssociationResult executeAddAssociation(AddAssociationRequest addAssociationRequest) {
ExecutionContext executionContext = createExecutionContext(addAssociationRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<AddAssociationRequest> request = null;
        Response<AddAssociationResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new AddAssociationRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(addAssociationRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "AddAssociation");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            HttpResponseHandler<AmazonWebServiceResponse<AddAssociationResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new AddAssociationResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
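    /*
     * Illustrative sketch (editor's addition): calling addAssociation on an AmazonSageMaker client
     * (sageMaker) to link two lineage entities. The ARNs and association type are hypothetical
     * placeholders.
     *
     *   AddAssociationResult result = sageMaker.addAssociation(new AddAssociationRequest()
     *           .withSourceArn("arn:aws:sagemaker:us-east-1:111122223333:artifact/example-dataset")
     *           .withDestinationArn("arn:aws:sagemaker:us-east-1:111122223333:action/example-training")
     *           .withAssociationType("ContributedTo"));
     */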
/**
*
* Adds or overwrites one or more tags for the specified SageMaker resource. You can add tags to notebook instances,
* training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint
* configurations, and endpoints.
*
*
 * Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information
 * about tags, see Amazon Web Services Tagging Strategies.
*
*
*
* Tags that you add to a hyperparameter tuning job by calling this API are also added to any training jobs that the
* hyperparameter tuning job launches after you call this API, but not to training jobs that the hyperparameter
* tuning job launched before you called this API. To make sure that the tags associated with a hyperparameter
* tuning job are also added to all training jobs that the hyperparameter tuning job launches, add the tags when you
 * first create the tuning job by specifying them in the Tags parameter of CreateHyperParameterTuningJob.
*
*
*
* Tags that you add to a SageMaker Domain or User Profile by calling this API are also added to any Apps that the
* Domain or User Profile launches after you call this API, but not to Apps that the Domain or User Profile launched
* before you called this API. To make sure that the tags associated with a Domain or User Profile are also added to
* all Apps that the Domain or User Profile launches, add the tags when you first create the Domain or User Profile
 * by specifying them in the Tags parameter of CreateDomain or CreateUserProfile.
*
*
*
* @param addTagsRequest
* @return Result of the AddTags operation returned by the service.
* @sample AmazonSageMaker.AddTags
* @see AWS API
* Documentation
*/
@Override
public AddTagsResult addTags(AddTagsRequest request) {
request = beforeClientExecution(request);
return executeAddTags(request);
}
@SdkInternalApi
final AddTagsResult executeAddTags(AddTagsRequest addTagsRequest) {
ExecutionContext executionContext = createExecutionContext(addTagsRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<AddTagsRequest> request = null;
        Response<AddTagsResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new AddTagsRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(addTagsRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "AddTags");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            HttpResponseHandler<AmazonWebServiceResponse<AddTagsResult>> responseHandler = protocolFactory.createResponseHandler(new JsonOperationMetadata()
                    .withPayloadJson(true).withHasStreamingSuccessResponse(false), new AddTagsResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
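    /*
     * Illustrative sketch (editor's addition): tagging an existing resource with addTags. The resource
     * ARN and tag values are hypothetical placeholders.
     *
     *   AddTagsResult result = sageMaker.addTags(new AddTagsRequest()
     *           .withResourceArn("arn:aws:sagemaker:us-east-1:111122223333:training-job/example-job")
     *           .withTags(new Tag().withKey("project").withValue("demo")));
     */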
/**
*
* Associates a trial component with a trial. A trial component can be associated with multiple trials. To
* disassociate a trial component from a trial, call the DisassociateTrialComponent API.
*
*
* @param associateTrialComponentRequest
* @return Result of the AssociateTrialComponent operation returned by the service.
* @throws ResourceNotFoundException
 * Resource being accessed is not found.
 * @throws ResourceLimitExceededException
 * You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.AssociateTrialComponent
* @see AWS API Documentation
*/
@Override
public AssociateTrialComponentResult associateTrialComponent(AssociateTrialComponentRequest request) {
request = beforeClientExecution(request);
return executeAssociateTrialComponent(request);
}
@SdkInternalApi
final AssociateTrialComponentResult executeAssociateTrialComponent(AssociateTrialComponentRequest associateTrialComponentRequest) {
ExecutionContext executionContext = createExecutionContext(associateTrialComponentRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<AssociateTrialComponentRequest> request = null;
        Response<AssociateTrialComponentResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new AssociateTrialComponentRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(associateTrialComponentRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "AssociateTrialComponent");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            HttpResponseHandler<AmazonWebServiceResponse<AssociateTrialComponentResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                    new AssociateTrialComponentResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
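    /*
     * Illustrative sketch (editor's addition): associating a trial component with a trial. The names
     * are hypothetical placeholders.
     *
     *   AssociateTrialComponentResult result = sageMaker.associateTrialComponent(new AssociateTrialComponentRequest()
     *           .withTrialComponentName("example-trial-component")
     *           .withTrialName("example-trial"));
     */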
/**
*
 * This action describes a list of versioned model packages in a single batch request.
*
*
* @param batchDescribeModelPackageRequest
* @return Result of the BatchDescribeModelPackage operation returned by the service.
* @sample AmazonSageMaker.BatchDescribeModelPackage
* @see AWS API Documentation
*/
@Override
public BatchDescribeModelPackageResult batchDescribeModelPackage(BatchDescribeModelPackageRequest request) {
request = beforeClientExecution(request);
return executeBatchDescribeModelPackage(request);
}
@SdkInternalApi
final BatchDescribeModelPackageResult executeBatchDescribeModelPackage(BatchDescribeModelPackageRequest batchDescribeModelPackageRequest) {
ExecutionContext executionContext = createExecutionContext(batchDescribeModelPackageRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<BatchDescribeModelPackageRequest> request = null;
        Response<BatchDescribeModelPackageResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new BatchDescribeModelPackageRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(batchDescribeModelPackageRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "BatchDescribeModelPackage");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            HttpResponseHandler<AmazonWebServiceResponse<BatchDescribeModelPackageResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                    new BatchDescribeModelPackageResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
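    /*
     * Illustrative sketch (editor's addition): describing several versioned model packages in one
     * request. The model package ARNs are hypothetical placeholders.
     *
     *   BatchDescribeModelPackageResult result = sageMaker.batchDescribeModelPackage(new BatchDescribeModelPackageRequest()
     *           .withModelPackageArnList(
     *                   "arn:aws:sagemaker:us-east-1:111122223333:model-package/example-group/1",
     *                   "arn:aws:sagemaker:us-east-1:111122223333:model-package/example-group/2"));
     */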
/**
*
* Creates an action. An action is a lineage tracking entity that represents an action or activity. For
* example, a model deployment or an HPO job. Generally, an action involves at least one input or output artifact.
* For more information, see Amazon
* SageMaker ML Lineage Tracking.
*
*
* @param createActionRequest
* @return Result of the CreateAction operation returned by the service.
* @throws ResourceLimitExceededException
 * You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateAction
* @see AWS API
* Documentation
*/
@Override
public CreateActionResult createAction(CreateActionRequest request) {
request = beforeClientExecution(request);
return executeCreateAction(request);
}
@SdkInternalApi
final CreateActionResult executeCreateAction(CreateActionRequest createActionRequest) {
ExecutionContext executionContext = createExecutionContext(createActionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateActionRequest> request = null;
        Response<CreateActionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateActionRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createActionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateAction");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            HttpResponseHandler<AmazonWebServiceResponse<CreateActionResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateActionResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
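    /*
     * Illustrative sketch (editor's addition): creating a lineage action. The action name, type, and
     * source URI are hypothetical placeholders.
     *
     *   CreateActionResult result = sageMaker.createAction(new CreateActionRequest()
     *           .withActionName("example-deployment-action")
     *           .withActionType("ModelDeployment")
     *           .withSource(new ActionSource().withSourceUri("s3://amzn-s3-demo-bucket/model.tar.gz")));
     */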
/**
*
 * Creates a machine learning algorithm that you can use in SageMaker and list in the Amazon Web Services
* Marketplace.
*
*
* @param createAlgorithmRequest
* @return Result of the CreateAlgorithm operation returned by the service.
* @sample AmazonSageMaker.CreateAlgorithm
* @see AWS API
* Documentation
*/
@Override
public CreateAlgorithmResult createAlgorithm(CreateAlgorithmRequest request) {
request = beforeClientExecution(request);
return executeCreateAlgorithm(request);
}
@SdkInternalApi
final CreateAlgorithmResult executeCreateAlgorithm(CreateAlgorithmRequest createAlgorithmRequest) {
ExecutionContext executionContext = createExecutionContext(createAlgorithmRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateAlgorithmRequest> request = null;
        Response<CreateAlgorithmResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateAlgorithmRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createAlgorithmRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateAlgorithm");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            HttpResponseHandler<AmazonWebServiceResponse<CreateAlgorithmResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateAlgorithmResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates a running app for the specified UserProfile. This operation is automatically invoked by Amazon SageMaker
* upon access to the associated Domain, and when new kernel configurations are selected by the user. A user may
* have multiple Apps active simultaneously.
*
*
* @param createAppRequest
* @return Result of the CreateApp operation returned by the service.
* @throws ResourceLimitExceededException
 * You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @sample AmazonSageMaker.CreateApp
* @see AWS API
* Documentation
*/
@Override
public CreateAppResult createApp(CreateAppRequest request) {
request = beforeClientExecution(request);
return executeCreateApp(request);
}
@SdkInternalApi
final CreateAppResult executeCreateApp(CreateAppRequest createAppRequest) {
ExecutionContext executionContext = createExecutionContext(createAppRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateAppRequest> request = null;
        Response<CreateAppResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateAppRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createAppRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateApp");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            HttpResponseHandler<AmazonWebServiceResponse<CreateAppResult>> responseHandler = protocolFactory.createResponseHandler(new JsonOperationMetadata()
                    .withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateAppResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
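    /*
     * Illustrative sketch (editor's addition): starting a KernelGateway app for a user profile. The
     * domain ID, user profile name, and app name are hypothetical placeholders.
     *
     *   CreateAppResult result = sageMaker.createApp(new CreateAppRequest()
     *           .withDomainId("d-example1234")
     *           .withUserProfileName("example-user")
     *           .withAppType("KernelGateway")
     *           .withAppName("example-app"));
     */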
/**
*
* Creates a configuration for running a SageMaker image as a KernelGateway app. The configuration specifies the
* Amazon Elastic File System storage volume on the image, and a list of the kernels in the image.
*
*
* @param createAppImageConfigRequest
* @return Result of the CreateAppImageConfig operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @sample AmazonSageMaker.CreateAppImageConfig
* @see AWS
* API Documentation
*/
@Override
public CreateAppImageConfigResult createAppImageConfig(CreateAppImageConfigRequest request) {
request = beforeClientExecution(request);
return executeCreateAppImageConfig(request);
}
@SdkInternalApi
final CreateAppImageConfigResult executeCreateAppImageConfig(CreateAppImageConfigRequest createAppImageConfigRequest) {
ExecutionContext executionContext = createExecutionContext(createAppImageConfigRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateAppImageConfigRequest> request = null;
        Response<CreateAppImageConfigResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateAppImageConfigRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createAppImageConfigRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateAppImageConfig");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            HttpResponseHandler<AmazonWebServiceResponse<CreateAppImageConfigResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateAppImageConfigResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates an artifact. An artifact is a lineage tracking entity that represents a URI addressable object or
* data. Some examples are the S3 URI of a dataset and the ECR registry path of an image. For more information, see
* Amazon SageMaker ML Lineage
* Tracking.
*
*
* @param createArtifactRequest
* @return Result of the CreateArtifact operation returned by the service.
* @throws ResourceLimitExceededException
 * You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateArtifact
* @see AWS API
* Documentation
*/
@Override
public CreateArtifactResult createArtifact(CreateArtifactRequest request) {
request = beforeClientExecution(request);
return executeCreateArtifact(request);
}
@SdkInternalApi
final CreateArtifactResult executeCreateArtifact(CreateArtifactRequest createArtifactRequest) {
ExecutionContext executionContext = createExecutionContext(createArtifactRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateArtifactRequest> request = null;
        Response<CreateArtifactResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateArtifactRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createArtifactRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateArtifact");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            HttpResponseHandler<AmazonWebServiceResponse<CreateArtifactResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateArtifactResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
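    /*
     * Illustrative sketch (editor's addition): registering a lineage artifact for an S3 object. The
     * artifact name, type, and URI are hypothetical placeholders.
     *
     *   CreateArtifactResult result = sageMaker.createArtifact(new CreateArtifactRequest()
     *           .withArtifactName("example-dataset-artifact")
     *           .withArtifactType("DataSet")
     *           .withSource(new ArtifactSource().withSourceUri("s3://amzn-s3-demo-bucket/train.csv")));
     */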
/**
*
 * Creates an Autopilot job, also referred to as an Autopilot experiment or AutoML job.
*
*
*
 * We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward
 * compatibility.
*
*
 * CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version
 * CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text
 * classification, and text generation (LLMs fine-tuning).
*
*
 * Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to
 * CreateAutoMLJobV2.
*
*
*
* You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob.
*
*
* @param createAutoMLJobRequest
* @return Result of the CreateAutoMLJob operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @throws ResourceLimitExceededException
 * You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateAutoMLJob
* @see AWS API
* Documentation
*/
@Override
public CreateAutoMLJobResult createAutoMLJob(CreateAutoMLJobRequest request) {
request = beforeClientExecution(request);
return executeCreateAutoMLJob(request);
}
@SdkInternalApi
final CreateAutoMLJobResult executeCreateAutoMLJob(CreateAutoMLJobRequest createAutoMLJobRequest) {
ExecutionContext executionContext = createExecutionContext(createAutoMLJobRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateAutoMLJobRequest> request = null;
        Response<CreateAutoMLJobResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateAutoMLJobRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createAutoMLJobRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateAutoMLJob");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            HttpResponseHandler<AmazonWebServiceResponse<CreateAutoMLJobResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateAutoMLJobResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
 * Creates an Autopilot job, also referred to as an Autopilot experiment or AutoML job V2.
*
*
*
 * CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob and DescribeAutoMLJob,
 * which offer backward compatibility.
*
*
 * CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version
 * CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such as image or text
 * classification, and text generation (LLMs fine-tuning).
*
*
 * Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to
 * CreateAutoMLJobV2.
*
*
*
 * For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig.
*
*
* You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.
*
*
* @param createAutoMLJobV2Request
* @return Result of the CreateAutoMLJobV2 operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @throws ResourceLimitExceededException
 * You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateAutoMLJobV2
* @see AWS
* API Documentation
*/
@Override
public CreateAutoMLJobV2Result createAutoMLJobV2(CreateAutoMLJobV2Request request) {
request = beforeClientExecution(request);
return executeCreateAutoMLJobV2(request);
}
@SdkInternalApi
final CreateAutoMLJobV2Result executeCreateAutoMLJobV2(CreateAutoMLJobV2Request createAutoMLJobV2Request) {
ExecutionContext executionContext = createExecutionContext(createAutoMLJobV2Request);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateAutoMLJobV2Request> request = null;
        Response<CreateAutoMLJobV2Result> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateAutoMLJobV2RequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createAutoMLJobV2Request));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateAutoMLJobV2");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            HttpResponseHandler<AmazonWebServiceResponse<CreateAutoMLJobV2Result>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateAutoMLJobV2ResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates a SageMaker HyperPod cluster. SageMaker HyperPod is a capability of SageMaker for creating and managing
* persistent clusters for developing large machine learning models, such as large language models (LLMs) and
* diffusion models. To learn more, see Amazon SageMaker HyperPod in
* the Amazon SageMaker Developer Guide.
*
*
* @param createClusterRequest
* @return Result of the CreateCluster operation returned by the service.
* @throws ResourceLimitExceededException
 * You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @sample AmazonSageMaker.CreateCluster
* @see AWS API
* Documentation
*/
@Override
public CreateClusterResult createCluster(CreateClusterRequest request) {
request = beforeClientExecution(request);
return executeCreateCluster(request);
}
@SdkInternalApi
final CreateClusterResult executeCreateCluster(CreateClusterRequest createClusterRequest) {
ExecutionContext executionContext = createExecutionContext(createClusterRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateClusterRequest> request = null;
        Response<CreateClusterResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateClusterRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createClusterRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateCluster");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            HttpResponseHandler<AmazonWebServiceResponse<CreateClusterResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateClusterResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates a Git repository as a resource in your SageMaker account. You can associate the repository with notebook
* instances so that you can use Git source control for the notebooks you create. The Git repository is a resource
* in your SageMaker account, so it can be associated with more than one notebook instance, and it persists
* independently from the lifecycle of any notebook instances it is associated with.
*
*
* The repository can be hosted either in Amazon Web Services CodeCommit or
* in any other Git repository.
*
*
* @param createCodeRepositoryRequest
* @return Result of the CreateCodeRepository operation returned by the service.
* @sample AmazonSageMaker.CreateCodeRepository
* @see AWS
* API Documentation
*/
@Override
public CreateCodeRepositoryResult createCodeRepository(CreateCodeRepositoryRequest request) {
request = beforeClientExecution(request);
return executeCreateCodeRepository(request);
}
@SdkInternalApi
final CreateCodeRepositoryResult executeCreateCodeRepository(CreateCodeRepositoryRequest createCodeRepositoryRequest) {
ExecutionContext executionContext = createExecutionContext(createCodeRepositoryRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateCodeRepositoryRequest> request = null;
        Response<CreateCodeRepositoryResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateCodeRepositoryRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createCodeRepositoryRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateCodeRepository");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            HttpResponseHandler<AmazonWebServiceResponse<CreateCodeRepositoryResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateCodeRepositoryResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
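    /*
     * Illustrative sketch (editor's addition): registering a Git repository for use with notebook
     * instances. The repository name and URL are hypothetical placeholders.
     *
     *   CreateCodeRepositoryResult result = sageMaker.createCodeRepository(new CreateCodeRepositoryRequest()
     *           .withCodeRepositoryName("example-repo")
     *           .withGitConfig(new GitConfig()
     *                   .withRepositoryUrl("https://github.com/example-org/example-repo.git")
     *                   .withBranch("main")));
     */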
/**
*
* Starts a model compilation job. After the model has been compiled, Amazon SageMaker saves the resulting model
* artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify.
*
*
* If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model
* artifacts as part of the model. You can also use the artifacts with Amazon Web Services IoT Greengrass. In that
* case, deploy them as an ML resource.
*
*
* In the request body, you provide the following:
*
*
* -
*
* A name for the compilation job
*
*
* -
*
* Information about the input model artifacts
*
*
* -
*
* The output location for the compiled model and the device (target) that the model runs on
*
*
* -
*
* The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker assumes to perform the model compilation
* job.
*
*
*
*
 * You can also provide a Tag to track the model compilation job's resource use and costs. The response
 * body contains the CompilationJobArn for the compiled job.
*
*
* To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation job, use DescribeCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs.
*
*
* @param createCompilationJobRequest
* @return Result of the CreateCompilationJob operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @throws ResourceLimitExceededException
 * You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateCompilationJob
* @see AWS
* API Documentation
*/
@Override
public CreateCompilationJobResult createCompilationJob(CreateCompilationJobRequest request) {
request = beforeClientExecution(request);
return executeCreateCompilationJob(request);
}
@SdkInternalApi
final CreateCompilationJobResult executeCreateCompilationJob(CreateCompilationJobRequest createCompilationJobRequest) {
ExecutionContext executionContext = createExecutionContext(createCompilationJobRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateCompilationJobRequest> request = null;
        Response<CreateCompilationJobResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateCompilationJobRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createCompilationJobRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateCompilationJob");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            HttpResponseHandler<AmazonWebServiceResponse<CreateCompilationJobResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateCompilationJobResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
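    /*
     * Illustrative sketch (editor's addition): starting a compilation job. The names, URIs, data input
     * shape, and target device are hypothetical placeholders.
     *
     *   CreateCompilationJobResult result = sageMaker.createCompilationJob(new CreateCompilationJobRequest()
     *           .withCompilationJobName("example-compilation-job")
     *           .withRoleArn("arn:aws:iam::111122223333:role/ExampleSageMakerRole")
     *           .withInputConfig(new InputConfig()
     *                   .withS3Uri("s3://amzn-s3-demo-bucket/model.tar.gz")
     *                   .withDataInputConfig("{\"input\":[1,224,224,3]}")
     *                   .withFramework("TENSORFLOW"))
     *           .withOutputConfig(new OutputConfig()
     *                   .withS3OutputLocation("s3://amzn-s3-demo-bucket/compiled/")
     *                   .withTargetDevice("ml_c5"))
     *           .withStoppingCondition(new StoppingCondition().withMaxRuntimeInSeconds(900)));
     */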
/**
*
* Creates a context. A context is a lineage tracking entity that represents a logical grouping of other
* tracking or experiment entities. Some examples are an endpoint and a model package. For more information, see Amazon SageMaker ML Lineage
* Tracking.
*
*
* @param createContextRequest
* @return Result of the CreateContext operation returned by the service.
* @throws ResourceLimitExceededException
 * You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateContext
* @see AWS API
* Documentation
*/
@Override
public CreateContextResult createContext(CreateContextRequest request) {
request = beforeClientExecution(request);
return executeCreateContext(request);
}
@SdkInternalApi
final CreateContextResult executeCreateContext(CreateContextRequest createContextRequest) {
ExecutionContext executionContext = createExecutionContext(createContextRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateContextRequest> request = null;
        Response<CreateContextResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateContextRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createContextRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateContext");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            HttpResponseHandler<AmazonWebServiceResponse<CreateContextResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateContextResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
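    /*
     * Illustrative sketch (editor's addition): creating a lineage context for an endpoint. The context
     * name, type, and source URI are hypothetical placeholders.
     *
     *   CreateContextResult result = sageMaker.createContext(new CreateContextRequest()
     *           .withContextName("example-endpoint-context")
     *           .withContextType("Endpoint")
     *           .withSource(new ContextSource().withSourceUri(
     *                   "arn:aws:sagemaker:us-east-1:111122223333:endpoint/example-endpoint")));
     */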
/**
*
* Creates a definition for a job that monitors data quality and drift. For information about model monitor, see Amazon SageMaker Model Monitor.
*
*
* @param createDataQualityJobDefinitionRequest
* @return Result of the CreateDataQualityJobDefinition operation returned by the service.
* @throws ResourceLimitExceededException
 * You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @sample AmazonSageMaker.CreateDataQualityJobDefinition
* @see AWS API Documentation
*/
@Override
public CreateDataQualityJobDefinitionResult createDataQualityJobDefinition(CreateDataQualityJobDefinitionRequest request) {
request = beforeClientExecution(request);
return executeCreateDataQualityJobDefinition(request);
}
@SdkInternalApi
final CreateDataQualityJobDefinitionResult executeCreateDataQualityJobDefinition(CreateDataQualityJobDefinitionRequest createDataQualityJobDefinitionRequest) {
ExecutionContext executionContext = createExecutionContext(createDataQualityJobDefinitionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateDataQualityJobDefinitionRequest> request = null;
        Response<CreateDataQualityJobDefinitionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateDataQualityJobDefinitionRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(createDataQualityJobDefinitionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateDataQualityJobDefinition");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            HttpResponseHandler<AmazonWebServiceResponse<CreateDataQualityJobDefinitionResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                    new CreateDataQualityJobDefinitionResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates a device fleet.
*
*
* @param createDeviceFleetRequest
* @return Result of the CreateDeviceFleet operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @throws ResourceLimitExceededException
 * You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateDeviceFleet
* @see AWS
* API Documentation
*/
@Override
public CreateDeviceFleetResult createDeviceFleet(CreateDeviceFleetRequest request) {
request = beforeClientExecution(request);
return executeCreateDeviceFleet(request);
}
@SdkInternalApi
final CreateDeviceFleetResult executeCreateDeviceFleet(CreateDeviceFleetRequest createDeviceFleetRequest) {
ExecutionContext executionContext = createExecutionContext(createDeviceFleetRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateDeviceFleetRequest> request = null;
        Response<CreateDeviceFleetResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateDeviceFleetRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createDeviceFleetRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateDeviceFleet");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            HttpResponseHandler<AmazonWebServiceResponse<CreateDeviceFleetResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateDeviceFleetResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
 * Creates a Domain. A domain consists of an associated Amazon Elastic File System volume, a list of
* authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC)
* configurations. Users within a domain can share notebook files and other artifacts with each other.
*
*
* EFS storage
*
*
* When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user
* receives a private home directory within the EFS volume for notebooks, Git repositories, and data files.
*
*
* SageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume
* attached to the domain with an Amazon Web Services managed key by default. For more control, you can specify a
* customer managed key. For more information, see Protect Data at Rest Using
* Encryption.
*
*
* VPC configuration
*
*
* All traffic between the domain and the Amazon EFS volume is through the specified VPC and subnets. For other
 * traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType
* corresponds to the network access type that you choose when you onboard to the domain. The following options are
* available:
*
*
* -
*
 * PublicInternetOnly - Non-EFS traffic goes through a VPC managed by Amazon SageMaker, which allows
* internet access. This is the default value.
*
*
* -
*
 * VpcOnly - All traffic is through the specified VPC and subnets. Internet access is disabled by
* default. To allow internet access, you must specify a NAT gateway.
*
*
 * When internet access is disabled, you won't be able to run an Amazon SageMaker Studio notebook or to train or host
* models unless your VPC has an interface endpoint to the SageMaker API and runtime or a NAT gateway and your
* security groups allow outbound connections.
*
*
*
*
*
 * NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch an
* Amazon SageMaker Studio app successfully.
*
*
*
* For more information, see Connect Amazon
* SageMaker Studio Notebooks to Resources in a VPC.
*
*
* @param createDomainRequest
* @return Result of the CreateDomain operation returned by the service.
* @throws ResourceLimitExceededException
 * You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @sample AmazonSageMaker.CreateDomain
* @see AWS API
* Documentation
*/
@Override
public CreateDomainResult createDomain(CreateDomainRequest request) {
request = beforeClientExecution(request);
return executeCreateDomain(request);
}
@SdkInternalApi
final CreateDomainResult executeCreateDomain(CreateDomainRequest createDomainRequest) {
ExecutionContext executionContext = createExecutionContext(createDomainRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateDomainRequest> request = null;
        Response<CreateDomainResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateDomainRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createDomainRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateDomain");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            HttpResponseHandler<AmazonWebServiceResponse<CreateDomainResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateDomainResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
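    /*
     * Illustrative sketch (editor's addition): creating a domain with IAM authentication. The VPC,
     * subnet, and role identifiers are hypothetical placeholders.
     *
     *   CreateDomainResult result = sageMaker.createDomain(new CreateDomainRequest()
     *           .withDomainName("example-domain")
     *           .withAuthMode("IAM")
     *           .withDefaultUserSettings(new UserSettings()
     *                   .withExecutionRole("arn:aws:iam::111122223333:role/ExampleSageMakerRole"))
     *           .withVpcId("vpc-0abc123def456ghij")
     *           .withSubnetIds("subnet-0abc123def456ghij"));
     */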
/**
*
* Creates an edge deployment plan, consisting of multiple stages. Each stage may have a different deployment
* configuration and devices.
*
*
* @param createEdgeDeploymentPlanRequest
* @return Result of the CreateEdgeDeploymentPlan operation returned by the service.
* @throws ResourceLimitExceededException
 * You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateEdgeDeploymentPlan
* @see AWS API Documentation
*/
@Override
public CreateEdgeDeploymentPlanResult createEdgeDeploymentPlan(CreateEdgeDeploymentPlanRequest request) {
request = beforeClientExecution(request);
return executeCreateEdgeDeploymentPlan(request);
}
@SdkInternalApi
final CreateEdgeDeploymentPlanResult executeCreateEdgeDeploymentPlan(CreateEdgeDeploymentPlanRequest createEdgeDeploymentPlanRequest) {
ExecutionContext executionContext = createExecutionContext(createEdgeDeploymentPlanRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
        Request<CreateEdgeDeploymentPlanRequest> request = null;
        Response<CreateEdgeDeploymentPlanResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateEdgeDeploymentPlanRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(createEdgeDeploymentPlanRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateEdgeDeploymentPlan");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
            HttpResponseHandler<AmazonWebServiceResponse<CreateEdgeDeploymentPlanResult>> responseHandler = protocolFactory.createResponseHandler(
                    new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
                    new CreateEdgeDeploymentPlanResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates a new stage in an existing edge deployment plan.
*
*
* @param createEdgeDeploymentStageRequest
* @return Result of the CreateEdgeDeploymentStage operation returned by the service.
* @throws ResourceLimitExceededException
 * You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateEdgeDeploymentStage
* @see AWS API Documentation
*/
@Override
public CreateEdgeDeploymentStageResult createEdgeDeploymentStage(CreateEdgeDeploymentStageRequest request) {
request = beforeClientExecution(request);
return executeCreateEdgeDeploymentStage(request);
}
@SdkInternalApi
final CreateEdgeDeploymentStageResult executeCreateEdgeDeploymentStage(CreateEdgeDeploymentStageRequest createEdgeDeploymentStageRequest) {
ExecutionContext executionContext = createExecutionContext(createEdgeDeploymentStageRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateEdgeDeploymentStageRequest> request = null;
Response<CreateEdgeDeploymentStageResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateEdgeDeploymentStageRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(createEdgeDeploymentStageRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateEdgeDeploymentStage");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateEdgeDeploymentStageResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new CreateEdgeDeploymentStageResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Starts a SageMaker Edge Manager model packaging job. Edge Manager will use the model artifacts from the Amazon
* Simple Storage Service bucket that you specify. After the model has been packaged, Amazon SageMaker saves the
* resulting artifacts to an S3 bucket that you specify.
*
*
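* A minimal, illustrative request sketch (not part of the generated documentation). It assumes the standard
* generated withXxx setters for this operation; every name, ARN, and S3 URI shown is a placeholder.
*
* <pre>{@code
* AmazonSageMaker sm = AmazonSageMakerClientBuilder.standard().build();
* CreateEdgePackagingJobResult result = sm.createEdgePackagingJob(new CreateEdgePackagingJobRequest()
*         .withEdgePackagingJobName("my-edge-packaging-job")         // placeholder job name
*         .withCompilationJobName("my-neo-compilation-job")          // completed SageMaker Neo compilation job
*         .withModelName("my-model").withModelVersion("1.0")
*         .withRoleArn("arn:aws:iam::123456789012:role/EdgeManagerRole")
*         .withOutputConfig(new EdgeOutputConfig()
*                 .withS3OutputLocation("s3://my-bucket/edge-output/")));
* }</pre>
*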
* @param createEdgePackagingJobRequest
* @return Result of the CreateEdgePackagingJob operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateEdgePackagingJob
* @see AWS API Documentation
*/
@Override
public CreateEdgePackagingJobResult createEdgePackagingJob(CreateEdgePackagingJobRequest request) {
request = beforeClientExecution(request);
return executeCreateEdgePackagingJob(request);
}
@SdkInternalApi
final CreateEdgePackagingJobResult executeCreateEdgePackagingJob(CreateEdgePackagingJobRequest createEdgePackagingJobRequest) {
ExecutionContext executionContext = createExecutionContext(createEdgePackagingJobRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateEdgePackagingJobRequest> request = null;
Response<CreateEdgePackagingJobResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateEdgePackagingJobRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createEdgePackagingJobRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateEdgePackagingJob");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateEdgePackagingJobResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new CreateEdgePackagingJobResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates an endpoint using the endpoint configuration specified in the request. SageMaker uses the endpoint to
* provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.
*
*
* Use this API to deploy models using SageMaker hosting services.
*
*
*
* You must not delete an EndpointConfig that is in use by an endpoint that is live or while the UpdateEndpoint or
* CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new
* EndpointConfig.
*
*
*
* The endpoint name must be unique within an Amazon Web Services Region in your Amazon Web Services account.
*
*
* When it receives the request, SageMaker creates the endpoint, launches the resources (ML compute instances), and
* deploys the model(s) on them.
*
*
*
* When you call CreateEndpoint, a
* load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a
* DynamoDB table supporting Eventually Consistent Reads, the response might not reflect the results of a recently completed
* write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB,
* this causes a validation error. If you repeat your read request after a short time, the response should return
* the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers
* call
* DescribeEndpointConfig before calling CreateEndpoint to
* minimize the potential impact of a DynamoDB eventually consistent read.
*
*
*
* When SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it
* sets the status to InService. SageMaker can then process incoming requests for inferences. To check the status of
* an endpoint, use the DescribeEndpoint API.
*
*
* If any of the models hosted at this endpoint get model data from an Amazon S3 location, SageMaker uses Amazon Web
* Services Security Token Service to download model artifacts from the S3 path you provided. Amazon Web Services
* STS is activated in your Amazon Web Services account by default. If you previously deactivated Amazon Web
* Services STS for a region, you need to reactivate Amazon Web Services STS for that region. For more information,
* see Activating
* and Deactivating Amazon Web Services STS in an Amazon Web Services Region in the Amazon Web Services
* Identity and Access Management User Guide.
*
*
*
* To add the IAM role policies for using this API operation, go to the IAM console, and choose Roles in the left navigation pane. Search
* the IAM role that you want to grant access to use the CreateEndpoint and CreateEndpointConfig API operations, add
* the following policies to the role.
*
*
* -
*
* Option 1: For full SageMaker access, search for and attach the AmazonSageMakerFullAccess policy.
*
*
* -
*
* Option 2: To grant limited access to an IAM role, paste the following Action elements manually into the
* JSON file of the IAM role:
*
*
* "Action": ["sagemaker:CreateEndpoint", "sagemaker:CreateEndpointConfig"]
*
*
* "Resource": [
*
*
* "arn:aws:sagemaker:region:account-id:endpoint/endpointName"
*
*
* "arn:aws:sagemaker:region:account-id:endpoint-config/endpointConfigName"
*
*
* ]
*
*
* For more information, see SageMaker API Permissions:
* Actions, Permissions, and Resources Reference.
*
*
*
*
*
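* The following is an illustrative sketch only, assuming the standard generated withXxx setters; the endpoint and
* endpoint configuration names are placeholders and must refer to resources in your account.
*
* <pre>{@code
* AmazonSageMaker sm = AmazonSageMakerClientBuilder.standard().build();
* CreateEndpointResult result = sm.createEndpoint(new CreateEndpointRequest()
*         .withEndpointName("my-endpoint")                 // placeholder endpoint name
*         .withEndpointConfigName("my-endpoint-config"));  // existing endpoint configuration
* // The endpoint starts in the Creating status; poll DescribeEndpoint (or use the SDK waiters) until InService.
* }</pre>
*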
* @param createEndpointRequest
* @return Result of the CreateEndpoint operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateEndpoint
* @see AWS API
* Documentation
*/
@Override
public CreateEndpointResult createEndpoint(CreateEndpointRequest request) {
request = beforeClientExecution(request);
return executeCreateEndpoint(request);
}
@SdkInternalApi
final CreateEndpointResult executeCreateEndpoint(CreateEndpointRequest createEndpointRequest) {
ExecutionContext executionContext = createExecutionContext(createEndpointRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateEndpointRequest> request = null;
Response<CreateEndpointResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateEndpointRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createEndpointRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateEndpoint");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateEndpointResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateEndpointResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates an endpoint configuration that SageMaker hosting services uses to deploy models. In the configuration,
* you identify one or more models, created using the CreateModel API, to deploy and the resources that
* you want SageMaker to provision. Then you call the CreateEndpoint API.
*
*
*
* Use this API if you want to use SageMaker hosting services to deploy models into production.
*
*
*
* In the request, you define a ProductionVariant for each model that you want to deploy. Each ProductionVariant
* parameter also describes the resources that you want SageMaker to provision. This
* includes the number and type of ML compute instances to deploy.
*
*
* If you are hosting multiple models, you also assign a VariantWeight to specify how much traffic you
* want to allocate to each model. For example, suppose that you want to host two models, A and B, and you assign
* traffic weight 2 for model A and 1 for model B. SageMaker distributes two-thirds of the traffic to Model A, and
* one-third to model B.
*
*
*
* When you call CreateEndpoint, a
* load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a
* DynamoDB table supporting Eventually Consistent Reads, the response might not reflect the results of a recently completed
* write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB,
* this causes a validation error. If you repeat your read request after a short time, the response should return
* the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers
* call
* DescribeEndpointConfig before calling CreateEndpoint to
* minimize the potential impact of a DynamoDB eventually consistent read.
*
*
*
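* Illustrative sketch only (it assumes the usual generated setters; model names, instance type, and counts are
* placeholders):
*
* <pre>{@code
* AmazonSageMaker sm = AmazonSageMakerClientBuilder.standard().build();
* CreateEndpointConfigResult result = sm.createEndpointConfig(new CreateEndpointConfigRequest()
*         .withEndpointConfigName("my-endpoint-config")
*         .withProductionVariants(
*                 new ProductionVariant().withVariantName("variant-a").withModelName("model-a")
*                         .withInstanceType("ml.m5.large").withInitialInstanceCount(1).withInitialVariantWeight(2f),
*                 new ProductionVariant().withVariantName("variant-b").withModelName("model-b")
*                         .withInstanceType("ml.m5.large").withInitialInstanceCount(1).withInitialVariantWeight(1f)));
* // With weights 2 and 1, roughly two-thirds of traffic goes to variant-a and one-third to variant-b.
* }</pre>
*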
* @param createEndpointConfigRequest
* @return Result of the CreateEndpointConfig operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateEndpointConfig
* @see AWS
* API Documentation
*/
@Override
public CreateEndpointConfigResult createEndpointConfig(CreateEndpointConfigRequest request) {
request = beforeClientExecution(request);
return executeCreateEndpointConfig(request);
}
@SdkInternalApi
final CreateEndpointConfigResult executeCreateEndpointConfig(CreateEndpointConfigRequest createEndpointConfigRequest) {
ExecutionContext executionContext = createExecutionContext(createEndpointConfigRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateEndpointConfigRequest> request = null;
Response<CreateEndpointConfigResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateEndpointConfigRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createEndpointConfigRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateEndpointConfig");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateEndpointConfigResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateEndpointConfigResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates a SageMaker experiment. An experiment is a collection of trials that are observed, compared
* and evaluated as a group. A trial is a set of steps, called trial components, that produce a machine
* learning model.
*
*
*
* In the Studio UI, trials are referred to as run groups and trial components are referred to as
* runs.
*
*
*
* The goal of an experiment is to determine the components that produce the best model. Multiple trials are
* performed, each one isolating and measuring the impact of a change to one or more inputs, while keeping the
* remaining inputs constant.
*
*
* When you use SageMaker Studio or the SageMaker Python SDK, all experiments, trials, and trial components are
* automatically tracked, logged, and indexed. When you use the Amazon Web Services SDK for Python (Boto), you must
* use the logging APIs provided by the SDK.
*
*
* You can add tags to experiments, trials, trial components and then use the Search API to search for the
* tags.
*
*
* To add a description to an experiment, specify the optional Description parameter. To add a description later, or
* to change the description, call the UpdateExperiment API.
*
*
* To get a list of all your experiments, call the ListExperiments
* API. To view an experiment's properties, call the DescribeExperiment API. To get a list of all the trials associated with an experiment, call the ListTrials API. To
* create a trial, call the CreateTrial API.
*
*
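* A short illustrative sketch (it assumes the standard generated setters; the names and description are
* placeholders):
*
* <pre>{@code
* AmazonSageMaker sm = AmazonSageMakerClientBuilder.standard().build();
* CreateExperimentResult result = sm.createExperiment(new CreateExperimentRequest()
*         .withExperimentName("churn-prediction")                 // placeholder experiment name
*         .withDisplayName("Churn prediction experiments")
*         .withDescription("Trials comparing feature sets for the churn model"));
* String experimentArn = result.getExperimentArn();
* }</pre>
*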
* @param createExperimentRequest
* @return Result of the CreateExperiment operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateExperiment
* @see AWS API
* Documentation
*/
@Override
public CreateExperimentResult createExperiment(CreateExperimentRequest request) {
request = beforeClientExecution(request);
return executeCreateExperiment(request);
}
@SdkInternalApi
final CreateExperimentResult executeCreateExperiment(CreateExperimentRequest createExperimentRequest) {
ExecutionContext executionContext = createExecutionContext(createExperimentRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateExperimentRequest> request = null;
Response<CreateExperimentResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateExperimentRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createExperimentRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateExperiment");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateExperimentResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateExperimentResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Create a new FeatureGroup. A FeatureGroup is a group of Features defined in the FeatureStore to describe a
* Record.
*
*
* The FeatureGroup defines the schema and features contained in the FeatureGroup. A FeatureGroup definition is
* composed of a list of Features, a RecordIdentifierFeatureName, an EventTimeFeatureName and configurations for its
* OnlineStore and OfflineStore. Check Amazon Web Services service quotas to see the FeatureGroups quota for your
* Amazon Web Services account.
*
*
* Note that it can take approximately 10-15 minutes to provision an OnlineStore FeatureGroup with the InMemory
* StorageType.
*
*
*
* You must include at least one of OnlineStoreConfig and OfflineStoreConfig to create a FeatureGroup.
*
*
*
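* Illustrative sketch only; it assumes the standard generated setters and uses placeholder feature and group names.
* Only an OnlineStoreConfig is shown here.
*
* <pre>{@code
* AmazonSageMaker sm = AmazonSageMakerClientBuilder.standard().build();
* CreateFeatureGroupResult result = sm.createFeatureGroup(new CreateFeatureGroupRequest()
*         .withFeatureGroupName("customers")
*         .withRecordIdentifierFeatureName("customer_id")
*         .withEventTimeFeatureName("event_time")
*         .withFeatureDefinitions(
*                 new FeatureDefinition().withFeatureName("customer_id").withFeatureType("String"),
*                 new FeatureDefinition().withFeatureName("event_time").withFeatureType("String"),
*                 new FeatureDefinition().withFeatureName("lifetime_value").withFeatureType("Fractional"))
*         .withOnlineStoreConfig(new OnlineStoreConfig().withEnableOnlineStore(true)));
* }</pre>
*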
* @param createFeatureGroupRequest
* @return Result of the CreateFeatureGroup operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateFeatureGroup
* @see AWS
* API Documentation
*/
@Override
public CreateFeatureGroupResult createFeatureGroup(CreateFeatureGroupRequest request) {
request = beforeClientExecution(request);
return executeCreateFeatureGroup(request);
}
@SdkInternalApi
final CreateFeatureGroupResult executeCreateFeatureGroup(CreateFeatureGroupRequest createFeatureGroupRequest) {
ExecutionContext executionContext = createExecutionContext(createFeatureGroupRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateFeatureGroupRequest> request = null;
Response<CreateFeatureGroupResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateFeatureGroupRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createFeatureGroupRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateFeatureGroup");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateFeatureGroupResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateFeatureGroupResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates a flow definition.
*
*
* @param createFlowDefinitionRequest
* @return Result of the CreateFlowDefinition operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @sample AmazonSageMaker.CreateFlowDefinition
* @see AWS
* API Documentation
*/
@Override
public CreateFlowDefinitionResult createFlowDefinition(CreateFlowDefinitionRequest request) {
request = beforeClientExecution(request);
return executeCreateFlowDefinition(request);
}
@SdkInternalApi
final CreateFlowDefinitionResult executeCreateFlowDefinition(CreateFlowDefinitionRequest createFlowDefinitionRequest) {
ExecutionContext executionContext = createExecutionContext(createFlowDefinitionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateFlowDefinitionRequest> request = null;
Response<CreateFlowDefinitionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateFlowDefinitionRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createFlowDefinitionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateFlowDefinition");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateFlowDefinitionResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateFlowDefinitionResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Create a hub.
*
*
* @param createHubRequest
* @return Result of the CreateHub operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateHub
* @see AWS API
* Documentation
*/
@Override
public CreateHubResult createHub(CreateHubRequest request) {
request = beforeClientExecution(request);
return executeCreateHub(request);
}
@SdkInternalApi
final CreateHubResult executeCreateHub(CreateHubRequest createHubRequest) {
ExecutionContext executionContext = createExecutionContext(createHubRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateHubRequest> request = null;
Response<CreateHubResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateHubRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createHubRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateHub");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateHubResult>> responseHandler = protocolFactory.createResponseHandler(new JsonOperationMetadata()
.withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateHubResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Create a hub content reference in order to add a model in the JumpStart public hub to a private hub.
*
*
* @param createHubContentReferenceRequest
* @return Result of the CreateHubContentReference operation returned by the service.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateHubContentReference
* @see AWS API Documentation
*/
@Override
public CreateHubContentReferenceResult createHubContentReference(CreateHubContentReferenceRequest request) {
request = beforeClientExecution(request);
return executeCreateHubContentReference(request);
}
@SdkInternalApi
final CreateHubContentReferenceResult executeCreateHubContentReference(CreateHubContentReferenceRequest createHubContentReferenceRequest) {
ExecutionContext executionContext = createExecutionContext(createHubContentReferenceRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateHubContentReferenceRequest> request = null;
Response<CreateHubContentReferenceResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateHubContentReferenceRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(createHubContentReferenceRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateHubContentReference");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateHubContentReferenceResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new CreateHubContentReferenceResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Defines the settings you will use for the human review workflow user interface. Reviewers will see a three-panel
* interface with an instruction area, the item to review, and an input area.
*
*
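* Illustrative sketch only (standard generated setters assumed; the name and the worker template content are
* placeholders):
*
* <pre>{@code
* String template = "<p>{{ task.input.text }}</p>"; // placeholder Liquid/HTML worker task template
* AmazonSageMaker sm = AmazonSageMakerClientBuilder.standard().build();
* CreateHumanTaskUiResult result = sm.createHumanTaskUi(new CreateHumanTaskUiRequest()
*         .withHumanTaskUiName("my-review-ui")
*         .withUiTemplate(new UiTemplate().withContent(template)));
* }</pre>
*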
* @param createHumanTaskUiRequest
* @return Result of the CreateHumanTaskUi operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @sample AmazonSageMaker.CreateHumanTaskUi
* @see AWS
* API Documentation
*/
@Override
public CreateHumanTaskUiResult createHumanTaskUi(CreateHumanTaskUiRequest request) {
request = beforeClientExecution(request);
return executeCreateHumanTaskUi(request);
}
@SdkInternalApi
final CreateHumanTaskUiResult executeCreateHumanTaskUi(CreateHumanTaskUiRequest createHumanTaskUiRequest) {
ExecutionContext executionContext = createExecutionContext(createHumanTaskUiRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateHumanTaskUiRequest> request = null;
Response<CreateHumanTaskUiResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateHumanTaskUiRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createHumanTaskUiRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateHumanTaskUi");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateHumanTaskUiResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateHumanTaskUiResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Starts a hyperparameter tuning job. A hyperparameter tuning job finds the best version of a model by running many
* training jobs on your dataset using the algorithm you choose and values for hyperparameters within ranges that
* you specify. It then chooses the hyperparameter values that result in a model that performs the best, as measured
* by an objective metric that you choose.
*
*
* A hyperparameter tuning job automatically creates Amazon SageMaker experiments, trials, and trial components for
* each training job that it runs. You can view these entities in Amazon SageMaker Studio. For more information, see
* View
* Experiments, Trials, and Trial Components.
*
*
*
* Do not include any security-sensitive information including account access IDs, secrets or tokens in any
* hyperparameter field. If the use of security-sensitive credentials is detected, SageMaker will reject your
* training job request and return an error.
*
*
*
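* Illustrative sketch only; it assumes the standard generated setters, and the two helper methods shown are
* hypothetical stand-ins for building the nested tuning and training-job configuration objects.
*
* <pre>{@code
* // buildTuningConfig() and buildTrainingJobDefinition() are hypothetical helpers that assemble the
* // strategy/objective/range settings and the algorithm/input/instance settings respectively.
* HyperParameterTuningJobConfig tuningConfig = buildTuningConfig();
* HyperParameterTrainingJobDefinition trainingJobDefinition = buildTrainingJobDefinition();
* AmazonSageMaker sm = AmazonSageMakerClientBuilder.standard().build();
* CreateHyperParameterTuningJobResult result = sm.createHyperParameterTuningJob(
*         new CreateHyperParameterTuningJobRequest()
*                 .withHyperParameterTuningJobName("my-tuning-job")   // placeholder job name
*                 .withHyperParameterTuningJobConfig(tuningConfig)
*                 .withTrainingJobDefinition(trainingJobDefinition));
* }</pre>
*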
* @param createHyperParameterTuningJobRequest
* @return Result of the CreateHyperParameterTuningJob operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateHyperParameterTuningJob
* @see AWS API Documentation
*/
@Override
public CreateHyperParameterTuningJobResult createHyperParameterTuningJob(CreateHyperParameterTuningJobRequest request) {
request = beforeClientExecution(request);
return executeCreateHyperParameterTuningJob(request);
}
@SdkInternalApi
final CreateHyperParameterTuningJobResult executeCreateHyperParameterTuningJob(CreateHyperParameterTuningJobRequest createHyperParameterTuningJobRequest) {
ExecutionContext executionContext = createExecutionContext(createHyperParameterTuningJobRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateHyperParameterTuningJobRequest> request = null;
Response<CreateHyperParameterTuningJobResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateHyperParameterTuningJobRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(createHyperParameterTuningJobRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateHyperParameterTuningJob");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateHyperParameterTuningJobResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new CreateHyperParameterTuningJobResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates a custom SageMaker image. A SageMaker image is a set of image versions. Each image version represents a
* container image stored in Amazon ECR. For more information, see Bring your own SageMaker image.
*
*
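* Illustrative sketch only (standard generated setters assumed; the names and the role ARN are placeholders):
*
* <pre>{@code
* AmazonSageMaker sm = AmazonSageMakerClientBuilder.standard().build();
* CreateImageResult result = sm.createImage(new CreateImageRequest()
*         .withImageName("my-custom-image")
*         .withDisplayName("My custom image")
*         .withRoleArn("arn:aws:iam::123456789012:role/SageMakerImageRole"));
* }</pre>
*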
* @param createImageRequest
* @return Result of the CreateImage operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateImage
* @see AWS API
* Documentation
*/
@Override
public CreateImageResult createImage(CreateImageRequest request) {
request = beforeClientExecution(request);
return executeCreateImage(request);
}
@SdkInternalApi
final CreateImageResult executeCreateImage(CreateImageRequest createImageRequest) {
ExecutionContext executionContext = createExecutionContext(createImageRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateImageRequest> request = null;
Response<CreateImageResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateImageRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createImageRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateImage");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateImageResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateImageResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates a version of the SageMaker image specified by ImageName. The version represents the Amazon ECR container
* image specified by BaseImage.
*
*
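* Illustrative sketch only (standard generated setters assumed; the image name, ECR URI, and token are
* placeholders):
*
* <pre>{@code
* AmazonSageMaker sm = AmazonSageMakerClientBuilder.standard().build();
* CreateImageVersionResult result = sm.createImageVersion(new CreateImageVersionRequest()
*         .withImageName("my-custom-image")                                            // existing SageMaker image
*         .withBaseImage("123456789012.dkr.ecr.us-west-2.amazonaws.com/my-image:1.0")  // ECR container image
*         .withClientToken(java.util.UUID.randomUUID().toString()));                   // idempotency token
* }</pre>
*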
* @param createImageVersionRequest
* @return Result of the CreateImageVersion operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @sample AmazonSageMaker.CreateImageVersion
* @see AWS
* API Documentation
*/
@Override
public CreateImageVersionResult createImageVersion(CreateImageVersionRequest request) {
request = beforeClientExecution(request);
return executeCreateImageVersion(request);
}
@SdkInternalApi
final CreateImageVersionResult executeCreateImageVersion(CreateImageVersionRequest createImageVersionRequest) {
ExecutionContext executionContext = createExecutionContext(createImageVersionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateImageVersionRequest> request = null;
Response<CreateImageVersionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateImageVersionRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createImageVersionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateImageVersion");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateImageVersionResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateImageVersionResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates an inference component, which is a SageMaker hosting object that you can use to deploy a model to an
* endpoint. In the inference component settings, you specify the model, the endpoint, and how the model utilizes
* the resources that the endpoint hosts. You can optimize resource utilization by tailoring how the required CPU
* cores, accelerators, and memory are allocated. You can deploy multiple inference components to an endpoint, where
* each inference component contains one model and the resource utilization needs for that individual model. After
* you deploy an inference component, you can directly invoke the associated model when you use the InvokeEndpoint
* API action.
*
*
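* Illustrative sketch only; it assumes the standard generated setters, uses placeholder names, and omits the
* compute resource requirements that a real specification also needs.
*
* <pre>{@code
* AmazonSageMaker sm = AmazonSageMakerClientBuilder.standard().build();
* CreateInferenceComponentResult result = sm.createInferenceComponent(new CreateInferenceComponentRequest()
*         .withInferenceComponentName("my-inference-component")
*         .withEndpointName("my-endpoint")
*         .withVariantName("variant-a")
*         .withSpecification(new InferenceComponentSpecification()
*                 .withModelName("my-model"))               // resource requirements omitted in this sketch
*         .withRuntimeConfig(new InferenceComponentRuntimeConfig().withCopyCount(1)));
* }</pre>
*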
* @param createInferenceComponentRequest
* @return Result of the CreateInferenceComponent operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateInferenceComponent
* @see AWS API Documentation
*/
@Override
public CreateInferenceComponentResult createInferenceComponent(CreateInferenceComponentRequest request) {
request = beforeClientExecution(request);
return executeCreateInferenceComponent(request);
}
@SdkInternalApi
final CreateInferenceComponentResult executeCreateInferenceComponent(CreateInferenceComponentRequest createInferenceComponentRequest) {
ExecutionContext executionContext = createExecutionContext(createInferenceComponentRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateInferenceComponentRequest> request = null;
Response<CreateInferenceComponentResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateInferenceComponentRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(createInferenceComponentRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateInferenceComponent");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateInferenceComponentResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new CreateInferenceComponentResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates an inference experiment using the configurations specified in the request.
*
*
* Use this API to set up and schedule an experiment to compare model variants on an Amazon SageMaker inference
* endpoint. For more information about inference experiments, see Shadow tests.
*
*
* Amazon SageMaker begins your experiment at the scheduled time and routes traffic to your endpoint's model
* variants based on your specified configuration.
*
*
* While the experiment is in progress or after it has concluded, you can view metrics that compare your model
* variants. For more information, see View, monitor, and
* edit shadow tests.
*
*
* @param createInferenceExperimentRequest
* @return Result of the CreateInferenceExperiment operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateInferenceExperiment
* @see AWS API Documentation
*/
@Override
public CreateInferenceExperimentResult createInferenceExperiment(CreateInferenceExperimentRequest request) {
request = beforeClientExecution(request);
return executeCreateInferenceExperiment(request);
}
@SdkInternalApi
final CreateInferenceExperimentResult executeCreateInferenceExperiment(CreateInferenceExperimentRequest createInferenceExperimentRequest) {
ExecutionContext executionContext = createExecutionContext(createInferenceExperimentRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateInferenceExperimentRequest> request = null;
Response<CreateInferenceExperimentResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateInferenceExperimentRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(createInferenceExperimentRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateInferenceExperiment");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateInferenceExperimentResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new CreateInferenceExperimentResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Starts a recommendation job. You can create either an instance recommendation or load test job.
*
*
* @param createInferenceRecommendationsJobRequest
* @return Result of the CreateInferenceRecommendationsJob operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateInferenceRecommendationsJob
* @see AWS API Documentation
*/
@Override
public CreateInferenceRecommendationsJobResult createInferenceRecommendationsJob(CreateInferenceRecommendationsJobRequest request) {
request = beforeClientExecution(request);
return executeCreateInferenceRecommendationsJob(request);
}
@SdkInternalApi
final CreateInferenceRecommendationsJobResult executeCreateInferenceRecommendationsJob(
CreateInferenceRecommendationsJobRequest createInferenceRecommendationsJobRequest) {
ExecutionContext executionContext = createExecutionContext(createInferenceRecommendationsJobRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateInferenceRecommendationsJobRequest> request = null;
Response<CreateInferenceRecommendationsJobResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateInferenceRecommendationsJobRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(createInferenceRecommendationsJobRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateInferenceRecommendationsJob");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateInferenceRecommendationsJobResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new CreateInferenceRecommendationsJobResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates a job that uses workers to label the data objects in your input dataset. You can use the labeled data to
* train machine learning models.
*
*
* You can select your workforce from one of three providers:
*
*
* -
*
* A private workforce that you create. It can include employees, contractors, and outside experts. Use a private
* workforce when you want the data to stay within your organization or when a specific set of skills is required.
*
*
* -
*
* One or more vendors that you select from the Amazon Web Services Marketplace. Vendors provide expertise in
* specific areas.
*
*
* -
*
* The Amazon Mechanical Turk workforce. This is the largest workforce, but it should only be used for public data
* or data that has been stripped of any personally identifiable information.
*
*
*
*
* You can also use automated data labeling to reduce the number of data objects that need to be labeled by a
* human. Automated data labeling uses active learning to determine if a data object can be labeled by
* machine or if it needs to be sent to a human worker. For more information, see Using Automated Data
* Labeling.
*
*
* The data objects to be labeled are contained in an Amazon S3 bucket. You create a manifest file that
* describes the location of each object. For more information, see Using Input and Output Data.
*
*
* The output can be used as the manifest file for another labeling job or as training data for your machine
* learning models.
*
*
* You can use this operation to create a static labeling job or a streaming labeling job. A static labeling job
* stops if all data objects in the input manifest file identified in ManifestS3Uri have been labeled. A streaming
* labeling job runs perpetually until it is manually stopped, or remains idle for 10 days. You can send new data
* objects to an active (InProgress) streaming labeling job in real time. To learn how to
* create a static labeling job, see Create a Labeling Job
* (API) in the Amazon SageMaker Developer Guide. To learn how to create a streaming labeling job, see Create a Streaming Labeling
* Job.
*
*
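* A partial, illustrative sketch (standard generated setters assumed; bucket paths, names, and the role ARN are
* placeholders, and the required HumanTaskConfig is left unpopulated for brevity):
*
* <pre>{@code
* AmazonSageMaker sm = AmazonSageMakerClientBuilder.standard().build();
* CreateLabelingJobResult result = sm.createLabelingJob(new CreateLabelingJobRequest()
*         .withLabelingJobName("my-labeling-job")
*         .withLabelAttributeName("label")
*         .withRoleArn("arn:aws:iam::123456789012:role/GroundTruthRole")
*         .withInputConfig(new LabelingJobInputConfig().withDataSource(new LabelingJobDataSource()
*                 .withS3DataSource(new LabelingJobS3DataSource().withManifestS3Uri("s3://my-bucket/manifest.json"))))
*         .withOutputConfig(new LabelingJobOutputConfig().withS3OutputPath("s3://my-bucket/output/"))
*         .withHumanTaskConfig(new HumanTaskConfig()));     // a real job needs a fully populated HumanTaskConfig
* }</pre>
*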
* @param createLabelingJobRequest
* @return Result of the CreateLabelingJob operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateLabelingJob
* @see AWS
* API Documentation
*/
@Override
public CreateLabelingJobResult createLabelingJob(CreateLabelingJobRequest request) {
request = beforeClientExecution(request);
return executeCreateLabelingJob(request);
}
@SdkInternalApi
final CreateLabelingJobResult executeCreateLabelingJob(CreateLabelingJobRequest createLabelingJobRequest) {
ExecutionContext executionContext = createExecutionContext(createLabelingJobRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateLabelingJobRequest> request = null;
Response<CreateLabelingJobResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateLabelingJobRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createLabelingJobRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateLabelingJob");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateLabelingJobResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateLabelingJobResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates an MLflow Tracking Server using a general purpose Amazon S3 bucket as the artifact store. For more
* information, see Create an MLflow
* Tracking Server.
*
*
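* Illustrative sketch only (standard generated setters assumed; the server name, S3 URI, and role ARN are
* placeholders):
*
* <pre>{@code
* AmazonSageMaker sm = AmazonSageMakerClientBuilder.standard().build();
* CreateMlflowTrackingServerResult result = sm.createMlflowTrackingServer(new CreateMlflowTrackingServerRequest()
*         .withTrackingServerName("my-mlflow-server")
*         .withArtifactStoreUri("s3://my-bucket/mlflow-artifacts/")   // general purpose S3 bucket
*         .withRoleArn("arn:aws:iam::123456789012:role/MlflowTrackingServerRole"));
* }</pre>
*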
* @param createMlflowTrackingServerRequest
* @return Result of the CreateMlflowTrackingServer operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateMlflowTrackingServer
* @see AWS API Documentation
*/
@Override
public CreateMlflowTrackingServerResult createMlflowTrackingServer(CreateMlflowTrackingServerRequest request) {
request = beforeClientExecution(request);
return executeCreateMlflowTrackingServer(request);
}
@SdkInternalApi
final CreateMlflowTrackingServerResult executeCreateMlflowTrackingServer(CreateMlflowTrackingServerRequest createMlflowTrackingServerRequest) {
ExecutionContext executionContext = createExecutionContext(createMlflowTrackingServerRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateMlflowTrackingServerRequest> request = null;
Response<CreateMlflowTrackingServerResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateMlflowTrackingServerRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(createMlflowTrackingServerRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateMlflowTrackingServer");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateMlflowTrackingServerResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new CreateMlflowTrackingServerResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates a model in SageMaker. In the request, you name the model and describe a primary container. For the
* primary container, you specify the Docker image that contains inference code, artifacts (from prior training),
* and a custom environment map that the inference code uses when you deploy the model for predictions.
*
*
* Use this API to create a model if you want to use SageMaker hosting services or run a batch transform job.
*
*
* To host your model, you create an endpoint configuration with the CreateEndpointConfig API, and then create an
* endpoint with the CreateEndpoint API. SageMaker then deploys all of the containers that
* you defined for the model in the hosting environment.
*
*
* To run a batch transform using your model, you start a job with the CreateTransformJob API.
* SageMaker uses your model and your dataset to get inferences which are then saved to a specified S3 location.
*
*
* In the request, you also provide an IAM role that SageMaker can assume to access model artifacts and docker image
* for deployment on ML compute hosting instances or for batch transform jobs. In addition, you also use the IAM
* role to manage permissions the inference code needs. For example, if the inference code accesses any other Amazon
* Web Services resources, you grant necessary permissions via this role.
*
*
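* Illustrative sketch only; it assumes the standard generated setters, and the image URI, model data URL, and role
* ARN are placeholders.
*
* <pre>{@code
* AmazonSageMaker sm = AmazonSageMakerClientBuilder.standard().build();
* CreateModelResult result = sm.createModel(new CreateModelRequest()
*         .withModelName("my-model")
*         .withPrimaryContainer(new ContainerDefinition()
*                 .withImage("123456789012.dkr.ecr.us-west-2.amazonaws.com/my-inference-image:latest")
*                 .withModelDataUrl("s3://my-bucket/model/model.tar.gz"))
*         .withExecutionRoleArn("arn:aws:iam::123456789012:role/SageMakerExecutionRole"));
* }</pre>
*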
* @param createModelRequest
* @return Result of the CreateModel operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateModel
* @see AWS API
* Documentation
*/
@Override
public CreateModelResult createModel(CreateModelRequest request) {
request = beforeClientExecution(request);
return executeCreateModel(request);
}
@SdkInternalApi
final CreateModelResult executeCreateModel(CreateModelRequest createModelRequest) {
ExecutionContext executionContext = createExecutionContext(createModelRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateModelRequest> request = null;
Response<CreateModelResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateModelRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createModelRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateModel");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateModelResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateModelResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates the definition for a model bias job.
*
*
* @param createModelBiasJobDefinitionRequest
* @return Result of the CreateModelBiasJobDefinition operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @sample AmazonSageMaker.CreateModelBiasJobDefinition
* @see AWS API Documentation
*/
@Override
public CreateModelBiasJobDefinitionResult createModelBiasJobDefinition(CreateModelBiasJobDefinitionRequest request) {
request = beforeClientExecution(request);
return executeCreateModelBiasJobDefinition(request);
}
@SdkInternalApi
final CreateModelBiasJobDefinitionResult executeCreateModelBiasJobDefinition(CreateModelBiasJobDefinitionRequest createModelBiasJobDefinitionRequest) {
ExecutionContext executionContext = createExecutionContext(createModelBiasJobDefinitionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateModelBiasJobDefinitionRequest> request = null;
Response<CreateModelBiasJobDefinitionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateModelBiasJobDefinitionRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(createModelBiasJobDefinitionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateModelBiasJobDefinition");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateModelBiasJobDefinitionResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new CreateModelBiasJobDefinitionResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates an Amazon SageMaker Model Card.
*
*
* For information about how to use model cards, see Amazon SageMaker Model Card.
*
*
* @param createModelCardRequest
* @return Result of the CreateModelCard operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @throws ConflictException
* There was a conflict when you attempted to modify a SageMaker entity such as an Experiment or
* Artifact.
* @sample AmazonSageMaker.CreateModelCard
* @see AWS API
* Documentation
*/
@Override
public CreateModelCardResult createModelCard(CreateModelCardRequest request) {
request = beforeClientExecution(request);
return executeCreateModelCard(request);
}
@SdkInternalApi
final CreateModelCardResult executeCreateModelCard(CreateModelCardRequest createModelCardRequest) {
ExecutionContext executionContext = createExecutionContext(createModelCardRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateModelCardRequest> request = null;
Response<CreateModelCardResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateModelCardRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createModelCardRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateModelCard");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateModelCardResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateModelCardResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
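// Illustrative usage sketch for createModelCard (not generated code). The card name is a placeholder and
// the content string is only a minimal assumed example of the model card JSON schema.
//
// AmazonSageMaker sageMaker = AmazonSageMakerClientBuilder.defaultClient();
// sageMaker.createModelCard(new CreateModelCardRequest()
//         .withModelCardName("example-model-card")
//         .withModelCardStatus("Draft")
//         .withContent("{\"model_overview\": {\"model_description\": \"example model\"}}"));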
/**
*
* Creates an Amazon SageMaker Model Card export job.
*
*
* @param createModelCardExportJobRequest
* @return Result of the CreateModelCardExportJob operation returned by the service.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @throws ConflictException
* There was a conflict when you attempted to modify a SageMaker entity such as an Experiment or
* Artifact.
* @sample AmazonSageMaker.CreateModelCardExportJob
* @see AWS API Documentation
*/
@Override
public CreateModelCardExportJobResult createModelCardExportJob(CreateModelCardExportJobRequest request) {
request = beforeClientExecution(request);
return executeCreateModelCardExportJob(request);
}
@SdkInternalApi
final CreateModelCardExportJobResult executeCreateModelCardExportJob(CreateModelCardExportJobRequest createModelCardExportJobRequest) {
ExecutionContext executionContext = createExecutionContext(createModelCardExportJobRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateModelCardExportJobRequest> request = null;
Response<CreateModelCardExportJobResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateModelCardExportJobRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(createModelCardExportJobRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateModelCardExportJob");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateModelCardExportJobResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new CreateModelCardExportJobResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates the definition for a model explainability job.
*
*
* @param createModelExplainabilityJobDefinitionRequest
* @return Result of the CreateModelExplainabilityJobDefinition operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @sample AmazonSageMaker.CreateModelExplainabilityJobDefinition
* @see AWS API Documentation
*/
@Override
public CreateModelExplainabilityJobDefinitionResult createModelExplainabilityJobDefinition(CreateModelExplainabilityJobDefinitionRequest request) {
request = beforeClientExecution(request);
return executeCreateModelExplainabilityJobDefinition(request);
}
@SdkInternalApi
final CreateModelExplainabilityJobDefinitionResult executeCreateModelExplainabilityJobDefinition(
CreateModelExplainabilityJobDefinitionRequest createModelExplainabilityJobDefinitionRequest) {
ExecutionContext executionContext = createExecutionContext(createModelExplainabilityJobDefinitionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateModelExplainabilityJobDefinitionRequest> request = null;
Response<CreateModelExplainabilityJobDefinitionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateModelExplainabilityJobDefinitionRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(createModelExplainabilityJobDefinitionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateModelExplainabilityJobDefinition");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateModelExplainabilityJobDefinitionResult>> responseHandler = protocolFactory
.createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new CreateModelExplainabilityJobDefinitionResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates a model package that you can use to create SageMaker models or list on Amazon Web Services Marketplace,
* or a versioned model that is part of a model group. Buyers can subscribe to model packages listed on Amazon Web
* Services Marketplace to create models in SageMaker.
*
*
* To create a model package by specifying a Docker container that contains your inference code and the Amazon S3
* location of your model artifacts, provide values for InferenceSpecification. To create a model from
* an algorithm resource that you created or subscribed to in Amazon Web Services Marketplace, provide a value for
* SourceAlgorithmSpecification.
*
*
*
* There are two types of model packages:
*
*
* -
*
* Versioned - a model that is part of a model group in the model registry.
*
*
* -
*
* Unversioned - a model package that is not part of a model group.
*
*
*
*
*
* @param createModelPackageRequest
* @return Result of the CreateModelPackage operation returned by the service.
* @throws ConflictException
* There was a conflict when you attempted to modify a SageMaker entity such as an Experiment or
* Artifact.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateModelPackage
* @see AWS
* API Documentation
*/
@Override
public CreateModelPackageResult createModelPackage(CreateModelPackageRequest request) {
request = beforeClientExecution(request);
return executeCreateModelPackage(request);
}
@SdkInternalApi
final CreateModelPackageResult executeCreateModelPackage(CreateModelPackageRequest createModelPackageRequest) {
ExecutionContext executionContext = createExecutionContext(createModelPackageRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateModelPackageRequest> request = null;
Response<CreateModelPackageResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateModelPackageRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createModelPackageRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateModelPackage");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateModelPackageResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateModelPackageResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates a model group. A model group contains a group of model versions.
*
*
* @param createModelPackageGroupRequest
* @return Result of the CreateModelPackageGroup operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateModelPackageGroup
* @see AWS API Documentation
*/
@Override
public CreateModelPackageGroupResult createModelPackageGroup(CreateModelPackageGroupRequest request) {
request = beforeClientExecution(request);
return executeCreateModelPackageGroup(request);
}
@SdkInternalApi
final CreateModelPackageGroupResult executeCreateModelPackageGroup(CreateModelPackageGroupRequest createModelPackageGroupRequest) {
ExecutionContext executionContext = createExecutionContext(createModelPackageGroupRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateModelPackageGroupRequest> request = null;
Response<CreateModelPackageGroupResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateModelPackageGroupRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(createModelPackageGroupRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateModelPackageGroup");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateModelPackageGroupResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new CreateModelPackageGroupResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
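// Illustrative usage sketch for createModelPackageGroup (not generated code); the group name and
// description are placeholders.
//
// AmazonSageMaker sageMaker = AmazonSageMakerClientBuilder.defaultClient();
// sageMaker.createModelPackageGroup(new CreateModelPackageGroupRequest()
//         .withModelPackageGroupName("example-model-group")
//         .withModelPackageGroupDescription("Versions of the example model"));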
/**
*
* Creates a definition for a job that monitors model quality and drift. For information about model monitor, see Amazon SageMaker Model Monitor.
*
*
* @param createModelQualityJobDefinitionRequest
* @return Result of the CreateModelQualityJobDefinition operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @sample AmazonSageMaker.CreateModelQualityJobDefinition
* @see AWS API Documentation
*/
@Override
public CreateModelQualityJobDefinitionResult createModelQualityJobDefinition(CreateModelQualityJobDefinitionRequest request) {
request = beforeClientExecution(request);
return executeCreateModelQualityJobDefinition(request);
}
@SdkInternalApi
final CreateModelQualityJobDefinitionResult executeCreateModelQualityJobDefinition(
CreateModelQualityJobDefinitionRequest createModelQualityJobDefinitionRequest) {
ExecutionContext executionContext = createExecutionContext(createModelQualityJobDefinitionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateModelQualityJobDefinitionRequest> request = null;
Response<CreateModelQualityJobDefinitionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateModelQualityJobDefinitionRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(createModelQualityJobDefinitionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateModelQualityJobDefinition");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateModelQualityJobDefinitionResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new CreateModelQualityJobDefinitionResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates a schedule that regularly starts Amazon SageMaker Processing Jobs to monitor the data captured for an
* Amazon SageMaker Endpoint.
*
*
* @param createMonitoringScheduleRequest
* @return Result of the CreateMonitoringSchedule operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @sample AmazonSageMaker.CreateMonitoringSchedule
* @see AWS API Documentation
*/
@Override
public CreateMonitoringScheduleResult createMonitoringSchedule(CreateMonitoringScheduleRequest request) {
request = beforeClientExecution(request);
return executeCreateMonitoringSchedule(request);
}
@SdkInternalApi
final CreateMonitoringScheduleResult executeCreateMonitoringSchedule(CreateMonitoringScheduleRequest createMonitoringScheduleRequest) {
ExecutionContext executionContext = createExecutionContext(createMonitoringScheduleRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateMonitoringScheduleRequest> request = null;
Response<CreateMonitoringScheduleResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateMonitoringScheduleRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(createMonitoringScheduleRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateMonitoringSchedule");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateMonitoringScheduleResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new CreateMonitoringScheduleResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates a SageMaker notebook instance. A notebook instance is a machine learning (ML) compute instance that runs
* the Jupyter Notebook App.
*
*
* In a CreateNotebookInstance request, specify the type of ML compute instance that you want to run.
* SageMaker launches the instance, installs common libraries that you can use to explore datasets for model
* training, and attaches an ML storage volume to the notebook instance.
*
*
* SageMaker also provides a set of example notebooks. Each notebook demonstrates how to use SageMaker with a
* specific algorithm or with a machine learning framework.
*
*
* After receiving the request, SageMaker does the following:
*
*
* -
*
* Creates a network interface in the SageMaker VPC.
*
*
* -
*
* (Optional) If you specified SubnetId, SageMaker creates a network interface in your own VPC, which is
* inferred from the subnet ID that you provide in the input. When creating this network interface, SageMaker
* attaches the security group that you specified in the request to the network interface that it creates in your
* VPC.
*
*
* -
*
* Launches an EC2 instance of the type specified in the request in the SageMaker VPC. If you specified
* SubnetId of your VPC, SageMaker specifies both network interfaces when launching this instance. This
* enables inbound traffic from your own VPC to the notebook instance, assuming that the security groups allow it.
*
*
*
*
* After creating the notebook instance, SageMaker returns its Amazon Resource Name (ARN). You can't change the name
* of a notebook instance after you create it.
*
*
* After SageMaker creates the notebook instance, you can connect to the Jupyter server and work in Jupyter
* notebooks. For example, you can write code to explore a dataset that you can use for model training, train a
* model, host models by creating SageMaker endpoints, and validate hosted models.
*
*
* For more information, see How It
* Works.
*
*
* @param createNotebookInstanceRequest
* @return Result of the CreateNotebookInstance operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateNotebookInstance
* @see AWS API Documentation
*/
@Override
public CreateNotebookInstanceResult createNotebookInstance(CreateNotebookInstanceRequest request) {
request = beforeClientExecution(request);
return executeCreateNotebookInstance(request);
}
@SdkInternalApi
final CreateNotebookInstanceResult executeCreateNotebookInstance(CreateNotebookInstanceRequest createNotebookInstanceRequest) {
ExecutionContext executionContext = createExecutionContext(createNotebookInstanceRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateNotebookInstanceRequest> request = null;
Response<CreateNotebookInstanceResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateNotebookInstanceRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createNotebookInstanceRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateNotebookInstance");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateNotebookInstanceResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new CreateNotebookInstanceResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
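// Illustrative usage sketch for createNotebookInstance (not generated code). The instance name, instance
// type, volume size, and role ARN are placeholder assumptions.
//
// AmazonSageMaker sageMaker = AmazonSageMakerClientBuilder.defaultClient();
// CreateNotebookInstanceResult notebook = sageMaker.createNotebookInstance(new CreateNotebookInstanceRequest()
//         .withNotebookInstanceName("example-notebook")
//         .withInstanceType("ml.t3.medium")
//         .withVolumeSizeInGB(20)
//         .withRoleArn("arn:aws:iam::111122223333:role/ExampleSageMakerRole"));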
/**
*
* Creates a lifecycle configuration that you can associate with a notebook instance. A lifecycle
* configuration is a collection of shell scripts that run when you create or start a notebook instance.
*
*
* Each lifecycle configuration script has a limit of 16384 characters.
*
*
* The value of the $PATH environment variable that is available to both scripts is
* /sbin:bin:/usr/sbin:/usr/bin.
*
*
* View Amazon CloudWatch Logs for notebook instance lifecycle configurations in log group
* /aws/sagemaker/NotebookInstances in log stream
* [notebook-instance-name]/[LifecycleConfigHook].
*
*
* Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes,
* it fails and the notebook instance is not created or started.
*
*
* For information about notebook instance lifecycle configurations, see Step 2.1: (Optional)
* Customize a Notebook Instance.
*
*
* @param createNotebookInstanceLifecycleConfigRequest
* @return Result of the CreateNotebookInstanceLifecycleConfig operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateNotebookInstanceLifecycleConfig
* @see AWS API Documentation
*/
@Override
public CreateNotebookInstanceLifecycleConfigResult createNotebookInstanceLifecycleConfig(CreateNotebookInstanceLifecycleConfigRequest request) {
request = beforeClientExecution(request);
return executeCreateNotebookInstanceLifecycleConfig(request);
}
@SdkInternalApi
final CreateNotebookInstanceLifecycleConfigResult executeCreateNotebookInstanceLifecycleConfig(
CreateNotebookInstanceLifecycleConfigRequest createNotebookInstanceLifecycleConfigRequest) {
ExecutionContext executionContext = createExecutionContext(createNotebookInstanceLifecycleConfigRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateNotebookInstanceLifecycleConfigRequest> request = null;
Response<CreateNotebookInstanceLifecycleConfigResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateNotebookInstanceLifecycleConfigRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(createNotebookInstanceLifecycleConfigRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateNotebookInstanceLifecycleConfig");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateNotebookInstanceLifecycleConfigResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new CreateNotebookInstanceLifecycleConfigResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
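// Illustrative usage sketch for createNotebookInstanceLifecycleConfig (not generated code). The
// configuration name and script are placeholders; the script content must be base64-encoded.
//
// AmazonSageMaker sageMaker = AmazonSageMakerClientBuilder.defaultClient();
// String onStart = java.util.Base64.getEncoder().encodeToString(
//         "#!/bin/bash\necho 'notebook started'".getBytes(java.nio.charset.StandardCharsets.UTF_8));
// sageMaker.createNotebookInstanceLifecycleConfig(new CreateNotebookInstanceLifecycleConfigRequest()
//         .withNotebookInstanceLifecycleConfigName("example-lifecycle-config")
//         .withOnStart(new NotebookInstanceLifecycleHook().withContent(onStart)));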
/**
*
* Creates a job that optimizes a model for inference performance. To create the job, you provide the location of a
* source model, and you provide the settings for the optimization techniques that you want the job to apply. When
* the job completes successfully, SageMaker uploads the new optimized model to the output destination that you
* specify.
*
*
* For more information about how to use this action, and about the supported optimization techniques, see Optimize model inference with Amazon
* SageMaker.
*
*
* @param createOptimizationJobRequest
* @return Result of the CreateOptimizationJob operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateOptimizationJob
* @see AWS API Documentation
*/
@Override
public CreateOptimizationJobResult createOptimizationJob(CreateOptimizationJobRequest request) {
request = beforeClientExecution(request);
return executeCreateOptimizationJob(request);
}
@SdkInternalApi
final CreateOptimizationJobResult executeCreateOptimizationJob(CreateOptimizationJobRequest createOptimizationJobRequest) {
ExecutionContext executionContext = createExecutionContext(createOptimizationJobRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateOptimizationJobRequest> request = null;
Response<CreateOptimizationJobResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateOptimizationJobRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createOptimizationJobRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateOptimizationJob");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateOptimizationJobResult>> responseHandler = protocolFactory
.createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new CreateOptimizationJobResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates a pipeline using a JSON pipeline definition.
*
*
* @param createPipelineRequest
* @return Result of the CreatePipeline operation returned by the service.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @throws ConflictException
* There was a conflict when you attempted to modify a SageMaker entity such as an Experiment or
* Artifact.
* @sample AmazonSageMaker.CreatePipeline
* @see AWS API
* Documentation
*/
@Override
public CreatePipelineResult createPipeline(CreatePipelineRequest request) {
request = beforeClientExecution(request);
return executeCreatePipeline(request);
}
@SdkInternalApi
final CreatePipelineResult executeCreatePipeline(CreatePipelineRequest createPipelineRequest) {
ExecutionContext executionContext = createExecutionContext(createPipelineRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreatePipelineRequest> request = null;
Response<CreatePipelineResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreatePipelineRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createPipelineRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreatePipeline");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreatePipelineResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreatePipelineResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
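// Illustrative usage sketch for createPipeline (not generated code). The pipeline name, role ARN, and the
// minimal pipeline definition JSON skeleton below are placeholder assumptions.
//
// AmazonSageMaker sageMaker = AmazonSageMakerClientBuilder.defaultClient();
// String pipelineDefinitionJson = "{\"Version\": \"2020-12-01\", \"Steps\": []}";
// sageMaker.createPipeline(new CreatePipelineRequest()
//         .withPipelineName("example-pipeline")
//         .withPipelineDefinition(pipelineDefinitionJson)
//         .withClientRequestToken(java.util.UUID.randomUUID().toString())
//         .withRoleArn("arn:aws:iam::111122223333:role/ExampleSageMakerRole"));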
/**
*
* Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be
* automatically signed in to the domain, and granted access to all of the Apps and files associated with the
* Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals
* IAM.
*
*
* The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is
* created, no additional permission is required to access this URL. IAM authorization policies for this API are
* also enforced for every HTTP request and WebSocket frame that attempts to connect to the app.
*
*
* You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or
* Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker
* Studio Through an Interface VPC Endpoint.
*
*
*
* The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You
* can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit
* expires, you are directed to the Amazon Web Services console sign-in page.
*
*
*
* @param createPresignedDomainUrlRequest
* @return Result of the CreatePresignedDomainUrl operation returned by the service.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @sample AmazonSageMaker.CreatePresignedDomainUrl
* @see AWS API Documentation
*/
@Override
public CreatePresignedDomainUrlResult createPresignedDomainUrl(CreatePresignedDomainUrlRequest request) {
request = beforeClientExecution(request);
return executeCreatePresignedDomainUrl(request);
}
@SdkInternalApi
final CreatePresignedDomainUrlResult executeCreatePresignedDomainUrl(CreatePresignedDomainUrlRequest createPresignedDomainUrlRequest) {
ExecutionContext executionContext = createExecutionContext(createPresignedDomainUrlRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreatePresignedDomainUrlRequest> request = null;
Response<CreatePresignedDomainUrlResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreatePresignedDomainUrlRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(createPresignedDomainUrlRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreatePresignedDomainUrl");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreatePresignedDomainUrlResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new CreatePresignedDomainUrlResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
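// Illustrative usage sketch for createPresignedDomainUrl (not generated code). The domain ID and user
// profile name are placeholders; ExpiresInSeconds controls how long the returned URL stays valid.
//
// AmazonSageMaker sageMaker = AmazonSageMakerClientBuilder.defaultClient();
// CreatePresignedDomainUrlResult presigned = sageMaker.createPresignedDomainUrl(new CreatePresignedDomainUrlRequest()
//         .withDomainId("d-exampledomain")
//         .withUserProfileName("example-user")
//         .withExpiresInSeconds(300));
// String signInUrl = presigned.getAuthorizedUrl();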
/**
*
* Returns a presigned URL that you can use to connect to the MLflow UI attached to your tracking server. For more
* information, see Launch the
* MLflow UI using a presigned URL.
*
*
* @param createPresignedMlflowTrackingServerUrlRequest
* @return Result of the CreatePresignedMlflowTrackingServerUrl operation returned by the service.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @sample AmazonSageMaker.CreatePresignedMlflowTrackingServerUrl
* @see AWS API Documentation
*/
@Override
public CreatePresignedMlflowTrackingServerUrlResult createPresignedMlflowTrackingServerUrl(CreatePresignedMlflowTrackingServerUrlRequest request) {
request = beforeClientExecution(request);
return executeCreatePresignedMlflowTrackingServerUrl(request);
}
@SdkInternalApi
final CreatePresignedMlflowTrackingServerUrlResult executeCreatePresignedMlflowTrackingServerUrl(
CreatePresignedMlflowTrackingServerUrlRequest createPresignedMlflowTrackingServerUrlRequest) {
ExecutionContext executionContext = createExecutionContext(createPresignedMlflowTrackingServerUrlRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreatePresignedMlflowTrackingServerUrlRequest> request = null;
Response<CreatePresignedMlflowTrackingServerUrlResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreatePresignedMlflowTrackingServerUrlRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(createPresignedMlflowTrackingServerUrlRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreatePresignedMlflowTrackingServerUrl");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreatePresignedMlflowTrackingServerUrlResult>> responseHandler = protocolFactory
.createResponseHandler(new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new CreatePresignedMlflowTrackingServerUrlResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the SageMaker
* console, when you choose Open next to a notebook instance, SageMaker opens a new tab showing the
* Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.
*
*
* The IAM role or user used to call this API defines the permissions to access the notebook instance. Once the
* presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for
* this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook
* instance.
*
*
* You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify.
* Use the NotIpAddress condition operator and the aws:SourceIP condition context key to
* specify the list of IP addresses that you want to have access to the notebook instance. For more information, see
* Limit Access to a Notebook Instance by IP Address.
*
*
*
* The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute
* limit expires, you are directed to the Amazon Web Services console sign-in page.
*
*
*
* @param createPresignedNotebookInstanceUrlRequest
* @return Result of the CreatePresignedNotebookInstanceUrl operation returned by the service.
* @sample AmazonSageMaker.CreatePresignedNotebookInstanceUrl
* @see AWS API Documentation
*/
@Override
public CreatePresignedNotebookInstanceUrlResult createPresignedNotebookInstanceUrl(CreatePresignedNotebookInstanceUrlRequest request) {
request = beforeClientExecution(request);
return executeCreatePresignedNotebookInstanceUrl(request);
}
@SdkInternalApi
final CreatePresignedNotebookInstanceUrlResult executeCreatePresignedNotebookInstanceUrl(
CreatePresignedNotebookInstanceUrlRequest createPresignedNotebookInstanceUrlRequest) {
ExecutionContext executionContext = createExecutionContext(createPresignedNotebookInstanceUrlRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreatePresignedNotebookInstanceUrlRequest> request = null;
Response<CreatePresignedNotebookInstanceUrlResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreatePresignedNotebookInstanceUrlRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(createPresignedNotebookInstanceUrlRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreatePresignedNotebookInstanceUrl");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreatePresignedNotebookInstanceUrlResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new CreatePresignedNotebookInstanceUrlResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
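// Illustrative usage sketch for createPresignedNotebookInstanceUrl (not generated code); the notebook
// instance name and session duration are placeholders.
//
// AmazonSageMaker sageMaker = AmazonSageMakerClientBuilder.defaultClient();
// CreatePresignedNotebookInstanceUrlResult presigned = sageMaker.createPresignedNotebookInstanceUrl(
//         new CreatePresignedNotebookInstanceUrlRequest()
//                 .withNotebookInstanceName("example-notebook")
//                 .withSessionExpirationDurationInSeconds(1800));
// String jupyterUrl = presigned.getAuthorizedUrl();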
/**
*
* Creates a processing job.
*
*
* @param createProcessingJobRequest
* @return Result of the CreateProcessingJob operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @sample AmazonSageMaker.CreateProcessingJob
* @see AWS
* API Documentation
*/
@Override
public CreateProcessingJobResult createProcessingJob(CreateProcessingJobRequest request) {
request = beforeClientExecution(request);
return executeCreateProcessingJob(request);
}
@SdkInternalApi
final CreateProcessingJobResult executeCreateProcessingJob(CreateProcessingJobRequest createProcessingJobRequest) {
ExecutionContext executionContext = createExecutionContext(createProcessingJobRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateProcessingJobRequest> request = null;
Response<CreateProcessingJobResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateProcessingJobRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createProcessingJobRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateProcessingJob");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateProcessingJobResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateProcessingJobResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
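// Illustrative usage sketch for createProcessingJob (not generated code). The job name, processing image
// URI, cluster configuration, and role ARN are placeholder assumptions.
//
// AmazonSageMaker sageMaker = AmazonSageMakerClientBuilder.defaultClient();
// sageMaker.createProcessingJob(new CreateProcessingJobRequest()
//         .withProcessingJobName("example-processing-job")
//         .withAppSpecification(new AppSpecification()
//                 .withImageUri("111122223333.dkr.ecr.us-east-1.amazonaws.com/example-processor:latest"))
//         .withProcessingResources(new ProcessingResources()
//                 .withClusterConfig(new ProcessingClusterConfig()
//                         .withInstanceType("ml.m5.xlarge")
//                         .withInstanceCount(1)
//                         .withVolumeSizeInGB(30)))
//         .withRoleArn("arn:aws:iam::111122223333:role/ExampleSageMakerRole"));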
/**
*
* Creates a machine learning (ML) project that can contain one or more templates that set up an ML pipeline from
* training to deploying an approved model.
*
*
* @param createProjectRequest
* @return Result of the CreateProject operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateProject
* @see AWS API
* Documentation
*/
@Override
public CreateProjectResult createProject(CreateProjectRequest request) {
request = beforeClientExecution(request);
return executeCreateProject(request);
}
@SdkInternalApi
final CreateProjectResult executeCreateProject(CreateProjectRequest createProjectRequest) {
ExecutionContext executionContext = createExecutionContext(createProjectRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateProjectRequest> request = null;
Response<CreateProjectResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateProjectRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createProjectRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateProject");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateProjectResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateProjectResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
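// Illustrative usage sketch for createProject (not generated code). The project name and the Service
// Catalog product and provisioning artifact IDs are placeholder assumptions.
//
// AmazonSageMaker sageMaker = AmazonSageMakerClientBuilder.defaultClient();
// sageMaker.createProject(new CreateProjectRequest()
//         .withProjectName("example-project")
//         .withServiceCatalogProvisioningDetails(new ServiceCatalogProvisioningDetails()
//                 .withProductId("prod-examplexxxxx")
//                 .withProvisioningArtifactId("pa-examplexxxxx")));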
/**
*
* Creates a private space or a space used for real time collaboration in a domain.
*
*
* @param createSpaceRequest
* @return Result of the CreateSpace operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @sample AmazonSageMaker.CreateSpace
* @see AWS API
* Documentation
*/
@Override
public CreateSpaceResult createSpace(CreateSpaceRequest request) {
request = beforeClientExecution(request);
return executeCreateSpace(request);
}
@SdkInternalApi
final CreateSpaceResult executeCreateSpace(CreateSpaceRequest createSpaceRequest) {
ExecutionContext executionContext = createExecutionContext(createSpaceRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateSpaceRequest> request = null;
Response<CreateSpaceResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateSpaceRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createSpaceRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateSpace");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateSpaceResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateSpaceResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
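// Illustrative usage sketch for createSpace (not generated code); the domain ID and space name are
// placeholders.
//
// AmazonSageMaker sageMaker = AmazonSageMakerClientBuilder.defaultClient();
// sageMaker.createSpace(new CreateSpaceRequest()
//         .withDomainId("d-exampledomain")
//         .withSpaceName("example-space"));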
/**
*
* Creates a new Amazon SageMaker Studio Lifecycle Configuration.
*
*
* @param createStudioLifecycleConfigRequest
* @return Result of the CreateStudioLifecycleConfig operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @sample AmazonSageMaker.CreateStudioLifecycleConfig
* @see AWS API Documentation
*/
@Override
public CreateStudioLifecycleConfigResult createStudioLifecycleConfig(CreateStudioLifecycleConfigRequest request) {
request = beforeClientExecution(request);
return executeCreateStudioLifecycleConfig(request);
}
@SdkInternalApi
final CreateStudioLifecycleConfigResult executeCreateStudioLifecycleConfig(CreateStudioLifecycleConfigRequest createStudioLifecycleConfigRequest) {
ExecutionContext executionContext = createExecutionContext(createStudioLifecycleConfigRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateStudioLifecycleConfigRequest> request = null;
Response<CreateStudioLifecycleConfigResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateStudioLifecycleConfigRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(createStudioLifecycleConfigRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateStudioLifecycleConfig");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateStudioLifecycleConfigResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new CreateStudioLifecycleConfigResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
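// Illustrative usage sketch for createStudioLifecycleConfig (not generated code). The configuration name,
// script, and app type are placeholder assumptions; the script content must be base64-encoded.
//
// AmazonSageMaker sageMaker = AmazonSageMakerClientBuilder.defaultClient();
// String content = java.util.Base64.getEncoder().encodeToString(
//         "#!/bin/bash\necho 'studio app started'".getBytes(java.nio.charset.StandardCharsets.UTF_8));
// sageMaker.createStudioLifecycleConfig(new CreateStudioLifecycleConfigRequest()
//         .withStudioLifecycleConfigName("example-studio-lifecycle")
//         .withStudioLifecycleConfigContent(content)
//         .withStudioLifecycleConfigAppType("JupyterServer"));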
/**
*
* Starts a model training job. After training completes, SageMaker saves the resulting model artifacts to an Amazon
* S3 location that you specify.
*
*
* If you choose to host your model using SageMaker hosting services, you can use the resulting model artifacts as
* part of the model. You can also use the artifacts in a machine learning service other than SageMaker, provided
* that you know how to use them for inference.
*
*
* In the request body, you provide the following:
*
*
* -
*
* AlgorithmSpecification - Identifies the training algorithm to use.
*
*
* -
*
* HyperParameters - Specify these algorithm-specific parameters to enable the estimation of model
* parameters during training. Hyperparameters can be tuned to optimize this learning process. For a list of
* hyperparameters for each training algorithm provided by SageMaker, see Algorithms.
*
*
*
* Do not include any security-sensitive information, including account access IDs, secrets, or tokens, in any
* hyperparameter field. If the use of security-sensitive credentials is detected, SageMaker will reject your
* training job request and return an error.
*
*
* -
*
* InputDataConfig - Describes the input required by the training job and the Amazon S3, EFS, or FSx
* location where it is stored.
*
*
* -
*
* OutputDataConfig - Identifies the Amazon S3 bucket where you want SageMaker to save the results of
* model training.
*
*
* -
*
* ResourceConfig - Identifies the resources, ML compute instances, and ML storage volumes to deploy
* for model training. In distributed training, you specify more than one instance.
*
*
* -
*
* EnableManagedSpotTraining - Optimize the cost of training machine learning models by up to 80% by
* using Amazon EC2 Spot instances. For more information, see Managed Spot
* Training.
*
*
* -
*
* RoleArn - The Amazon Resource Name (ARN) that SageMaker assumes to perform tasks on your behalf
* during model training. You must grant this role the necessary permissions so that SageMaker can successfully
* complete model training.
*
*
* -
*
* StoppingCondition - To help cap training costs, use MaxRuntimeInSeconds to set a time
* limit for training. Use MaxWaitTimeInSeconds to specify how long a managed spot training job has to
* complete.
*
*
* -
*
* Environment - The environment variables to set in the Docker container.
*
*
* -
*
* RetryStrategy - The number of times to retry the job when the job fails due to an
* InternalServerError.
*
*
*
*
* For more information about SageMaker, see How It Works.
*
*
* @param createTrainingJobRequest
* @return Result of the CreateTrainingJob operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @sample AmazonSageMaker.CreateTrainingJob
* @see AWS
* API Documentation
*/
@Override
public CreateTrainingJobResult createTrainingJob(CreateTrainingJobRequest request) {
request = beforeClientExecution(request);
return executeCreateTrainingJob(request);
}
@SdkInternalApi
final CreateTrainingJobResult executeCreateTrainingJob(CreateTrainingJobRequest createTrainingJobRequest) {
ExecutionContext executionContext = createExecutionContext(createTrainingJobRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateTrainingJobRequest> request = null;
Response<CreateTrainingJobResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateTrainingJobRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createTrainingJobRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateTrainingJob");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateTrainingJobResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateTrainingJobResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
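// Illustrative usage sketch for createTrainingJob (not generated code), wiring together the request fields
// described above. The job name, training image, S3 URIs, instance settings, and role ARN are placeholder
// assumptions.
//
// AmazonSageMaker sageMaker = AmazonSageMakerClientBuilder.defaultClient();
// sageMaker.createTrainingJob(new CreateTrainingJobRequest()
//         .withTrainingJobName("example-training-job")
//         .withAlgorithmSpecification(new AlgorithmSpecification()
//                 .withTrainingImage("111122223333.dkr.ecr.us-east-1.amazonaws.com/example-algo:latest")
//                 .withTrainingInputMode("File"))
//         .withRoleArn("arn:aws:iam::111122223333:role/ExampleSageMakerRole")
//         .withInputDataConfig(new Channel()
//                 .withChannelName("train")
//                 .withDataSource(new DataSource().withS3DataSource(new S3DataSource()
//                         .withS3DataType("S3Prefix")
//                         .withS3Uri("s3://example-bucket/train/"))))
//         .withOutputDataConfig(new OutputDataConfig().withS3OutputPath("s3://example-bucket/output/"))
//         .withResourceConfig(new ResourceConfig()
//                 .withInstanceType("ml.m5.xlarge")
//                 .withInstanceCount(1)
//                 .withVolumeSizeInGB(50))
//         .withStoppingCondition(new StoppingCondition().withMaxRuntimeInSeconds(3600)));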
/**
*
* Starts a transform job. A transform job uses a trained model to get inferences on a dataset and saves these
* results to an Amazon S3 location that you specify.
*
*
* To perform batch transformations, you create a transform job and use the data that you have readily available.
*
*
* In the request body, you provide the following:
*
*
* -
*
* TransformJobName - Identifies the transform job. The name must be unique within an Amazon Web
* Services Region in an Amazon Web Services account.
*
*
* -
*
* ModelName - Identifies the model to use. ModelName must be the name of an existing
* Amazon SageMaker model in the same Amazon Web Services Region and Amazon Web Services account. For information on
* creating a model, see CreateModel.
*
*
* -
*
* TransformInput - Describes the dataset to be transformed and the Amazon S3 location where it is
* stored.
*
*
* -
*
* TransformOutput - Identifies the Amazon S3 location where you want Amazon SageMaker to save the
* results from the transform job.
*
*
* -
*
* TransformResources - Identifies the ML compute instances for the transform job.
*
*
*
*
* For more information about how batch transformation works, see Batch Transform.
*
*
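* A minimal call sketch follows; the job name, model name, content type, and S3 URIs are placeholder
* assumptions, not values taken from this file:
*
* AmazonSageMaker sm = AmazonSageMakerClientBuilder.defaultClient();
* sm.createTransformJob(new CreateTransformJobRequest()
*     .withTransformJobName("example-transform-job")           // placeholder job name
*     .withModelName("example-model")                          // placeholder; must name an existing model
*     .withTransformInput(new TransformInput()
*         .withDataSource(new TransformDataSource().withS3DataSource(new TransformS3DataSource()
*             .withS3DataType("S3Prefix").withS3Uri("s3://example-bucket/input/")))
*         .withContentType("text/csv"))
*     .withTransformOutput(new TransformOutput().withS3OutputPath("s3://example-bucket/output/"))
*     .withTransformResources(new TransformResources().withInstanceType("ml.m5.xlarge").withInstanceCount(1)));
*
*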
* @param createTransformJobRequest
* @return Result of the CreateTransformJob operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @sample AmazonSageMaker.CreateTransformJob
* @see AWS
* API Documentation
*/
@Override
public CreateTransformJobResult createTransformJob(CreateTransformJobRequest request) {
request = beforeClientExecution(request);
return executeCreateTransformJob(request);
}
@SdkInternalApi
final CreateTransformJobResult executeCreateTransformJob(CreateTransformJobRequest createTransformJobRequest) {
ExecutionContext executionContext = createExecutionContext(createTransformJobRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateTransformJobRequest> request = null;
Response<CreateTransformJobResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateTransformJobRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createTransformJobRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateTransformJob");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateTransformJobResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateTransformJobResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates a SageMaker trial. A trial is a set of steps called trial components that produce a
* machine learning model. A trial is part of a single SageMaker experiment.
*
*
* When you use SageMaker Studio or the SageMaker Python SDK, all experiments, trials, and trial components are
* automatically tracked, logged, and indexed. When you use the Amazon Web Services SDK for Python (Boto), you must
* use the logging APIs provided by the SDK.
*
*
* You can add tags to a trial and then use the Search API to search for the
* tags.
*
*
* To get a list of all your trials, call the ListTrials API. To view
* a trial's properties, call the DescribeTrial API. To
* create a trial component, call the CreateTrialComponent API.
*
*
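* A minimal call sketch, assuming an existing experiment; both names below are placeholders, not values taken
* from this file:
*
* AmazonSageMaker sm = AmazonSageMakerClientBuilder.defaultClient();
* CreateTrialResult trial = sm.createTrial(new CreateTrialRequest()
*     .withTrialName("example-trial")               // placeholder trial name
*     .withExperimentName("example-experiment"));   // placeholder; the experiment must already exist
* System.out.println(trial.getTrialArn());
*
*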
* @param createTrialRequest
* @return Result of the CreateTrial operation returned by the service.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateTrial
* @see AWS API
* Documentation
*/
@Override
public CreateTrialResult createTrial(CreateTrialRequest request) {
request = beforeClientExecution(request);
return executeCreateTrial(request);
}
@SdkInternalApi
final CreateTrialResult executeCreateTrial(CreateTrialRequest createTrialRequest) {
ExecutionContext executionContext = createExecutionContext(createTrialRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateTrialRequest> request = null;
Response<CreateTrialResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateTrialRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createTrialRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateTrial");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateTrialResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateTrialResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates a trial component, which is a stage of a machine learning trial. A trial is composed of one
* or more trial components. A trial component can be used in multiple trials.
*
*
* Trial components include pre-processing jobs, training jobs, and batch transform jobs.
*
*
* When you use SageMaker Studio or the SageMaker Python SDK, all experiments, trials, and trial components are
* automatically tracked, logged, and indexed. When you use the Amazon Web Services SDK for Python (Boto), you must
* use the logging APIs provided by the SDK.
*
*
* You can add tags to a trial component and then use the Search API to search for the
* tags.
*
*
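* A minimal call sketch; the component name and display name are placeholder assumptions:
*
* AmazonSageMaker sm = AmazonSageMakerClientBuilder.defaultClient();
* sm.createTrialComponent(new CreateTrialComponentRequest()
*     .withTrialComponentName("example-trial-component")   // placeholder component name
*     .withDisplayName("Example preprocessing step"));     // placeholder display name
*
*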
* @param createTrialComponentRequest
* @return Result of the CreateTrialComponent operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateTrialComponent
* @see AWS
* API Documentation
*/
@Override
public CreateTrialComponentResult createTrialComponent(CreateTrialComponentRequest request) {
request = beforeClientExecution(request);
return executeCreateTrialComponent(request);
}
@SdkInternalApi
final CreateTrialComponentResult executeCreateTrialComponent(CreateTrialComponentRequest createTrialComponentRequest) {
ExecutionContext executionContext = createExecutionContext(createTrialComponentRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateTrialComponentRequest> request = null;
Response<CreateTrialComponentResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateTrialComponentRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createTrialComponentRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateTrialComponent");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateTrialComponentResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateTrialComponentResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates a user profile. A user profile represents a single user within a domain, and is the main way to reference
* a "person" for the purposes of sharing, reporting, and other user-oriented features. This entity is created when
* a user onboards to a domain. If an administrator invites a person by email or imports them from IAM Identity
* Center, a user profile is automatically created. A user profile is the primary holder of settings for an
* individual user and has a reference to the user's private Amazon Elastic File System home directory.
*
*
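* A minimal call sketch; the domain ID and user profile name are placeholder assumptions:
*
* AmazonSageMaker sm = AmazonSageMakerClientBuilder.defaultClient();
* sm.createUserProfile(new CreateUserProfileRequest()
*     .withDomainId("d-exampledomain")          // placeholder; use an existing domain ID
*     .withUserProfileName("example-user"));    // placeholder user profile name
*
*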
* @param createUserProfileRequest
* @return Result of the CreateUserProfile operation returned by the service.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @sample AmazonSageMaker.CreateUserProfile
* @see AWS
* API Documentation
*/
@Override
public CreateUserProfileResult createUserProfile(CreateUserProfileRequest request) {
request = beforeClientExecution(request);
return executeCreateUserProfile(request);
}
@SdkInternalApi
final CreateUserProfileResult executeCreateUserProfile(CreateUserProfileRequest createUserProfileRequest) {
ExecutionContext executionContext = createExecutionContext(createUserProfileRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateUserProfileRequest> request = null;
Response<CreateUserProfileResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateUserProfileRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createUserProfileRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateUserProfile");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateUserProfileResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateUserProfileResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Use this operation to create a workforce. This operation will return an error if a workforce already exists in
* the Amazon Web Services Region that you specify. You can only create one workforce in each Amazon Web Services
* Region per Amazon Web Services account.
*
*
* If you want to create a new workforce in an Amazon Web Services Region where a workforce already exists, use the
* DeleteWorkforce API operation to delete the existing workforce and then use CreateWorkforce to create a new
* workforce.
*
*
* To create a private workforce using Amazon Cognito, you must specify a Cognito user pool in
* CognitoConfig. You can also create an Amazon Cognito workforce using the Amazon SageMaker console.
* For more information, see Create a Private
* Workforce (Amazon Cognito).
*
*
* To create a private workforce using your own OIDC Identity Provider (IdP), specify your IdP configuration in
* OidcConfig. Your OIDC IdP must support groups because groups are used by Ground Truth and
* Amazon A2I to create work teams. For more information, see Create a Private
* Workforce (OIDC IdP).
*
*
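* A minimal call sketch for a Cognito-backed workforce; the workforce name, user pool ID, and app client ID
* are placeholder assumptions:
*
* AmazonSageMaker sm = AmazonSageMakerClientBuilder.defaultClient();
* sm.createWorkforce(new CreateWorkforceRequest()
*     .withWorkforceName("example-workforce")              // placeholder workforce name
*     .withCognitoConfig(new CognitoConfig()
*         .withUserPool("us-east-1_ExamplePool")           // placeholder Cognito user pool ID
*         .withClientId("exampleclientid1234567890")));    // placeholder app client ID
*
*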
* @param createWorkforceRequest
* @return Result of the CreateWorkforce operation returned by the service.
* @sample AmazonSageMaker.CreateWorkforce
* @see AWS API
* Documentation
*/
@Override
public CreateWorkforceResult createWorkforce(CreateWorkforceRequest request) {
request = beforeClientExecution(request);
return executeCreateWorkforce(request);
}
@SdkInternalApi
final CreateWorkforceResult executeCreateWorkforce(CreateWorkforceRequest createWorkforceRequest) {
ExecutionContext executionContext = createExecutionContext(createWorkforceRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateWorkforceRequest> request = null;
Response<CreateWorkforceResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateWorkforceRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createWorkforceRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateWorkforce");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateWorkforceResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateWorkforceResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Creates a new work team for labeling your data. A work team is defined by one or more Amazon Cognito user pools.
* You must first create the user pools before you can create a work team.
*
*
* You cannot create more than 25 work teams in an account and region.
*
*
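* A minimal call sketch; the work team name, user pool, user group, and client ID are placeholder assumptions:
*
* AmazonSageMaker sm = AmazonSageMakerClientBuilder.defaultClient();
* sm.createWorkteam(new CreateWorkteamRequest()
*     .withWorkteamName("example-workteam")                // placeholder work team name
*     .withDescription("Example labeling team")
*     .withMemberDefinitions(new MemberDefinition().withCognitoMemberDefinition(new CognitoMemberDefinition()
*         .withUserPool("us-east-1_ExamplePool")           // placeholder Cognito user pool ID
*         .withUserGroup("example-labelers")               // placeholder user group
*         .withClientId("exampleclientid1234567890"))));   // placeholder app client ID
*
*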
* @param createWorkteamRequest
* @return Result of the CreateWorkteam operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @throws ResourceLimitExceededException
* You have exceeded a SageMaker resource limit. For example, you might have too many training jobs
* created.
* @sample AmazonSageMaker.CreateWorkteam
* @see AWS API
* Documentation
*/
@Override
public CreateWorkteamResult createWorkteam(CreateWorkteamRequest request) {
request = beforeClientExecution(request);
return executeCreateWorkteam(request);
}
@SdkInternalApi
final CreateWorkteamResult executeCreateWorkteam(CreateWorkteamRequest createWorkteamRequest) {
ExecutionContext executionContext = createExecutionContext(createWorkteamRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<CreateWorkteamRequest> request = null;
Response<CreateWorkteamResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new CreateWorkteamRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(createWorkteamRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "CreateWorkteam");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<CreateWorkteamResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new CreateWorkteamResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes an action.
*
*
* @param deleteActionRequest
* @return Result of the DeleteAction operation returned by the service.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @sample AmazonSageMaker.DeleteAction
* @see AWS API
* Documentation
*/
@Override
public DeleteActionResult deleteAction(DeleteActionRequest request) {
request = beforeClientExecution(request);
return executeDeleteAction(request);
}
@SdkInternalApi
final DeleteActionResult executeDeleteAction(DeleteActionRequest deleteActionRequest) {
ExecutionContext executionContext = createExecutionContext(deleteActionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteActionRequest> request = null;
Response<DeleteActionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteActionRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteActionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteAction");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DeleteActionResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteActionResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Removes the specified algorithm from your account.
*
*
* @param deleteAlgorithmRequest
* @return Result of the DeleteAlgorithm operation returned by the service.
* @throws ConflictException
* There was a conflict when you attempted to modify a SageMaker entity such as an Experiment
* or Artifact.
* @sample AmazonSageMaker.DeleteAlgorithm
* @see AWS API
* Documentation
*/
@Override
public DeleteAlgorithmResult deleteAlgorithm(DeleteAlgorithmRequest request) {
request = beforeClientExecution(request);
return executeDeleteAlgorithm(request);
}
@SdkInternalApi
final DeleteAlgorithmResult executeDeleteAlgorithm(DeleteAlgorithmRequest deleteAlgorithmRequest) {
ExecutionContext executionContext = createExecutionContext(deleteAlgorithmRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteAlgorithmRequest> request = null;
Response<DeleteAlgorithmResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteAlgorithmRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteAlgorithmRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteAlgorithm");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DeleteAlgorithmResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteAlgorithmResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Used to stop and delete an app.
*
*
* @param deleteAppRequest
* @return Result of the DeleteApp operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @sample AmazonSageMaker.DeleteApp
* @see AWS API
* Documentation
*/
@Override
public DeleteAppResult deleteApp(DeleteAppRequest request) {
request = beforeClientExecution(request);
return executeDeleteApp(request);
}
@SdkInternalApi
final DeleteAppResult executeDeleteApp(DeleteAppRequest deleteAppRequest) {
ExecutionContext executionContext = createExecutionContext(deleteAppRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteAppRequest> request = null;
Response<DeleteAppResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteAppRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteAppRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteApp");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DeleteAppResult>> responseHandler = protocolFactory.createResponseHandler(new JsonOperationMetadata()
.withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteAppResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes an AppImageConfig.
*
*
* @param deleteAppImageConfigRequest
* @return Result of the DeleteAppImageConfig operation returned by the service.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @sample AmazonSageMaker.DeleteAppImageConfig
* @see AWS
* API Documentation
*/
@Override
public DeleteAppImageConfigResult deleteAppImageConfig(DeleteAppImageConfigRequest request) {
request = beforeClientExecution(request);
return executeDeleteAppImageConfig(request);
}
@SdkInternalApi
final DeleteAppImageConfigResult executeDeleteAppImageConfig(DeleteAppImageConfigRequest deleteAppImageConfigRequest) {
ExecutionContext executionContext = createExecutionContext(deleteAppImageConfigRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteAppImageConfigRequest> request = null;
Response<DeleteAppImageConfigResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteAppImageConfigRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteAppImageConfigRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteAppImageConfig");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DeleteAppImageConfigResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteAppImageConfigResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes an artifact. Either ArtifactArn or Source must be specified.
*
*
* @param deleteArtifactRequest
* @return Result of the DeleteArtifact operation returned by the service.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @sample AmazonSageMaker.DeleteArtifact
* @see AWS API
* Documentation
*/
@Override
public DeleteArtifactResult deleteArtifact(DeleteArtifactRequest request) {
request = beforeClientExecution(request);
return executeDeleteArtifact(request);
}
@SdkInternalApi
final DeleteArtifactResult executeDeleteArtifact(DeleteArtifactRequest deleteArtifactRequest) {
ExecutionContext executionContext = createExecutionContext(deleteArtifactRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteArtifactRequest> request = null;
Response<DeleteArtifactResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteArtifactRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteArtifactRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteArtifact");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DeleteArtifactResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteArtifactResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes an association.
*
*
* @param deleteAssociationRequest
* @return Result of the DeleteAssociation operation returned by the service.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @sample AmazonSageMaker.DeleteAssociation
* @see AWS
* API Documentation
*/
@Override
public DeleteAssociationResult deleteAssociation(DeleteAssociationRequest request) {
request = beforeClientExecution(request);
return executeDeleteAssociation(request);
}
@SdkInternalApi
final DeleteAssociationResult executeDeleteAssociation(DeleteAssociationRequest deleteAssociationRequest) {
ExecutionContext executionContext = createExecutionContext(deleteAssociationRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteAssociationRequest> request = null;
Response<DeleteAssociationResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteAssociationRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteAssociationRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteAssociation");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DeleteAssociationResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteAssociationResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Delete a SageMaker HyperPod cluster.
*
*
* @param deleteClusterRequest
* @return Result of the DeleteCluster operation returned by the service.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @throws ConflictException
* There was a conflict when you attempted to modify a SageMaker entity such as an Experiment
* or Artifact.
* @sample AmazonSageMaker.DeleteCluster
* @see AWS API
* Documentation
*/
@Override
public DeleteClusterResult deleteCluster(DeleteClusterRequest request) {
request = beforeClientExecution(request);
return executeDeleteCluster(request);
}
@SdkInternalApi
final DeleteClusterResult executeDeleteCluster(DeleteClusterRequest deleteClusterRequest) {
ExecutionContext executionContext = createExecutionContext(deleteClusterRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteClusterRequest> request = null;
Response<DeleteClusterResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteClusterRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteClusterRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteCluster");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DeleteClusterResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteClusterResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes the specified Git repository from your account.
*
*
* @param deleteCodeRepositoryRequest
* @return Result of the DeleteCodeRepository operation returned by the service.
* @sample AmazonSageMaker.DeleteCodeRepository
* @see AWS
* API Documentation
*/
@Override
public DeleteCodeRepositoryResult deleteCodeRepository(DeleteCodeRepositoryRequest request) {
request = beforeClientExecution(request);
return executeDeleteCodeRepository(request);
}
@SdkInternalApi
final DeleteCodeRepositoryResult executeDeleteCodeRepository(DeleteCodeRepositoryRequest deleteCodeRepositoryRequest) {
ExecutionContext executionContext = createExecutionContext(deleteCodeRepositoryRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteCodeRepositoryRequest> request = null;
Response<DeleteCodeRepositoryResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteCodeRepositoryRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteCodeRepositoryRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteCodeRepository");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DeleteCodeRepositoryResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteCodeRepositoryResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes the specified compilation job. This action deletes only the compilation job resource in Amazon SageMaker.
* It doesn't delete other resources that are related to that job, such as the model artifacts that the job creates,
* the compilation logs in CloudWatch, the compiled model, or the IAM role.
*
*
* You can delete a compilation job only if its current status is COMPLETED, FAILED, or STOPPED. If
* the job status is STARTING or INPROGRESS, stop the job, and then delete it after its status becomes
* STOPPED.
*
*
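* An illustrative stop-then-delete sketch; the job name is a placeholder assumption, and the polling a real
* caller would do between stopping and deleting is omitted:
*
* AmazonSageMaker sm = AmazonSageMakerClientBuilder.defaultClient();
* String status = sm.describeCompilationJob(new DescribeCompilationJobRequest()
*     .withCompilationJobName("example-compilation-job")).getCompilationJobStatus();
* if ("STARTING".equals(status) || "INPROGRESS".equals(status)) {
*     sm.stopCompilationJob(new StopCompilationJobRequest().withCompilationJobName("example-compilation-job"));
*     // wait for the status to become STOPPED before deleting
* }
* sm.deleteCompilationJob(new DeleteCompilationJobRequest().withCompilationJobName("example-compilation-job"));
*
*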
* @param deleteCompilationJobRequest
* @return Result of the DeleteCompilationJob operation returned by the service.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @sample AmazonSageMaker.DeleteCompilationJob
* @see AWS
* API Documentation
*/
@Override
public DeleteCompilationJobResult deleteCompilationJob(DeleteCompilationJobRequest request) {
request = beforeClientExecution(request);
return executeDeleteCompilationJob(request);
}
@SdkInternalApi
final DeleteCompilationJobResult executeDeleteCompilationJob(DeleteCompilationJobRequest deleteCompilationJobRequest) {
ExecutionContext executionContext = createExecutionContext(deleteCompilationJobRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteCompilationJobRequest> request = null;
Response<DeleteCompilationJobResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteCompilationJobRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteCompilationJobRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteCompilationJob");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DeleteCompilationJobResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteCompilationJobResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes a context.
*
*
* @param deleteContextRequest
* @return Result of the DeleteContext operation returned by the service.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @sample AmazonSageMaker.DeleteContext
* @see AWS API
* Documentation
*/
@Override
public DeleteContextResult deleteContext(DeleteContextRequest request) {
request = beforeClientExecution(request);
return executeDeleteContext(request);
}
@SdkInternalApi
final DeleteContextResult executeDeleteContext(DeleteContextRequest deleteContextRequest) {
ExecutionContext executionContext = createExecutionContext(deleteContextRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteContextRequest> request = null;
Response<DeleteContextResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteContextRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteContextRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteContext");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DeleteContextResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteContextResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes a data quality monitoring job definition.
*
*
* @param deleteDataQualityJobDefinitionRequest
* @return Result of the DeleteDataQualityJobDefinition operation returned by the service.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @sample AmazonSageMaker.DeleteDataQualityJobDefinition
* @see AWS API Documentation
*/
@Override
public DeleteDataQualityJobDefinitionResult deleteDataQualityJobDefinition(DeleteDataQualityJobDefinitionRequest request) {
request = beforeClientExecution(request);
return executeDeleteDataQualityJobDefinition(request);
}
@SdkInternalApi
final DeleteDataQualityJobDefinitionResult executeDeleteDataQualityJobDefinition(DeleteDataQualityJobDefinitionRequest deleteDataQualityJobDefinitionRequest) {
ExecutionContext executionContext = createExecutionContext(deleteDataQualityJobDefinitionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteDataQualityJobDefinitionRequest> request = null;
Response<DeleteDataQualityJobDefinitionResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteDataQualityJobDefinitionRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(deleteDataQualityJobDefinitionRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteDataQualityJobDefinition");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DeleteDataQualityJobDefinitionResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new DeleteDataQualityJobDefinitionResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes a fleet.
*
*
* @param deleteDeviceFleetRequest
* @return Result of the DeleteDeviceFleet operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @sample AmazonSageMaker.DeleteDeviceFleet
* @see AWS
* API Documentation
*/
@Override
public DeleteDeviceFleetResult deleteDeviceFleet(DeleteDeviceFleetRequest request) {
request = beforeClientExecution(request);
return executeDeleteDeviceFleet(request);
}
@SdkInternalApi
final DeleteDeviceFleetResult executeDeleteDeviceFleet(DeleteDeviceFleetRequest deleteDeviceFleetRequest) {
ExecutionContext executionContext = createExecutionContext(deleteDeviceFleetRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteDeviceFleetRequest> request = null;
Response<DeleteDeviceFleetResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteDeviceFleetRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteDeviceFleetRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteDeviceFleet");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DeleteDeviceFleetResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteDeviceFleetResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Used to delete a domain. If you onboarded with IAM mode, you will need to delete your domain to onboard again
* using IAM Identity Center. Use with caution. All of the members of the domain will lose access to their EFS
* volume, including data, notebooks, and other artifacts.
*
*
* @param deleteDomainRequest
* @return Result of the DeleteDomain operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @sample AmazonSageMaker.DeleteDomain
* @see AWS API
* Documentation
*/
@Override
public DeleteDomainResult deleteDomain(DeleteDomainRequest request) {
request = beforeClientExecution(request);
return executeDeleteDomain(request);
}
@SdkInternalApi
final DeleteDomainResult executeDeleteDomain(DeleteDomainRequest deleteDomainRequest) {
ExecutionContext executionContext = createExecutionContext(deleteDomainRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteDomainRequest> request = null;
Response<DeleteDomainResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteDomainRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteDomainRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteDomain");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DeleteDomainResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteDomainResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes an edge deployment plan if (and only if) all the stages in the plan are inactive or there are no stages
* in the plan.
*
*
* @param deleteEdgeDeploymentPlanRequest
* @return Result of the DeleteEdgeDeploymentPlan operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @sample AmazonSageMaker.DeleteEdgeDeploymentPlan
* @see AWS API Documentation
*/
@Override
public DeleteEdgeDeploymentPlanResult deleteEdgeDeploymentPlan(DeleteEdgeDeploymentPlanRequest request) {
request = beforeClientExecution(request);
return executeDeleteEdgeDeploymentPlan(request);
}
@SdkInternalApi
final DeleteEdgeDeploymentPlanResult executeDeleteEdgeDeploymentPlan(DeleteEdgeDeploymentPlanRequest deleteEdgeDeploymentPlanRequest) {
ExecutionContext executionContext = createExecutionContext(deleteEdgeDeploymentPlanRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteEdgeDeploymentPlanRequest> request = null;
Response<DeleteEdgeDeploymentPlanResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteEdgeDeploymentPlanRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(deleteEdgeDeploymentPlanRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteEdgeDeploymentPlan");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DeleteEdgeDeploymentPlanResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new DeleteEdgeDeploymentPlanResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Delete a stage in an edge deployment plan if (and only if) the stage is inactive.
*
*
* @param deleteEdgeDeploymentStageRequest
* @return Result of the DeleteEdgeDeploymentStage operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @sample AmazonSageMaker.DeleteEdgeDeploymentStage
* @see AWS API Documentation
*/
@Override
public DeleteEdgeDeploymentStageResult deleteEdgeDeploymentStage(DeleteEdgeDeploymentStageRequest request) {
request = beforeClientExecution(request);
return executeDeleteEdgeDeploymentStage(request);
}
@SdkInternalApi
final DeleteEdgeDeploymentStageResult executeDeleteEdgeDeploymentStage(DeleteEdgeDeploymentStageRequest deleteEdgeDeploymentStageRequest) {
ExecutionContext executionContext = createExecutionContext(deleteEdgeDeploymentStageRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteEdgeDeploymentStageRequest> request = null;
Response<DeleteEdgeDeploymentStageResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteEdgeDeploymentStageRequestProtocolMarshaller(protocolFactory).marshall(super
.beforeMarshalling(deleteEdgeDeploymentStageRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteEdgeDeploymentStage");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DeleteEdgeDeploymentStageResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false),
new DeleteEdgeDeploymentStageResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes an endpoint. SageMaker frees up all of the resources that were deployed when the endpoint was created.
*
*
* SageMaker retires any custom KMS key grants associated with the endpoint, meaning you don't need to use the RevokeGrant API call.
*
*
* When you delete your endpoint, SageMaker asynchronously deletes associated endpoint resources such as KMS key
* grants. You might still see these resources in your account for a few minutes after deleting your endpoint. Do
* not delete or revoke the permissions for your
* ExecutionRoleArn, otherwise SageMaker cannot delete these resources.
*
*
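* A minimal sketch that deletes an endpoint and then its now-unused endpoint configuration; both names are
* placeholder assumptions:
*
* AmazonSageMaker sm = AmazonSageMakerClientBuilder.defaultClient();
* sm.deleteEndpoint(new DeleteEndpointRequest().withEndpointName("example-endpoint"));
* sm.deleteEndpointConfig(new DeleteEndpointConfigRequest().withEndpointConfigName("example-endpoint-config"));
*
*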
* @param deleteEndpointRequest
* @return Result of the DeleteEndpoint operation returned by the service.
* @sample AmazonSageMaker.DeleteEndpoint
* @see AWS API
* Documentation
*/
@Override
public DeleteEndpointResult deleteEndpoint(DeleteEndpointRequest request) {
request = beforeClientExecution(request);
return executeDeleteEndpoint(request);
}
@SdkInternalApi
final DeleteEndpointResult executeDeleteEndpoint(DeleteEndpointRequest deleteEndpointRequest) {
ExecutionContext executionContext = createExecutionContext(deleteEndpointRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteEndpointRequest> request = null;
Response<DeleteEndpointResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteEndpointRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteEndpointRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteEndpoint");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DeleteEndpointResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteEndpointResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes an endpoint configuration. The DeleteEndpointConfig API deletes only the specified
* configuration. It does not delete endpoints created using the configuration.
*
*
* You must not delete an EndpointConfig in use by an endpoint that is live or while the
* UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. If you
* delete the EndpointConfig of an endpoint that is active or being created or updated, you may lose
* visibility into the instance type the endpoint is using. The endpoint must be deleted in order to stop incurring
* charges.
*
*
* @param deleteEndpointConfigRequest
* @return Result of the DeleteEndpointConfig operation returned by the service.
* @sample AmazonSageMaker.DeleteEndpointConfig
* @see AWS
* API Documentation
*/
@Override
public DeleteEndpointConfigResult deleteEndpointConfig(DeleteEndpointConfigRequest request) {
request = beforeClientExecution(request);
return executeDeleteEndpointConfig(request);
}
@SdkInternalApi
final DeleteEndpointConfigResult executeDeleteEndpointConfig(DeleteEndpointConfigRequest deleteEndpointConfigRequest) {
ExecutionContext executionContext = createExecutionContext(deleteEndpointConfigRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteEndpointConfigRequest> request = null;
Response<DeleteEndpointConfigResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteEndpointConfigRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteEndpointConfigRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteEndpointConfig");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DeleteEndpointConfigResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteEndpointConfigResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
/**
*
* Deletes a SageMaker experiment. All trials associated with the experiment must be deleted first. Use the ListTrials API to get a
* list of the trials associated with the experiment.
*
*
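* An illustrative sketch that deletes an experiment's trials before the experiment itself; the experiment name
* is a placeholder assumption, and it assumes each trial's components have already been removed:
*
* AmazonSageMaker sm = AmazonSageMakerClientBuilder.defaultClient();
* for (TrialSummary trial : sm.listTrials(new ListTrialsRequest().withExperimentName("example-experiment"))
*         .getTrialSummaries()) {
*     sm.deleteTrial(new DeleteTrialRequest().withTrialName(trial.getTrialName()));
* }
* sm.deleteExperiment(new DeleteExperimentRequest().withExperimentName("example-experiment"));
*
*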
* @param deleteExperimentRequest
* @return Result of the DeleteExperiment operation returned by the service.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @sample AmazonSageMaker.DeleteExperiment
* @see AWS API
* Documentation
*/
@Override
public DeleteExperimentResult deleteExperiment(DeleteExperimentRequest request) {
request = beforeClientExecution(request);
return executeDeleteExperiment(request);
}
@SdkInternalApi
final DeleteExperimentResult executeDeleteExperiment(DeleteExperimentRequest deleteExperimentRequest) {
ExecutionContext executionContext = createExecutionContext(deleteExperimentRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteExperimentRequest> request = null;
Response<DeleteExperimentResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteExperimentRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteExperimentRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteExperiment");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DeleteExperimentResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteExperimentResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
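/*
 * A minimal usage sketch, assuming default credentials/region and that all trials associated with the experiment
 * have already been deleted as the documentation above requires; the experiment name is a hypothetical placeholder.
 *
 * AmazonSageMaker sagemaker = AmazonSageMakerClientBuilder.defaultClient();
 * sagemaker.deleteExperiment(new DeleteExperimentRequest()
 *         .withExperimentName("my-experiment")); // hypothetical name
 */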
/**
*
* Delete the FeatureGroup and any data that was written to the OnlineStore of the FeatureGroup. Data cannot be
* accessed from the OnlineStore immediately after DeleteFeatureGroup is called.
*
*
* Data written into the OfflineStore will not be deleted. The Amazon Web Services Glue database and tables that
* are automatically created for your OfflineStore are not deleted.
*
*
* Note that it can take approximately 10-15 minutes to delete an OnlineStore FeatureGroup with the InMemory
* StorageType.
*
*
* @param deleteFeatureGroupRequest
* @return Result of the DeleteFeatureGroup operation returned by the service.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @sample AmazonSageMaker.DeleteFeatureGroup
* @see AWS API Documentation
*/
@Override
public DeleteFeatureGroupResult deleteFeatureGroup(DeleteFeatureGroupRequest request) {
request = beforeClientExecution(request);
return executeDeleteFeatureGroup(request);
}
@SdkInternalApi
final DeleteFeatureGroupResult executeDeleteFeatureGroup(DeleteFeatureGroupRequest deleteFeatureGroupRequest) {
ExecutionContext executionContext = createExecutionContext(deleteFeatureGroupRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteFeatureGroupRequest> request = null;
Response<DeleteFeatureGroupResult> response = null;
try {
awsRequestMetrics.startEvent(Field.RequestMarshallTime);
try {
request = new DeleteFeatureGroupRequestProtocolMarshaller(protocolFactory).marshall(super.beforeMarshalling(deleteFeatureGroupRequest));
// Binds the request metrics to the current request.
request.setAWSRequestMetrics(awsRequestMetrics);
request.addHandlerContext(HandlerContextKey.CLIENT_ENDPOINT, endpoint);
request.addHandlerContext(HandlerContextKey.ENDPOINT_OVERRIDDEN, isEndpointOverridden());
request.addHandlerContext(HandlerContextKey.SIGNING_REGION, getSigningRegion());
request.addHandlerContext(HandlerContextKey.SERVICE_ID, "SageMaker");
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteFeatureGroup");
request.addHandlerContext(HandlerContextKey.ADVANCED_CONFIG, advancedConfig);
} finally {
awsRequestMetrics.endEvent(Field.RequestMarshallTime);
}
HttpResponseHandler<AmazonWebServiceResponse<DeleteFeatureGroupResult>> responseHandler = protocolFactory.createResponseHandler(
new JsonOperationMetadata().withPayloadJson(true).withHasStreamingSuccessResponse(false), new DeleteFeatureGroupResultJsonUnmarshaller());
response = invoke(request, responseHandler, executionContext);
return response.getAwsResponse();
} finally {
endClientExecution(awsRequestMetrics, request, response);
}
}
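/*
 * A minimal usage sketch, assuming default credentials/region and that the delayed removal of OnlineStore data
 * described above is acceptable; the feature group name is a hypothetical placeholder.
 *
 * AmazonSageMaker sagemaker = AmazonSageMakerClientBuilder.defaultClient();
 * sagemaker.deleteFeatureGroup(new DeleteFeatureGroupRequest()
 *         .withFeatureGroupName("my-feature-group")); // hypothetical name
 */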
/**
*
* Deletes the specified flow definition.
*
*
* @param deleteFlowDefinitionRequest
* @return Result of the DeleteFlowDefinition operation returned by the service.
* @throws ResourceInUseException
* Resource being accessed is in use.
* @throws ResourceNotFoundException
* Resource being accessed is not found.
* @sample AmazonSageMaker.DeleteFlowDefinition
* @see AWS API Documentation
*/
@Override
public DeleteFlowDefinitionResult deleteFlowDefinition(DeleteFlowDefinitionRequest request) {
request = beforeClientExecution(request);
return executeDeleteFlowDefinition(request);
}
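/*
 * A minimal usage sketch of the public entry point above, assuming default credentials/region; the flow
 * definition name is a hypothetical placeholder.
 *
 * AmazonSageMaker sagemaker = AmazonSageMakerClientBuilder.defaultClient();
 * sagemaker.deleteFlowDefinition(new DeleteFlowDefinitionRequest()
 *         .withFlowDefinitionName("my-flow-definition")); // hypothetical name
 */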
@SdkInternalApi
final DeleteFlowDefinitionResult executeDeleteFlowDefinition(DeleteFlowDefinitionRequest deleteFlowDefinitionRequest) {
ExecutionContext executionContext = createExecutionContext(deleteFlowDefinitionRequest);
AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
awsRequestMetrics.startEvent(Field.ClientExecuteTime);
Request<DeleteFlowDefinitionRequest> request = null;
Response