/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package software.amazon.awssdk.services.s3;
import java.util.Collections;
import java.util.List;
import java.util.function.Consumer;
import software.amazon.awssdk.annotations.Generated;
import software.amazon.awssdk.annotations.SdkInternalApi;
import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration;
import software.amazon.awssdk.awscore.client.handler.AwsSyncClientHandler;
import software.amazon.awssdk.awscore.exception.AwsServiceException;
import software.amazon.awssdk.core.ApiName;
import software.amazon.awssdk.core.RequestOverrideConfiguration;
import software.amazon.awssdk.core.Response;
import software.amazon.awssdk.core.client.config.SdkClientConfiguration;
import software.amazon.awssdk.core.client.config.SdkClientOption;
import software.amazon.awssdk.core.client.handler.ClientExecutionParams;
import software.amazon.awssdk.core.client.handler.SyncClientHandler;
import software.amazon.awssdk.core.exception.SdkClientException;
import software.amazon.awssdk.core.http.HttpResponseHandler;
import software.amazon.awssdk.core.interceptor.SdkInternalExecutionAttribute;
import software.amazon.awssdk.core.interceptor.trait.HttpChecksumRequired;
import software.amazon.awssdk.core.metrics.CoreMetric;
import software.amazon.awssdk.core.runtime.transform.StreamingRequestMarshaller;
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.core.sync.ResponseTransformer;
import software.amazon.awssdk.core.util.VersionInfo;
import software.amazon.awssdk.metrics.MetricCollector;
import software.amazon.awssdk.metrics.MetricPublisher;
import software.amazon.awssdk.metrics.NoOpMetricCollector;
import software.amazon.awssdk.protocols.core.ExceptionMetadata;
import software.amazon.awssdk.protocols.xml.AwsS3ProtocolFactory;
import software.amazon.awssdk.protocols.xml.XmlOperationMetadata;
import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest;
import software.amazon.awssdk.services.s3.model.AbortMultipartUploadResponse;
import software.amazon.awssdk.services.s3.model.BucketAlreadyExistsException;
import software.amazon.awssdk.services.s3.model.BucketAlreadyOwnedByYouException;
import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest;
import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse;
import software.amazon.awssdk.services.s3.model.CopyObjectRequest;
import software.amazon.awssdk.services.s3.model.CopyObjectResponse;
import software.amazon.awssdk.services.s3.model.CreateBucketRequest;
import software.amazon.awssdk.services.s3.model.CreateBucketResponse;
import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketAnalyticsConfigurationRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketAnalyticsConfigurationResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketCorsRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketCorsResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketEncryptionRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketEncryptionResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketIntelligentTieringConfigurationRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketIntelligentTieringConfigurationResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketInventoryConfigurationRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketInventoryConfigurationResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketLifecycleRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketLifecycleResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketMetricsConfigurationRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketMetricsConfigurationResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketOwnershipControlsRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketOwnershipControlsResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketPolicyRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketPolicyResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketReplicationRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketReplicationResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketTaggingRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketTaggingResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketWebsiteRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketWebsiteResponse;
import software.amazon.awssdk.services.s3.model.DeleteObjectRequest;
import software.amazon.awssdk.services.s3.model.DeleteObjectResponse;
import software.amazon.awssdk.services.s3.model.DeleteObjectTaggingRequest;
import software.amazon.awssdk.services.s3.model.DeleteObjectTaggingResponse;
import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest;
import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse;
import software.amazon.awssdk.services.s3.model.DeletePublicAccessBlockRequest;
import software.amazon.awssdk.services.s3.model.DeletePublicAccessBlockResponse;
import software.amazon.awssdk.services.s3.model.GetBucketAccelerateConfigurationRequest;
import software.amazon.awssdk.services.s3.model.GetBucketAccelerateConfigurationResponse;
import software.amazon.awssdk.services.s3.model.GetBucketAclRequest;
import software.amazon.awssdk.services.s3.model.GetBucketAclResponse;
import software.amazon.awssdk.services.s3.model.GetBucketAnalyticsConfigurationRequest;
import software.amazon.awssdk.services.s3.model.GetBucketAnalyticsConfigurationResponse;
import software.amazon.awssdk.services.s3.model.GetBucketCorsRequest;
import software.amazon.awssdk.services.s3.model.GetBucketCorsResponse;
import software.amazon.awssdk.services.s3.model.GetBucketEncryptionRequest;
import software.amazon.awssdk.services.s3.model.GetBucketEncryptionResponse;
import software.amazon.awssdk.services.s3.model.GetBucketIntelligentTieringConfigurationRequest;
import software.amazon.awssdk.services.s3.model.GetBucketIntelligentTieringConfigurationResponse;
import software.amazon.awssdk.services.s3.model.GetBucketInventoryConfigurationRequest;
import software.amazon.awssdk.services.s3.model.GetBucketInventoryConfigurationResponse;
import software.amazon.awssdk.services.s3.model.GetBucketLifecycleConfigurationRequest;
import software.amazon.awssdk.services.s3.model.GetBucketLifecycleConfigurationResponse;
import software.amazon.awssdk.services.s3.model.GetBucketLocationRequest;
import software.amazon.awssdk.services.s3.model.GetBucketLocationResponse;
import software.amazon.awssdk.services.s3.model.GetBucketLoggingRequest;
import software.amazon.awssdk.services.s3.model.GetBucketLoggingResponse;
import software.amazon.awssdk.services.s3.model.GetBucketMetricsConfigurationRequest;
import software.amazon.awssdk.services.s3.model.GetBucketMetricsConfigurationResponse;
import software.amazon.awssdk.services.s3.model.GetBucketNotificationConfigurationRequest;
import software.amazon.awssdk.services.s3.model.GetBucketNotificationConfigurationResponse;
import software.amazon.awssdk.services.s3.model.GetBucketOwnershipControlsRequest;
import software.amazon.awssdk.services.s3.model.GetBucketOwnershipControlsResponse;
import software.amazon.awssdk.services.s3.model.GetBucketPolicyRequest;
import software.amazon.awssdk.services.s3.model.GetBucketPolicyResponse;
import software.amazon.awssdk.services.s3.model.GetBucketPolicyStatusRequest;
import software.amazon.awssdk.services.s3.model.GetBucketPolicyStatusResponse;
import software.amazon.awssdk.services.s3.model.GetBucketReplicationRequest;
import software.amazon.awssdk.services.s3.model.GetBucketReplicationResponse;
import software.amazon.awssdk.services.s3.model.GetBucketRequestPaymentRequest;
import software.amazon.awssdk.services.s3.model.GetBucketRequestPaymentResponse;
import software.amazon.awssdk.services.s3.model.GetBucketTaggingRequest;
import software.amazon.awssdk.services.s3.model.GetBucketTaggingResponse;
import software.amazon.awssdk.services.s3.model.GetBucketVersioningRequest;
import software.amazon.awssdk.services.s3.model.GetBucketVersioningResponse;
import software.amazon.awssdk.services.s3.model.GetBucketWebsiteRequest;
import software.amazon.awssdk.services.s3.model.GetBucketWebsiteResponse;
import software.amazon.awssdk.services.s3.model.GetObjectAclRequest;
import software.amazon.awssdk.services.s3.model.GetObjectAclResponse;
import software.amazon.awssdk.services.s3.model.GetObjectLegalHoldRequest;
import software.amazon.awssdk.services.s3.model.GetObjectLegalHoldResponse;
import software.amazon.awssdk.services.s3.model.GetObjectLockConfigurationRequest;
import software.amazon.awssdk.services.s3.model.GetObjectLockConfigurationResponse;
import software.amazon.awssdk.services.s3.model.GetObjectRequest;
import software.amazon.awssdk.services.s3.model.GetObjectResponse;
import software.amazon.awssdk.services.s3.model.GetObjectRetentionRequest;
import software.amazon.awssdk.services.s3.model.GetObjectRetentionResponse;
import software.amazon.awssdk.services.s3.model.GetObjectTaggingRequest;
import software.amazon.awssdk.services.s3.model.GetObjectTaggingResponse;
import software.amazon.awssdk.services.s3.model.GetObjectTorrentRequest;
import software.amazon.awssdk.services.s3.model.GetObjectTorrentResponse;
import software.amazon.awssdk.services.s3.model.GetPublicAccessBlockRequest;
import software.amazon.awssdk.services.s3.model.GetPublicAccessBlockResponse;
import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
import software.amazon.awssdk.services.s3.model.HeadBucketResponse;
import software.amazon.awssdk.services.s3.model.HeadObjectRequest;
import software.amazon.awssdk.services.s3.model.HeadObjectResponse;
import software.amazon.awssdk.services.s3.model.InvalidObjectStateException;
import software.amazon.awssdk.services.s3.model.ListBucketAnalyticsConfigurationsRequest;
import software.amazon.awssdk.services.s3.model.ListBucketAnalyticsConfigurationsResponse;
import software.amazon.awssdk.services.s3.model.ListBucketIntelligentTieringConfigurationsRequest;
import software.amazon.awssdk.services.s3.model.ListBucketIntelligentTieringConfigurationsResponse;
import software.amazon.awssdk.services.s3.model.ListBucketInventoryConfigurationsRequest;
import software.amazon.awssdk.services.s3.model.ListBucketInventoryConfigurationsResponse;
import software.amazon.awssdk.services.s3.model.ListBucketMetricsConfigurationsRequest;
import software.amazon.awssdk.services.s3.model.ListBucketMetricsConfigurationsResponse;
import software.amazon.awssdk.services.s3.model.ListBucketsRequest;
import software.amazon.awssdk.services.s3.model.ListBucketsResponse;
import software.amazon.awssdk.services.s3.model.ListMultipartUploadsRequest;
import software.amazon.awssdk.services.s3.model.ListMultipartUploadsResponse;
import software.amazon.awssdk.services.s3.model.ListObjectVersionsRequest;
import software.amazon.awssdk.services.s3.model.ListObjectVersionsResponse;
import software.amazon.awssdk.services.s3.model.ListObjectsRequest;
import software.amazon.awssdk.services.s3.model.ListObjectsResponse;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Response;
import software.amazon.awssdk.services.s3.model.ListPartsRequest;
import software.amazon.awssdk.services.s3.model.ListPartsResponse;
import software.amazon.awssdk.services.s3.model.NoSuchBucketException;
import software.amazon.awssdk.services.s3.model.NoSuchKeyException;
import software.amazon.awssdk.services.s3.model.NoSuchUploadException;
import software.amazon.awssdk.services.s3.model.ObjectAlreadyInActiveTierErrorException;
import software.amazon.awssdk.services.s3.model.ObjectNotInActiveTierErrorException;
import software.amazon.awssdk.services.s3.model.PutBucketAccelerateConfigurationRequest;
import software.amazon.awssdk.services.s3.model.PutBucketAccelerateConfigurationResponse;
import software.amazon.awssdk.services.s3.model.PutBucketAclRequest;
import software.amazon.awssdk.services.s3.model.PutBucketAclResponse;
import software.amazon.awssdk.services.s3.model.PutBucketAnalyticsConfigurationRequest;
import software.amazon.awssdk.services.s3.model.PutBucketAnalyticsConfigurationResponse;
import software.amazon.awssdk.services.s3.model.PutBucketCorsRequest;
import software.amazon.awssdk.services.s3.model.PutBucketCorsResponse;
import software.amazon.awssdk.services.s3.model.PutBucketEncryptionRequest;
import software.amazon.awssdk.services.s3.model.PutBucketEncryptionResponse;
import software.amazon.awssdk.services.s3.model.PutBucketIntelligentTieringConfigurationRequest;
import software.amazon.awssdk.services.s3.model.PutBucketIntelligentTieringConfigurationResponse;
import software.amazon.awssdk.services.s3.model.PutBucketInventoryConfigurationRequest;
import software.amazon.awssdk.services.s3.model.PutBucketInventoryConfigurationResponse;
import software.amazon.awssdk.services.s3.model.PutBucketLifecycleConfigurationRequest;
import software.amazon.awssdk.services.s3.model.PutBucketLifecycleConfigurationResponse;
import software.amazon.awssdk.services.s3.model.PutBucketLoggingRequest;
import software.amazon.awssdk.services.s3.model.PutBucketLoggingResponse;
import software.amazon.awssdk.services.s3.model.PutBucketMetricsConfigurationRequest;
import software.amazon.awssdk.services.s3.model.PutBucketMetricsConfigurationResponse;
import software.amazon.awssdk.services.s3.model.PutBucketNotificationConfigurationRequest;
import software.amazon.awssdk.services.s3.model.PutBucketNotificationConfigurationResponse;
import software.amazon.awssdk.services.s3.model.PutBucketOwnershipControlsRequest;
import software.amazon.awssdk.services.s3.model.PutBucketOwnershipControlsResponse;
import software.amazon.awssdk.services.s3.model.PutBucketPolicyRequest;
import software.amazon.awssdk.services.s3.model.PutBucketPolicyResponse;
import software.amazon.awssdk.services.s3.model.PutBucketReplicationRequest;
import software.amazon.awssdk.services.s3.model.PutBucketReplicationResponse;
import software.amazon.awssdk.services.s3.model.PutBucketRequestPaymentRequest;
import software.amazon.awssdk.services.s3.model.PutBucketRequestPaymentResponse;
import software.amazon.awssdk.services.s3.model.PutBucketTaggingRequest;
import software.amazon.awssdk.services.s3.model.PutBucketTaggingResponse;
import software.amazon.awssdk.services.s3.model.PutBucketVersioningRequest;
import software.amazon.awssdk.services.s3.model.PutBucketVersioningResponse;
import software.amazon.awssdk.services.s3.model.PutBucketWebsiteRequest;
import software.amazon.awssdk.services.s3.model.PutBucketWebsiteResponse;
import software.amazon.awssdk.services.s3.model.PutObjectAclRequest;
import software.amazon.awssdk.services.s3.model.PutObjectAclResponse;
import software.amazon.awssdk.services.s3.model.PutObjectLegalHoldRequest;
import software.amazon.awssdk.services.s3.model.PutObjectLegalHoldResponse;
import software.amazon.awssdk.services.s3.model.PutObjectLockConfigurationRequest;
import software.amazon.awssdk.services.s3.model.PutObjectLockConfigurationResponse;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import software.amazon.awssdk.services.s3.model.PutObjectResponse;
import software.amazon.awssdk.services.s3.model.PutObjectRetentionRequest;
import software.amazon.awssdk.services.s3.model.PutObjectRetentionResponse;
import software.amazon.awssdk.services.s3.model.PutObjectTaggingRequest;
import software.amazon.awssdk.services.s3.model.PutObjectTaggingResponse;
import software.amazon.awssdk.services.s3.model.PutPublicAccessBlockRequest;
import software.amazon.awssdk.services.s3.model.PutPublicAccessBlockResponse;
import software.amazon.awssdk.services.s3.model.RestoreObjectRequest;
import software.amazon.awssdk.services.s3.model.RestoreObjectResponse;
import software.amazon.awssdk.services.s3.model.S3Exception;
import software.amazon.awssdk.services.s3.model.S3Request;
import software.amazon.awssdk.services.s3.model.UploadPartCopyRequest;
import software.amazon.awssdk.services.s3.model.UploadPartCopyResponse;
import software.amazon.awssdk.services.s3.model.UploadPartRequest;
import software.amazon.awssdk.services.s3.model.UploadPartResponse;
import software.amazon.awssdk.services.s3.paginators.ListMultipartUploadsIterable;
import software.amazon.awssdk.services.s3.paginators.ListObjectVersionsIterable;
import software.amazon.awssdk.services.s3.paginators.ListObjectsV2Iterable;
import software.amazon.awssdk.services.s3.paginators.ListPartsIterable;
import software.amazon.awssdk.services.s3.transform.AbortMultipartUploadRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.CompleteMultipartUploadRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.CopyObjectRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.CreateBucketRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.CreateMultipartUploadRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.DeleteBucketAnalyticsConfigurationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.DeleteBucketCorsRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.DeleteBucketEncryptionRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.DeleteBucketIntelligentTieringConfigurationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.DeleteBucketInventoryConfigurationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.DeleteBucketLifecycleRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.DeleteBucketMetricsConfigurationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.DeleteBucketOwnershipControlsRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.DeleteBucketPolicyRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.DeleteBucketReplicationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.DeleteBucketRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.DeleteBucketTaggingRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.DeleteBucketWebsiteRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.DeleteObjectRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.DeleteObjectTaggingRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.DeleteObjectsRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.DeletePublicAccessBlockRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetBucketAccelerateConfigurationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetBucketAclRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetBucketAnalyticsConfigurationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetBucketCorsRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetBucketEncryptionRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetBucketIntelligentTieringConfigurationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetBucketInventoryConfigurationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetBucketLifecycleConfigurationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetBucketLocationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetBucketLoggingRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetBucketMetricsConfigurationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetBucketNotificationConfigurationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetBucketOwnershipControlsRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetBucketPolicyRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetBucketPolicyStatusRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetBucketReplicationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetBucketRequestPaymentRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetBucketTaggingRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetBucketVersioningRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetBucketWebsiteRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetObjectAclRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetObjectLegalHoldRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetObjectLockConfigurationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetObjectRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetObjectRetentionRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetObjectTaggingRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetObjectTorrentRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.GetPublicAccessBlockRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.HeadBucketRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.HeadObjectRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.ListBucketAnalyticsConfigurationsRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.ListBucketIntelligentTieringConfigurationsRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.ListBucketInventoryConfigurationsRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.ListBucketMetricsConfigurationsRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.ListBucketsRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.ListMultipartUploadsRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.ListObjectVersionsRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.ListObjectsRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.ListObjectsV2RequestMarshaller;
import software.amazon.awssdk.services.s3.transform.ListPartsRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutBucketAccelerateConfigurationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutBucketAclRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutBucketAnalyticsConfigurationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutBucketCorsRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutBucketEncryptionRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutBucketIntelligentTieringConfigurationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutBucketInventoryConfigurationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutBucketLifecycleConfigurationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutBucketLoggingRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutBucketMetricsConfigurationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutBucketNotificationConfigurationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutBucketOwnershipControlsRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutBucketPolicyRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutBucketReplicationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutBucketRequestPaymentRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutBucketTaggingRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutBucketVersioningRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutBucketWebsiteRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutObjectAclRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutObjectLegalHoldRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutObjectLockConfigurationRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutObjectRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutObjectRetentionRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutObjectTaggingRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.PutPublicAccessBlockRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.RestoreObjectRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.UploadPartCopyRequestMarshaller;
import software.amazon.awssdk.services.s3.transform.UploadPartRequestMarshaller;
import software.amazon.awssdk.services.s3.waiters.S3Waiter;
import software.amazon.awssdk.utils.Logger;
/**
* Internal implementation of {@link S3Client}.
*
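* Service clients are normally created through the public S3Client.builder() entry point rather than by
* instantiating this class directly. A minimal, illustrative sketch (the Region value is a placeholder choice,
* not something this file prescribes):
*
*     S3Client s3 = S3Client.builder()
*             .region(Region.US_EAST_1)   // assumes software.amazon.awssdk.regions.Region is imported
*             .build();
*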
* @see S3Client#builder()
*/
@Generated("software.amazon.awssdk:codegen")
@SdkInternalApi
final class DefaultS3Client implements S3Client {
private static final Logger log = Logger.loggerFor(DefaultS3Client.class);
private final SyncClientHandler clientHandler;
private final AwsS3ProtocolFactory protocolFactory;
private final SdkClientConfiguration clientConfiguration;
protected DefaultS3Client(SdkClientConfiguration clientConfiguration) {
this.clientHandler = new AwsSyncClientHandler(clientConfiguration);
this.clientConfiguration = clientConfiguration;
this.protocolFactory = init();
}
@Override
public final String serviceName() {
return SERVICE_NAME;
}
/**
*
* This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be
* uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if
* any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might
* be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by
* all parts.
*
*
* To verify that all parts have been removed, so you don't get charged for the part storage, you should call the ListParts operation and ensure that
* the parts list is empty.
*
*
* For information about permissions required to use the multipart upload API, see Multipart Upload API and
* Permissions.
*
*
* The following operations are related to AbortMultipartUpload:
*
* - CreateMultipartUpload
*
* - UploadPart
*
* - CompleteMultipartUpload
*
* - ListParts
*
* - ListMultipartUploads
*
*
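* A minimal usage sketch (illustrative only): the bucket, key, and upload ID below are placeholders, and s3 is
* assumed to be an already-built S3Client.
*
*     AbortMultipartUploadResponse aborted = s3.abortMultipartUpload(AbortMultipartUploadRequest.builder()
*             .bucket("my-bucket")          // placeholder bucket name
*             .key("my-object-key")         // placeholder object key
*             .uploadId(uploadId)           // upload ID returned by createMultipartUpload
*             .build());
*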
* @param abortMultipartUploadRequest
* @return Result of the AbortMultipartUpload operation returned by the service.
* @throws NoSuchUploadException
* The specified multipart upload does not exist.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.AbortMultipartUpload
*/
@Override
public AbortMultipartUploadResponse abortMultipartUpload(AbortMultipartUploadRequest abortMultipartUploadRequest)
throws NoSuchUploadException, AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<AbortMultipartUploadResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(AbortMultipartUploadResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, abortMultipartUploadRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "AbortMultipartUpload");
return clientHandler.execute(new ClientExecutionParams<AbortMultipartUploadRequest, AbortMultipartUploadResponse>()
.withOperationName("AbortMultipartUpload").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(abortMultipartUploadRequest)
.withMarshaller(new AbortMultipartUploadRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Completes a multipart upload by assembling previously uploaded parts.
*
*
* You first initiate the multipart upload and then upload all parts using the UploadPart operation. After
* successfully uploading all relevant parts of an upload, you call this operation to complete the upload. Upon
* receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new
* object. In the Complete Multipart Upload request, you must provide the parts list. You must ensure that the parts
* list is complete. This operation concatenates the parts that you provide in the list. For each part in the list,
* you must provide the part number and the ETag value, returned after that part was uploaded.
*
*
* Processing of a Complete Multipart Upload request could take several minutes to complete. After Amazon S3 begins
* processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in
* progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. Because a
* request could fail after the initial 200 OK response has been sent, it is important that you check the response
* body to determine whether the request succeeded.
*
*
* Note that if CompleteMultipartUpload fails, applications should be prepared to retry the failed
* requests. For more information, see Amazon S3 Error Best
* Practices.
*
*
* For more information about multipart uploads, see Uploading Objects Using Multipart
* Upload.
*
*
* For information about permissions required to use the multipart upload API, see Multipart Upload API and
* Permissions.
*
*
* CompleteMultipartUpload has the following special errors:
*
*
* -
*
* Error code: EntityTooSmall
*
*
* -
*
* Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5
* MB in size, except the last part.
*
*
* -
*
* 400 Bad Request
*
*
*
*
* -
*
* Error code: InvalidPart
*
*
* -
*
* Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the
* specified entity tag might not have matched the part's entity tag.
*
*
* -
*
* 400 Bad Request
*
*
*
*
* -
*
* Error code: InvalidPartOrder
*
*
* -
*
* Description: The list of parts was not in ascending order. The parts list must be specified in order by part
* number.
*
*
* -
*
* 400 Bad Request
*
*
*
*
* -
*
* Error code: NoSuchUpload
*
*
* -
*
* Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart
* upload might have been aborted or completed.
*
*
* -
*
* 404 Not Found
*
*
*
*
*
*
* The following operations are related to CompleteMultipartUpload:
*
* - CreateMultipartUpload
*
* - UploadPart
*
* - AbortMultipartUpload
*
* - ListParts
*
* - ListMultipartUploads
*
*
*
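* A minimal usage sketch (illustrative only): names are placeholders, s3 is assumed to be an already-built
* S3Client, and each CompletedPart pairs a part number with the ETag returned by the corresponding uploadPart
* call:
*
*     CompletedPart part1 = CompletedPart.builder().partNumber(1).eTag(part1ETag).build();
*     CompleteMultipartUploadResponse completed = s3.completeMultipartUpload(CompleteMultipartUploadRequest.builder()
*             .bucket("my-bucket")
*             .key("my-object-key")
*             .uploadId(uploadId)
*             .multipartUpload(CompletedMultipartUpload.builder().parts(part1).build())
*             .build());
*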
* @param completeMultipartUploadRequest
* @return Result of the CompleteMultipartUpload operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.CompleteMultipartUpload
*/
@Override
public CompleteMultipartUploadResponse completeMultipartUpload(CompleteMultipartUploadRequest completeMultipartUploadRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<CompleteMultipartUploadResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(CompleteMultipartUploadResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, completeMultipartUploadRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "CompleteMultipartUpload");
return clientHandler
.execute(new ClientExecutionParams<CompleteMultipartUploadRequest, CompleteMultipartUploadResponse>()
.withOperationName("CompleteMultipartUpload").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(completeMultipartUploadRequest)
.withMarshaller(new CompleteMultipartUploadRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Creates a copy of an object that is already stored in Amazon S3.
*
*
*
* You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size
* in a single atomic operation using this API. However, to copy an object greater than 5 GB, you must use the
* multipart upload Upload Part - Copy API. For more information, see Copy Object Using the
* REST Multipart Upload API.
*
*
*
* All copy requests must be authenticated. Additionally, you must have read access to the source object and
* write access to the destination bucket. For more information, see REST Authentication. Both the
* Region that you want to copy the object from and the Region that you want to copy the object to must be enabled
* for your account.
*
*
* A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the
* files. If the error occurs before the copy operation starts, you receive a standard Amazon S3 error. If the error
* occurs during the copy operation, the error response is embedded in the 200 OK response. This means
* that a 200 OK response can contain either a success or an error. Design your application to parse
* the contents of the response and handle it appropriately.
*
*
* If the copy is successful, you receive a response with information about the copied object.
*
*
*
* If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the
* content-length, and you would need to read the entire body.
*
*
*
* The copy request charge is based on the storage class and Region that you specify for the destination object. For
* pricing information, see Amazon S3 pricing.
*
*
*
* Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a
* transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.
*
*
*
* Metadata
*
*
* When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not
* preserved and is set to private for the user making the request. To override the default ACL setting, specify a
* new ACL when generating a copy request. For more information, see Using ACLs.
*
*
* To specify whether you want the object metadata copied from the source object or replaced with metadata provided
* in the request, you can optionally add the x-amz-metadata-directive header. When you grant
* permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata
* behavior when objects are uploaded. For more information, see Specifying Conditions in a
* Policy in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific condition keys, see
* Actions, Resources, and Condition
* Keys for Amazon S3.
*
*
* x-amz-copy-source-if Headers
*
*
* To only copy an object under certain conditions, such as whether the Etag matches or whether the
* object was modified before or after a specified date, use the following request parameters:
*
*
* -
*
* x-amz-copy-source-if-match
*
*
* -
*
* x-amz-copy-source-if-none-match
*
*
* -
*
* x-amz-copy-source-if-unmodified-since
*
*
* -
*
* x-amz-copy-source-if-modified-since
*
*
*
*
* If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
* headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the
* data:
*
*
* -
*
* x-amz-copy-source-if-match condition evaluates to true
*
*
* -
*
* x-amz-copy-source-if-unmodified-since condition evaluates to false
*
*
*
*
* If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since
* headers are present in the request and evaluate as follows, Amazon S3 returns the
* 412 Precondition Failed response code:
*
*
* -
*
* x-amz-copy-source-if-none-match condition evaluates to false
*
*
* -
*
* x-amz-copy-source-if-modified-since condition evaluates to true
*
*
*
*
*
* All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.
*
*
*
* Encryption
*
*
* The source object that you are copying can be encrypted or unencrypted. The source object can be encrypted with
* server-side encryption using AWS managed encryption keys (SSE-S3 or SSE-KMS) or by using a customer-provided
* encryption key. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data
* centers and decrypts the data when you access it.
*
*
* You can optionally use the appropriate encryption-related headers to request server-side encryption for the
* target object. You have the option to provide your own encryption key or use SSE-S3 or SSE-KMS, regardless of the
* form of server-side encryption that was used to encrypt the source object. You can even request encryption if the
* source object was not encrypted. For more information about server-side encryption, see Using Server-Side
* Encryption.
*
*
* Access Control List (ACL)-Specific Request Headers
*
*
* When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects
* are private. Only the owner has full access control. When adding a new object, you can grant permissions to
* individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL
* on the object. For more information, see Access Control List (ACL) Overview
* and Managing ACLs Using the
* REST API.
*
*
* Storage Class Options
*
*
* You can use the CopyObject operation to change the storage class of an object that is already stored
* in Amazon S3 using the StorageClass parameter. For more information, see Storage Classes in the
* Amazon S3 Service Developer Guide.
*
*
* Versioning
*
*
* By default, x-amz-copy-source identifies the current version of an object to copy. If the current
* version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the
* versionId subresource.
*
*
* If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being
* copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID
* of the copied object in the x-amz-version-id response header in the response.
*
*
* If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is
* always null.
*
*
* If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as
* a source object for the copy operation. For more information, see RestoreObject.
*
*
* The following operations are related to CopyObject:
*
*
*
* For more information, see Copying Objects.
*
*
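* A minimal usage sketch (illustrative only): bucket and key names are placeholders and s3 is assumed to be an
* already-built S3Client. The copySource value is the "source-bucket/source-key" pair, while bucket and key name
* the destination:
*
*     CopyObjectResponse copied = s3.copyObject(CopyObjectRequest.builder()
*             .copySource("source-bucket/source-key")   // URL-encode the source key if it contains special characters
*             .bucket("destination-bucket")             // destination bucket
*             .key("destination-key")                   // destination key
*             .build());
*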
* @param copyObjectRequest
* @return Result of the CopyObject operation returned by the service.
* @throws ObjectNotInActiveTierErrorException
* The source object of the COPY operation is not in the active tier and is only stored in Amazon S3
* Glacier.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.CopyObject
*/
@Override
public CopyObjectResponse copyObject(CopyObjectRequest copyObjectRequest) throws ObjectNotInActiveTierErrorException,
AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<CopyObjectResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
CopyObjectResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, copyObjectRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "CopyObject");
return clientHandler.execute(new ClientExecutionParams<CopyObjectRequest, CopyObjectResponse>()
.withOperationName("CopyObject").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(copyObjectRequest)
.withMarshaller(new CopyObjectRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a valid AWS Access Key ID
* to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you
* become the bucket owner.
*
*
* Not every string is an acceptable bucket name. For information about bucket naming restrictions, see Working with Amazon S3 buckets.
*
*
* If you want to create an Amazon S3 on Outposts bucket, see Create Bucket.
*
*
* By default, the bucket is created in the US East (N. Virginia) Region. You can optionally specify a Region in the
* request body. You might choose a Region to optimize latency, minimize costs, or address regulatory requirements.
* For example, if you reside in Europe, you will probably find it advantageous to create buckets in the Europe
* (Ireland) Region. For more information, see Accessing a
* bucket.
*
*
*
* If you send your create bucket request to the s3.amazonaws.com endpoint, the request goes to the
* us-east-1 Region. Accordingly, the signature calculations in Signature Version 4 must use us-east-1 as the
* Region, even if the location constraint in the request specifies another Region where the bucket is to be
* created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to
* handle 307 redirect. For more information, see Virtual hosting of buckets.
*
*
*
* When creating a bucket using this operation, you can optionally specify the accounts or groups that should be
* granted specific permissions on the bucket. There are two ways to grant the appropriate permissions using the
* request headers.
*
*
* -
*
* Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports a set of predefined
* ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more
* information, see Canned
* ACL.
*
*
* -
*
* Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write,
* x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control
* headers. These headers map to the set of permissions Amazon S3 supports in an ACL. For more information, see Access control list (ACL) overview.
*
*
* You specify each grantee as a type=value pair, where the type is one of the following:
*
*
* -
*
* id – if the value specified is the canonical user ID of an AWS account
*
*
* -
*
* uri – if you are granting permissions to a predefined group
*
*
* -
*
* emailAddress – if the value specified is the email address of an AWS account
*
*
*
* Using email addresses to specify a grantee is only supported in the following AWS Regions:
*
*
* -
*
* US East (N. Virginia)
*
*
* -
*
* US West (N. California)
*
*
* -
*
* US West (Oregon)
*
*
* -
*
* Asia Pacific (Singapore)
*
*
* -
*
* Asia Pacific (Sydney)
*
*
* -
*
* Asia Pacific (Tokyo)
*
*
* -
*
* Europe (Ireland)
*
*
* -
*
* South America (São Paulo)
*
*
*
*
* For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS
* General Reference.
*
*
*
*
* For example, the following x-amz-grant-read header grants the AWS accounts identified by account IDs
* permissions to read object data and its metadata:
*
*
* x-amz-grant-read: id="11112222333", id="444455556666"
*
*
*
*
*
* You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
*
*
*
* The following operations are related to CreateBucket:
*
*
* -
*
* PutObject
*
*
* -
*
* DeleteBucket
*
*
*
*
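* A minimal usage sketch (illustrative only): the bucket name is a placeholder and s3 is assumed to be an
* already-built S3Client. As described above, the bucket is created in us-east-1 unless a
* CreateBucketConfiguration with a location constraint is added to the request:
*
*     CreateBucketResponse created = s3.createBucket(CreateBucketRequest.builder()
*             .bucket("my-new-bucket")   // placeholder bucket name
*             .build());
*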
* @param createBucketRequest
* @return Result of the CreateBucket operation returned by the service.
* @throws BucketAlreadyExistsException
* The requested bucket name is not available. The bucket namespace is shared by all users of the system.
* Select a different name and try again.
* @throws BucketAlreadyOwnedByYouException
* The bucket you tried to create already exists, and you own it. Amazon S3 returns this error in all AWS
* Regions except in the North Virginia Region. For legacy compatibility, if you re-create an existing
* bucket that you already own in the North Virginia Region, Amazon S3 returns 200 OK and resets the bucket
* access control lists (ACLs).
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.CreateBucket
*/
@Override
public CreateBucketResponse createBucket(CreateBucketRequest createBucketRequest) throws BucketAlreadyExistsException,
BucketAlreadyOwnedByYouException, AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<CreateBucketResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
CreateBucketResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, createBucketRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "CreateBucket");
return clientHandler.execute(new ClientExecutionParams<CreateBucketRequest, CreateBucketResponse>()
.withOperationName("CreateBucket").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(createBucketRequest)
.withMarshaller(new CreateBucketRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* This operation initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of
* the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part
* requests (see UploadPart). You
* also include this upload ID in the final request to either complete or abort the multipart upload request.
*
*
* For more information about multipart uploads, see Multipart Upload Overview.
*
*
* If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within
* the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload
* becomes eligible for an abort operation and Amazon S3 aborts the multipart upload. For more information, see
* Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.
*
*
* For information about the permissions required to use the multipart upload API, see Multipart Upload API and
* Permissions.
*
*
* For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send
* one or more requests to upload parts, and then complete the multipart upload process. You sign each request
* individually. There is nothing special about signing multipart upload requests. For more information about
* signing, see Authenticating
* Requests (AWS Signature Version 4).
*
*
*
* After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the
* uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to
* store the parts and stops charging you for storing them only after you either complete or abort a multipart
* upload.
*
*
*
* You can optionally request server-side encryption. For server-side encryption, Amazon S3 encrypts your data as it
* writes it to disks in its data centers and decrypts it when you access it. You can provide your own encryption
* key, or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or Amazon S3-managed encryption
* keys. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must
* match the headers you used in the request to initiate the upload by using CreateMultipartUpload.
*
*
* To perform a multipart upload with encryption using an AWS KMS CMK, the requester must have permission to the
* kms:Encrypt, kms:Decrypt, kms:ReEncrypt*,
* kms:GenerateDataKey*, and kms:DescribeKey actions on the key. These permissions are
* required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the
* multipart upload.
*
*
* If your AWS Identity and Access Management (IAM) user or role is in the same AWS account as the AWS KMS CMK, then
* you must have these permissions on the key policy. If your IAM user or role belongs to a different account than
* the key, then you must have the permissions on both the key policy and your IAM user or role.
*
*
* For more information, see Protecting Data Using
* Server-Side Encryption.
*
*
* - Access Permissions
* -
*
* When copying an object, you can optionally specify the accounts or groups that should be granted specific
* permissions on the new object. There are two ways to grant the permissions using the request headers:
*
*
* -
*
* Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL.
*
*
* -
*
* Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp,
* x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to
* the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
*
*
*
*
* You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
*
*
* - Server-Side-Encryption-Specific Request Headers
* -
*
* You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is
* for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and
* decrypts it when you access it. The option you use depends on whether you want to use AWS managed encryption keys
* or provide your own encryption key.
*
*
* -
*
* Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (AWS
* KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.
*
*
* -
*
* x-amz-server-side-encryption
*
*
* -
*
* x-amz-server-side-encryption-aws-kms-key-id
*
*
* -
*
* x-amz-server-side-encryption-context
*
*
*
*
*
* If you specify x-amz-server-side-encryption:aws:kms, but don't provide
* x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to
* protect the data.
*
*
*
* All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using
* SigV4.
*
*
*
* For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side
* Encryption with CMKs stored in AWS KMS.
*
*
* -
*
* Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following
* headers in the request.
*
*
* -
*
* x-amz-server-side-encryption-customer-algorithm
*
*
* -
*
* x-amz-server-side-encryption-customer-key
*
*
* -
*
* x-amz-server-side-encryption-customer-key-MD5
*
*
*
*
* For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side
* Encryption with CMKs stored in AWS KMS.
*
*
*
*
* - Access-Control-List (ACL)-Specific Request Headers
* -
*
* You also can use the following access control–related headers with this operation. By default, all objects are
* private. Only the owner has full access control. When adding a new object, you can grant permissions to
* individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the
* access control list (ACL) on the object. For more information, see Using ACLs. With this
* operation, you can grant access permissions using one of the following two methods:
*
*
* -
*
* Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned
* ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.
*
*
* -
*
* Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or
* groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL.
* For more information, see Access
* Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission.
* To grant permissions explicitly, use:
*
*
* -
*
* x-amz-grant-read
*
*
* -
*
* x-amz-grant-write
*
*
* -
*
* x-amz-grant-read-acp
*
*
* -
*
* x-amz-grant-write-acp
*
*
* -
*
* x-amz-grant-full-control
*
*
*
*
* You specify each grantee as a type=value pair, where the type is one of the following:
*
*
* -
*
* id – if the value specified is the canonical user ID of an AWS account
*
*
* -
*
* uri – if you are granting permissions to a predefined group
*
*
* -
*
* emailAddress – if the value specified is the email address of an AWS account
*
*
*
* Using email addresses to specify a grantee is only supported in the following AWS Regions:
*
*
* -
*
* US East (N. Virginia)
*
*
* -
*
* US West (N. California)
*
*
* -
*
* US West (Oregon)
*
*
* -
*
* Asia Pacific (Singapore)
*
*
* -
*
* Asia Pacific (Sydney)
*
*
* -
*
* Asia Pacific (Tokyo)
*
*
* -
*
* Europe (Ireland)
*
*
* -
*
* South America (São Paulo)
*
*
*
*
* For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS
* General Reference.
*
*
*
*
* For example, the following x-amz-grant-read header grants the AWS accounts identified by account IDs
* permissions to read object data and its metadata:
*
*
* x-amz-grant-read: id="11112222333", id="444455556666"
*
*
*
*
*
*
* The following operations are related to CreateMultipartUpload:
*
* - UploadPart
*
* - CompleteMultipartUpload
*
* - AbortMultipartUpload
*
* - ListParts
*
* - ListMultipartUploads
*
*
* @param createMultipartUploadRequest
* @return Result of the CreateMultipartUpload operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.CreateMultipartUpload
*/
@Override
public CreateMultipartUploadResponse createMultipartUpload(CreateMultipartUploadRequest createMultipartUploadRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<CreateMultipartUploadResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(CreateMultipartUploadResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, createMultipartUploadRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "CreateMultipartUpload");
return clientHandler.execute(new ClientExecutionParams<CreateMultipartUploadRequest, CreateMultipartUploadResponse>()
.withOperationName("CreateMultipartUpload").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(createMultipartUploadRequest)
.withMarshaller(new CreateMultipartUploadRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
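/*
 * Illustrative usage sketch (not part of the generated client): initiating a multipart upload with
 * SSE-KMS, as described in the Javadoc above. The bucket name, object key, and KMS key ID below are
 * placeholders.
 *
 * S3Client s3 = S3Client.create();
 * CreateMultipartUploadResponse mpu = s3.createMultipartUpload(CreateMultipartUploadRequest.builder()
 *         .bucket("my-example-bucket")
 *         .key("backups/archive.zip")
 *         .serverSideEncryption(ServerSideEncryption.AWS_KMS)
 *         .ssekmsKeyId("arn:aws:kms:us-east-1:111122223333:key/example-key-id")
 *         .build());
 * String uploadId = mpu.uploadId(); // pass this ID to UploadPart and CompleteMultipartUpload
 */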
/**
*
* Deletes the S3 bucket. All objects (including all object versions and delete markers) in the bucket must be
* deleted before the bucket itself can be deleted.
*
*
* Related Resources
*
*
* -
*
* CreateBucket
*
*
* -
*
* DeleteObject
*
*
*
*
* @param deleteBucketRequest
* @return Result of the DeleteBucket operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.DeleteBucket
*/
@Override
public DeleteBucketResponse deleteBucket(DeleteBucketRequest deleteBucketRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<DeleteBucketResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
DeleteBucketResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, deleteBucketRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeleteBucket");
return clientHandler.execute(new ClientExecutionParams<DeleteBucketRequest, DeleteBucketResponse>()
.withOperationName("DeleteBucket").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(deleteBucketRequest)
.withMarshaller(new DeleteBucketRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
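/*
 * Illustrative usage sketch (not part of the generated client): deleting a bucket that has already been
 * emptied of all objects, object versions, and delete markers. The bucket name is a placeholder.
 *
 * S3Client s3 = S3Client.create();
 * s3.deleteBucket(DeleteBucketRequest.builder().bucket("my-example-bucket").build());
 */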
/**
*
* Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).
*
*
* To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action.
* The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more
* information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your
* Amazon S3 Resources.
*
*
* For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage
* Class Analysis.
*
*
* The following operations are related to DeleteBucketAnalyticsConfiguration:
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
* @param deleteBucketAnalyticsConfigurationRequest
* @return Result of the DeleteBucketAnalyticsConfiguration operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.DeleteBucketAnalyticsConfiguration
*/
@Override
public DeleteBucketAnalyticsConfigurationResponse deleteBucketAnalyticsConfiguration(
DeleteBucketAnalyticsConfigurationRequest deleteBucketAnalyticsConfigurationRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<DeleteBucketAnalyticsConfigurationResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(DeleteBucketAnalyticsConfigurationResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
deleteBucketAnalyticsConfigurationRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeleteBucketAnalyticsConfiguration");
return clientHandler
.execute(new ClientExecutionParams<DeleteBucketAnalyticsConfigurationRequest, DeleteBucketAnalyticsConfigurationResponse>()
.withOperationName("DeleteBucketAnalyticsConfiguration").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(deleteBucketAnalyticsConfigurationRequest)
.withMarshaller(new DeleteBucketAnalyticsConfigurationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
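/*
 * Illustrative usage sketch (not part of the generated client): removing one analytics configuration by
 * its ID. The bucket name and configuration ID are placeholders.
 *
 * S3Client s3 = S3Client.create();
 * s3.deleteBucketAnalyticsConfiguration(DeleteBucketAnalyticsConfigurationRequest.builder()
 *         .bucket("my-example-bucket")
 *         .id("analytics-config-1")
 *         .build());
 */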
/**
*
* Deletes the cors configuration information set for the bucket.
*
*
* To use this operation, you must have permission to perform the s3:PutBucketCORS action. The bucket
* owner has this permission by default and can grant this permission to others.
*
*
* For information about cors, see Enabling Cross-Origin Resource Sharing in
* the Amazon Simple Storage Service Developer Guide.
*
*
* Related Resources:
*
*
* -
*
* PutBucketCors
*
*
* -
*
*
*
*
* @param deleteBucketCorsRequest
* @return Result of the DeleteBucketCors operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.DeleteBucketCors
*/
@Override
public DeleteBucketCorsResponse deleteBucketCors(DeleteBucketCorsRequest deleteBucketCorsRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<DeleteBucketCorsResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
DeleteBucketCorsResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, deleteBucketCorsRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeleteBucketCors");
return clientHandler.execute(new ClientExecutionParams<DeleteBucketCorsRequest, DeleteBucketCorsResponse>()
.withOperationName("DeleteBucketCors").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(deleteBucketCorsRequest)
.withMarshaller(new DeleteBucketCorsRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
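/*
 * Illustrative usage sketch (not part of the generated client): removing the cors configuration from a
 * bucket. The bucket name is a placeholder.
 *
 * S3Client s3 = S3Client.create();
 * s3.deleteBucketCors(DeleteBucketCorsRequest.builder().bucket("my-example-bucket").build());
 */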
/**
*
* This implementation of the DELETE operation removes default encryption from the bucket. For information about the
* Amazon S3 default encryption feature, see Amazon S3 Default Bucket
* Encryption in the Amazon Simple Storage Service Developer Guide.
*
*
* To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration
* action. The bucket owner has this permission by default. The bucket owner can grant this permission to others.
* For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your
* Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
*
*
* Related Resources
*
*
* -
*
*
* -
*
*
*
*
* @param deleteBucketEncryptionRequest
* @return Result of the DeleteBucketEncryption operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.DeleteBucketEncryption
*/
@Override
public DeleteBucketEncryptionResponse deleteBucketEncryption(DeleteBucketEncryptionRequest deleteBucketEncryptionRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<DeleteBucketEncryptionResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(DeleteBucketEncryptionResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, deleteBucketEncryptionRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeleteBucketEncryption");
return clientHandler
.execute(new ClientExecutionParams<DeleteBucketEncryptionRequest, DeleteBucketEncryptionResponse>()
.withOperationName("DeleteBucketEncryption").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(deleteBucketEncryptionRequest)
.withMarshaller(new DeleteBucketEncryptionRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Deletes the S3 Intelligent-Tiering configuration from the specified bucket.
*
*
* The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to
* the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering
* delivers automatic cost savings by moving data between access tiers, when access patterns change.
*
*
* The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at
* least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects
* can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering
* storage class.
*
*
* If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30
* days. For more information, see Storage
* class for automatically optimizing frequently and infrequently accessed objects.
*
*
* Operations related to DeleteBucketIntelligentTieringConfiguration include:
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
* @param deleteBucketIntelligentTieringConfigurationRequest
* @return Result of the DeleteBucketIntelligentTieringConfiguration operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.DeleteBucketIntelligentTieringConfiguration
*/
@Override
public DeleteBucketIntelligentTieringConfigurationResponse deleteBucketIntelligentTieringConfiguration(
DeleteBucketIntelligentTieringConfigurationRequest deleteBucketIntelligentTieringConfigurationRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<DeleteBucketIntelligentTieringConfigurationResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(DeleteBucketIntelligentTieringConfigurationResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
deleteBucketIntelligentTieringConfigurationRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeleteBucketIntelligentTieringConfiguration");
return clientHandler
.execute(new ClientExecutionParams<DeleteBucketIntelligentTieringConfigurationRequest, DeleteBucketIntelligentTieringConfigurationResponse>()
.withOperationName("DeleteBucketIntelligentTieringConfiguration")
.withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector)
.withInput(deleteBucketIntelligentTieringConfigurationRequest)
.withMarshaller(new DeleteBucketIntelligentTieringConfigurationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Deletes an inventory configuration (identified by the inventory ID) from the bucket.
*
*
* To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action.
* The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more
* information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your
* Amazon S3 Resources.
*
*
* For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.
*
*
* Operations related to DeleteBucketInventoryConfiguration include:
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
* @param deleteBucketInventoryConfigurationRequest
* @return Result of the DeleteBucketInventoryConfiguration operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.DeleteBucketInventoryConfiguration
*/
@Override
public DeleteBucketInventoryConfigurationResponse deleteBucketInventoryConfiguration(
DeleteBucketInventoryConfigurationRequest deleteBucketInventoryConfigurationRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<DeleteBucketInventoryConfigurationResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(DeleteBucketInventoryConfigurationResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
deleteBucketInventoryConfigurationRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeleteBucketInventoryConfiguration");
return clientHandler
.execute(new ClientExecutionParams<DeleteBucketInventoryConfigurationRequest, DeleteBucketInventoryConfigurationResponse>()
.withOperationName("DeleteBucketInventoryConfiguration").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(deleteBucketInventoryConfigurationRequest)
.withMarshaller(new DeleteBucketInventoryConfigurationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes all the lifecycle configuration
* rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 no longer
* automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration.
*
*
* To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration action.
* By default, the bucket owner has this permission and the bucket owner can grant this permission to others.
*
*
* There is usually some time lag before lifecycle configuration deletion is fully propagated to all the Amazon S3
* systems.
*
*
* For more information about the object expiration, see Elements to Describe Lifecycle Actions.
*
*
* Related actions include:
*
*
* -
*
*
* -
*
*
*
*
* @param deleteBucketLifecycleRequest
* @return Result of the DeleteBucketLifecycle operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.DeleteBucketLifecycle
*/
@Override
public DeleteBucketLifecycleResponse deleteBucketLifecycle(DeleteBucketLifecycleRequest deleteBucketLifecycleRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<DeleteBucketLifecycleResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(DeleteBucketLifecycleResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, deleteBucketLifecycleRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeleteBucketLifecycle");
return clientHandler.execute(new ClientExecutionParams<DeleteBucketLifecycleRequest, DeleteBucketLifecycleResponse>()
.withOperationName("DeleteBucketLifecycle").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(deleteBucketLifecycleRequest)
.withMarshaller(new DeleteBucketLifecycleRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
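/*
 * Illustrative usage sketch (not part of the generated client): removing all lifecycle rules from a
 * bucket; as noted above, the deletion may take a while to propagate. The bucket name is a placeholder.
 *
 * S3Client s3 = S3Client.create();
 * s3.deleteBucketLifecycle(DeleteBucketLifecycleRequest.builder().bucket("my-example-bucket").build());
 */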
/**
*
* Deletes a metrics configuration for the Amazon CloudWatch request metrics (specified by the metrics configuration
* ID) from the bucket. Note that this doesn't include the daily storage metrics.
*
*
* To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action.
* The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more
* information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your
* Amazon S3 Resources.
*
*
* For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon
* CloudWatch.
*
*
* The following operations are related to DeleteBucketMetricsConfiguration:
*
*
* -
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
* @param deleteBucketMetricsConfigurationRequest
* @return Result of the DeleteBucketMetricsConfiguration operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.DeleteBucketMetricsConfiguration
*/
@Override
public DeleteBucketMetricsConfigurationResponse deleteBucketMetricsConfiguration(
DeleteBucketMetricsConfigurationRequest deleteBucketMetricsConfigurationRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<DeleteBucketMetricsConfigurationResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(DeleteBucketMetricsConfigurationResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
deleteBucketMetricsConfigurationRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeleteBucketMetricsConfiguration");
return clientHandler
.execute(new ClientExecutionParams<DeleteBucketMetricsConfigurationRequest, DeleteBucketMetricsConfigurationResponse>()
.withOperationName("DeleteBucketMetricsConfiguration").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(deleteBucketMetricsConfigurationRequest)
.withMarshaller(new DeleteBucketMetricsConfigurationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the
* s3:PutBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying Permissions in a
* Policy.
*
*
* For information about Amazon S3 Object Ownership, see Using Object Ownership.
*
*
* The following operations are related to DeleteBucketOwnershipControls:
*
*
* -
*
*
* -
*
*
*
*
* @param deleteBucketOwnershipControlsRequest
* @return Result of the DeleteBucketOwnershipControls operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.DeleteBucketOwnershipControls
*/
@Override
public DeleteBucketOwnershipControlsResponse deleteBucketOwnershipControls(
DeleteBucketOwnershipControlsRequest deleteBucketOwnershipControlsRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<DeleteBucketOwnershipControlsResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(DeleteBucketOwnershipControlsResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
deleteBucketOwnershipControlsRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeleteBucketOwnershipControls");
return clientHandler
.execute(new ClientExecutionParams<DeleteBucketOwnershipControlsRequest, DeleteBucketOwnershipControlsResponse>()
.withOperationName("DeleteBucketOwnershipControls").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(deleteBucketOwnershipControlsRequest)
.withMarshaller(new DeleteBucketOwnershipControlsRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* This implementation of the DELETE operation uses the policy subresource to delete the policy of a specified
* bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the
* calling identity must have the DeleteBucketPolicy permissions on the specified bucket and belong to
* the bucket owner's account to use this operation.
*
*
* If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied
* error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's
* account, Amazon S3 returns a 405 Method Not Allowed error.
*
*
*
* As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even
* if the policy explicitly denies the root user the ability to perform this action.
*
*
*
* For more information about bucket policies, see Using Bucket Policies and
* UserPolicies.
*
*
* The following operations are related to DeleteBucketPolicy:
*
*
* -
*
* CreateBucket
*
*
* -
*
* DeleteObject
*
*
*
*
* @param deleteBucketPolicyRequest
* @return Result of the DeleteBucketPolicy operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.DeleteBucketPolicy
*/
@Override
public DeleteBucketPolicyResponse deleteBucketPolicy(DeleteBucketPolicyRequest deleteBucketPolicyRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<DeleteBucketPolicyResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(DeleteBucketPolicyResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, deleteBucketPolicyRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeleteBucketPolicy");
return clientHandler.execute(new ClientExecutionParams<DeleteBucketPolicyRequest, DeleteBucketPolicyResponse>()
.withOperationName("DeleteBucketPolicy").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(deleteBucketPolicyRequest)
.withMarshaller(new DeleteBucketPolicyRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
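/*
 * Illustrative usage sketch (not part of the generated client): deleting a bucket policy and handling
 * the 403/405 failures described above. The bucket name is a placeholder.
 *
 * S3Client s3 = S3Client.create();
 * try {
 *     s3.deleteBucketPolicy(DeleteBucketPolicyRequest.builder().bucket("my-example-bucket").build());
 * } catch (S3Exception e) {
 *     System.err.println("DeleteBucketPolicy failed with HTTP " + e.statusCode());
 * }
 */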
/**
*
* Deletes the replication configuration from the bucket.
*
*
* To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration
* action. The bucket owner has these permissions by default and can grant it to others. For more information about
* permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your
* Amazon S3 Resources.
*
*
*
* It can take a while for the deletion of a replication configuration to fully propagate.
*
*
*
* For information about replication configuration, see Replication in the Amazon S3
* Developer Guide.
*
*
* The following operations are related to DeleteBucketReplication:
*
*
* -
*
*
* -
*
*
*
*
* @param deleteBucketReplicationRequest
* @return Result of the DeleteBucketReplication operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.DeleteBucketReplication
*/
@Override
public DeleteBucketReplicationResponse deleteBucketReplication(DeleteBucketReplicationRequest deleteBucketReplicationRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<DeleteBucketReplicationResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(DeleteBucketReplicationResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, deleteBucketReplicationRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeleteBucketReplication");
return clientHandler
.execute(new ClientExecutionParams<DeleteBucketReplicationRequest, DeleteBucketReplicationResponse>()
.withOperationName("DeleteBucketReplication").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(deleteBucketReplicationRequest)
.withMarshaller(new DeleteBucketReplicationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Deletes the tags from the bucket.
*
*
* To use this operation, you must have permission to perform the s3:PutBucketTagging action. By
* default, the bucket owner has this permission and can grant this permission to others.
*
*
* The following operations are related to DeleteBucketTagging:
*
*
* -
*
* GetBucketTagging
*
*
* -
*
* PutBucketTagging
*
*
*
*
* @param deleteBucketTaggingRequest
* @return Result of the DeleteBucketTagging operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.DeleteBucketTagging
*/
@Override
public DeleteBucketTaggingResponse deleteBucketTagging(DeleteBucketTaggingRequest deleteBucketTaggingRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<DeleteBucketTaggingResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(DeleteBucketTaggingResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, deleteBucketTaggingRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeleteBucketTagging");
return clientHandler.execute(new ClientExecutionParams<DeleteBucketTaggingRequest, DeleteBucketTaggingResponse>()
.withOperationName("DeleteBucketTagging").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(deleteBucketTaggingRequest)
.withMarshaller(new DeleteBucketTaggingRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* This operation removes the website configuration for a bucket. Amazon S3 returns a 200 OK response
* upon successfully deleting a website configuration on the specified bucket. You will get a 200 OK
* response if the website configuration you are trying to delete does not exist on the bucket. Amazon S3 returns a
* 404 response if the bucket specified in the request does not exist.
*
*
* This DELETE operation requires the S3:DeleteBucketWebsite permission. By default, only the bucket
* owner can delete the website configuration attached to a bucket. However, bucket owners can grant other users
* permission to delete the website configuration by writing a bucket policy granting them the
* S3:DeleteBucketWebsite permission.
*
*
* For more information about hosting websites, see Hosting Websites on Amazon S3.
*
*
* The following operations are related to DeleteBucketWebsite:
*
*
* -
*
* GetBucketWebsite
*
*
* -
*
* PutBucketWebsite
*
*
*
*
* @param deleteBucketWebsiteRequest
* @return Result of the DeleteBucketWebsite operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.DeleteBucketWebsite
*/
@Override
public DeleteBucketWebsiteResponse deleteBucketWebsite(DeleteBucketWebsiteRequest deleteBucketWebsiteRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<DeleteBucketWebsiteResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(DeleteBucketWebsiteResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, deleteBucketWebsiteRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeleteBucketWebsite");
return clientHandler.execute(new ClientExecutionParams<DeleteBucketWebsiteRequest, DeleteBucketWebsiteResponse>()
.withOperationName("DeleteBucketWebsite").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(deleteBucketWebsiteRequest)
.withMarshaller(new DeleteBucketWebsiteRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest
* version of the object. If there isn't a null version, Amazon S3 does not remove any objects.
*
*
* To remove a specific version, you must be the bucket owner and you must use the version Id subresource. Using
* this subresource permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the
* response header, x-amz-delete-marker, to true.
*
*
* If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled,
* you must include the x-amz-mfa request header in the DELETE versionId request. Requests
* that include x-amz-mfa must use HTTPS.
*
*
* For more information about MFA Delete, see Using MFA Delete. To see sample
* requests that use versioning, see Sample
* Request.
*
*
* You can delete objects by explicitly calling the DELETE Object API or configure its lifecycle (PutBucketLifecycle) to
* enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects
* from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, and
* s3:PutLifeCycleConfiguration actions.
*
*
* The following operation is related to DeleteObject:
*
*
* -
*
* PutObject
*
*
*
*
* @param deleteObjectRequest
* @return Result of the DeleteObject operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.DeleteObject
*/
@Override
public DeleteObjectResponse deleteObject(DeleteObjectRequest deleteObjectRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<DeleteObjectResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
DeleteObjectResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, deleteObjectRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeleteObject");
return clientHandler.execute(new ClientExecutionParams<DeleteObjectRequest, DeleteObjectResponse>()
.withOperationName("DeleteObject").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(deleteObjectRequest)
.withMarshaller(new DeleteObjectRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
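/*
 * Illustrative usage sketch (not part of the generated client): deleting the current version of an
 * object (which inserts a delete marker in a versioning-enabled bucket), then permanently deleting a
 * specific version. Bucket, key, and version ID are placeholders.
 *
 * S3Client s3 = S3Client.create();
 * DeleteObjectResponse marker = s3.deleteObject(DeleteObjectRequest.builder()
 *         .bucket("my-example-bucket")
 *         .key("logs/app.log")
 *         .build());
 * System.out.println("Delete marker created: " + marker.deleteMarker());
 * s3.deleteObject(DeleteObjectRequest.builder()
 *         .bucket("my-example-bucket")
 *         .key("logs/app.log")
 *         .versionId("example-version-id")
 *         .build());
 */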
/**
*
* Removes the entire tag set from the specified object. For more information about managing object tags, see Object Tagging.
*
*
* To use this operation, you must have permission to perform the s3:DeleteObjectTagging action.
*
*
* To delete tags of a specific object version, add the versionId query parameter in the request. You
* will need permission for the s3:DeleteObjectVersionTagging action.
*
*
* The following operations are related to DeleteObjectTagging:
*
*
* -
*
* PutObjectTagging
*
*
* -
*
* GetObjectTagging
*
*
*
*
* @param deleteObjectTaggingRequest
* @return Result of the DeleteObjectTagging operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.DeleteObjectTagging
*/
@Override
public DeleteObjectTaggingResponse deleteObjectTagging(DeleteObjectTaggingRequest deleteObjectTaggingRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<DeleteObjectTaggingResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(DeleteObjectTaggingResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, deleteObjectTaggingRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeleteObjectTagging");
return clientHandler.execute(new ClientExecutionParams<DeleteObjectTaggingRequest, DeleteObjectTaggingResponse>()
.withOperationName("DeleteObjectTagging").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(deleteObjectTaggingRequest)
.withMarshaller(new DeleteObjectTaggingRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
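/*
 * Illustrative usage sketch (not part of the generated client): removing the tag set from one version of
 * an object. Bucket, key, and version ID are placeholders; omit versionId to target the current version.
 *
 * S3Client s3 = S3Client.create();
 * s3.deleteObjectTagging(DeleteObjectTaggingRequest.builder()
 *         .bucket("my-example-bucket")
 *         .key("photos/cat.jpg")
 *         .versionId("example-version-id")
 *         .build());
 */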
/**
*
* This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the
* object keys that you want to delete, then this operation provides a suitable alternative to sending individual
* delete requests, reducing per-request overhead.
*
*
* The request contains a list of up to 1000 keys that you want to delete. In the XML, you provide the object key
* names, and optionally, version IDs if you want to delete a specific version of the object from a
* versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that
* delete, success, or failure, in the response. Note that if the object specified in the request is not found,
* Amazon S3 returns the result as deleted.
*
*
* The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode
* in which the response includes the result of deletion of each key in your request. In quiet mode the response
* includes only keys where the delete operation encountered an error. For a successful deletion, the operation does
* not return any information about the delete in the response body.
*
*
* When performing this operation on an MFA Delete enabled bucket, that attempts to delete any versioned objects,
* you must include an MFA token. If you do not provide one, the entire request will fail, even if there are
* non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys
* in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA
* Delete.
*
*
* Finally, the Content-MD5 header is required for all Multi-Object Delete requests. Amazon S3 uses the header value
* to ensure that your request body has not been altered in transit.
*
*
* The following operations are related to DeleteObjects:
*
* - CreateMultipartUpload
*
* - UploadPart
*
* - CompleteMultipartUpload
*
* - ListParts
*
* - AbortMultipartUpload
*
*
*
* @param deleteObjectsRequest
* @return Result of the DeleteObjects operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.DeleteObjects
*/
@Override
public DeleteObjectsResponse deleteObjects(DeleteObjectsRequest deleteObjectsRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<DeleteObjectsResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
DeleteObjectsResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, deleteObjectsRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeleteObjects");
return clientHandler.execute(new ClientExecutionParams<DeleteObjectsRequest, DeleteObjectsResponse>()
.withOperationName("DeleteObjects").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(deleteObjectsRequest)
.putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, HttpChecksumRequired.create())
.withMarshaller(new DeleteObjectsRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
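/*
 * Illustrative usage sketch (not part of the generated client): deleting two keys in a single
 * Multi-Object Delete request with quiet mode enabled, so only failures are returned. Bucket and keys
 * are placeholders.
 *
 * S3Client s3 = S3Client.create();
 * DeleteObjectsResponse result = s3.deleteObjects(DeleteObjectsRequest.builder()
 *         .bucket("my-example-bucket")
 *         .delete(Delete.builder()
 *                 .objects(ObjectIdentifier.builder().key("tmp/a.txt").build(),
 *                         ObjectIdentifier.builder().key("tmp/b.txt").build())
 *                 .quiet(true)
 *                 .build())
 *         .build());
 * result.errors().forEach(err -> System.err.println(err.key() + ": " + err.message()));
 */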
/**
*
* Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must
* have the s3:PutBucketPublicAccessBlock permission. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your
* Amazon S3 Resources.
*
*
* The following operations are related to DeletePublicAccessBlock:
*
*
* -
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
* @param deletePublicAccessBlockRequest
* @return Result of the DeletePublicAccessBlock operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.DeletePublicAccessBlock
*/
@Override
public DeletePublicAccessBlockResponse deletePublicAccessBlock(DeletePublicAccessBlockRequest deletePublicAccessBlockRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<DeletePublicAccessBlockResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(DeletePublicAccessBlockResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, deletePublicAccessBlockRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DeletePublicAccessBlock");
return clientHandler
.execute(new ClientExecutionParams<DeletePublicAccessBlockRequest, DeletePublicAccessBlockResponse>()
.withOperationName("DeletePublicAccessBlock").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(deletePublicAccessBlockRequest)
.withMarshaller(new DeletePublicAccessBlockRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
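/*
 * Illustrative usage sketch (not part of the generated client): removing the PublicAccessBlock
 * configuration from a bucket. The bucket name is a placeholder.
 *
 * S3Client s3 = S3Client.create();
 * s3.deletePublicAccessBlock(DeletePublicAccessBlockRequest.builder().bucket("my-example-bucket").build());
 */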
/**
*
* This implementation of the GET operation uses the accelerate subresource to return the Transfer
* Acceleration state of a bucket, which is either Enabled or Suspended. Amazon S3
* Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to and from
* Amazon S3.
*
*
* To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration action.
* The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more
* information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your
* Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
*
*
* You set the Transfer Acceleration state of an existing bucket to Enabled or Suspended
* by using the PutBucketAccelerateConfiguration operation.
*
*
* A GET accelerate request does not return a state value for a bucket that has no transfer
* acceleration state. A bucket has no Transfer Acceleration state if a state has never been set on the bucket.
*
*
* For more information about transfer acceleration, see Transfer Acceleration in
* the Amazon Simple Storage Service Developer Guide.
*
*
* Related Resources
*
*
* -
*
*
*
*
* @param getBucketAccelerateConfigurationRequest
* @return Result of the GetBucketAccelerateConfiguration operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetBucketAccelerateConfiguration
*/
@Override
public GetBucketAccelerateConfigurationResponse getBucketAccelerateConfiguration(
GetBucketAccelerateConfigurationRequest getBucketAccelerateConfigurationRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<GetBucketAccelerateConfigurationResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(GetBucketAccelerateConfigurationResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
getBucketAccelerateConfigurationRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetBucketAccelerateConfiguration");
return clientHandler
.execute(new ClientExecutionParams<GetBucketAccelerateConfigurationRequest, GetBucketAccelerateConfigurationResponse>()
.withOperationName("GetBucketAccelerateConfiguration").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getBucketAccelerateConfigurationRequest)
.withMarshaller(new GetBucketAccelerateConfigurationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
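/*
 * Illustrative usage sketch (not part of the generated client): reading the Transfer Acceleration state
 * of a bucket. The bucket name is a placeholder; status() is null when acceleration has never been
 * configured on the bucket.
 *
 * S3Client s3 = S3Client.create();
 * GetBucketAccelerateConfigurationResponse accel = s3.getBucketAccelerateConfiguration(
 *         GetBucketAccelerateConfigurationRequest.builder().bucket("my-example-bucket").build());
 * System.out.println("Transfer Acceleration state: " + accel.status());
 */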
/**
*
* This implementation of the GET operation uses the acl subresource to return the access
* control list (ACL) of a bucket. To use GET to return the ACL of the bucket, you must have
* READ_ACP access to the bucket. If READ_ACP permission is granted to the anonymous user,
* you can return the ACL of the bucket without using an authorization header.
*
*
* Related Resources
*
*
* -
*
* ListObjects
*
*
*
*
* @param getBucketAclRequest
* @return Result of the GetBucketAcl operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetBucketAcl
*/
@Override
public GetBucketAclResponse getBucketAcl(GetBucketAclRequest getBucketAclRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<GetBucketAclResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
GetBucketAclResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getBucketAclRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetBucketAcl");
return clientHandler.execute(new ClientExecutionParams<GetBucketAclRequest, GetBucketAclResponse>()
.withOperationName("GetBucketAcl").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getBucketAclRequest)
.withMarshaller(new GetBucketAclRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
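/*
 * Illustrative usage sketch (not part of the generated client): listing the grants in a bucket's ACL.
 * The bucket name is a placeholder; the caller needs READ_ACP access to the bucket.
 *
 * S3Client s3 = S3Client.create();
 * GetBucketAclResponse acl = s3.getBucketAcl(GetBucketAclRequest.builder().bucket("my-example-bucket").build());
 * acl.grants().forEach(grant ->
 *         System.out.println(grant.permissionAsString() + " granted to " + grant.grantee()));
 */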
/**
*
* This implementation of the GET operation returns an analytics configuration (identified by the analytics
* configuration ID) from the bucket.
*
*
* To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action.
* The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more
* information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your
* Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
*
*
* For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage
* Class Analysis in the Amazon Simple Storage Service Developer Guide.
*
*
* Related Resources
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
* @param getBucketAnalyticsConfigurationRequest
* @return Result of the GetBucketAnalyticsConfiguration operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetBucketAnalyticsConfiguration
*/
@Override
public GetBucketAnalyticsConfigurationResponse getBucketAnalyticsConfiguration(
GetBucketAnalyticsConfigurationRequest getBucketAnalyticsConfigurationRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<GetBucketAnalyticsConfigurationResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(GetBucketAnalyticsConfigurationResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
getBucketAnalyticsConfigurationRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetBucketAnalyticsConfiguration");
return clientHandler
.execute(new ClientExecutionParams<GetBucketAnalyticsConfigurationRequest, GetBucketAnalyticsConfigurationResponse>()
.withOperationName("GetBucketAnalyticsConfiguration").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getBucketAnalyticsConfigurationRequest)
.withMarshaller(new GetBucketAnalyticsConfigurationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Returns the cors configuration information set for the bucket.
*
*
* To use this operation, you must have permission to perform the s3:GetBucketCORS action. By default, the bucket
* owner has this permission and can grant it to others.
*
*
* For more information about cors, see
* Enabling Cross-Origin Resource Sharing.
*
*
* The following operations are related to GetBucketCors:
*
*
* -
*
* PutBucketCors
*
*
* -
*
* DeleteBucketCors
*
*
*
*
* @param getBucketCorsRequest
* @return Result of the GetBucketCors operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetBucketCors
*/
@Override
public GetBucketCorsResponse getBucketCors(GetBucketCorsRequest getBucketCorsRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<GetBucketCorsResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
GetBucketCorsResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getBucketCorsRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetBucketCors");
return clientHandler.execute(new ClientExecutionParams<GetBucketCorsRequest, GetBucketCorsResponse>()
.withOperationName("GetBucketCors").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getBucketCorsRequest)
.withMarshaller(new GetBucketCorsRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Returns the default encryption configuration for an Amazon S3 bucket. For information about the Amazon S3 default
* encryption feature, see Amazon
* S3 Default Bucket Encryption.
*
*
* To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration action.
* The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more
* information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your
* Amazon S3 Resources.
*
*
* The following operations are related to GetBucketEncryption:
*
*
* -
*
*
* -
*
*
*
*
* @param getBucketEncryptionRequest
* @return Result of the GetBucketEncryption operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetBucketEncryption
*/
@Override
public GetBucketEncryptionResponse getBucketEncryption(GetBucketEncryptionRequest getBucketEncryptionRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<GetBucketEncryptionResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(GetBucketEncryptionResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getBucketEncryptionRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetBucketEncryption");
return clientHandler.execute(new ClientExecutionParams<GetBucketEncryptionRequest, GetBucketEncryptionResponse>()
.withOperationName("GetBucketEncryption").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getBucketEncryptionRequest)
.withMarshaller(new GetBucketEncryptionRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
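// Illustrative usage sketch (placeholder bucket name): reading the default encryption
// configuration through the S3Client interface.
//
//     S3Client s3 = S3Client.create();
//     GetBucketEncryptionResponse enc = s3.getBucketEncryption(
//             GetBucketEncryptionRequest.builder().bucket("my-bucket").build());
//     enc.serverSideEncryptionConfiguration().rules().forEach(
//             r -> System.out.println(r.applyServerSideEncryptionByDefault().sseAlgorithm()));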
/**
*
* Gets the S3 Intelligent-Tiering configuration from the specified bucket.
*
*
* The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to
* the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering
* delivers automatic cost savings by moving data between access tiers, when access patterns change.
*
*
* The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at
* least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects
* can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering
* storage class.
*
*
* If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30
* days. For more information, see Storage
* class for automatically optimizing frequently and infrequently accessed objects.
*
*
* Operations related to GetBucketIntelligentTieringConfiguration include:
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
* @param getBucketIntelligentTieringConfigurationRequest
* @return Result of the GetBucketIntelligentTieringConfiguration operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetBucketIntelligentTieringConfiguration
*/
@Override
public GetBucketIntelligentTieringConfigurationResponse getBucketIntelligentTieringConfiguration(
GetBucketIntelligentTieringConfigurationRequest getBucketIntelligentTieringConfigurationRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<GetBucketIntelligentTieringConfigurationResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(GetBucketIntelligentTieringConfigurationResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
getBucketIntelligentTieringConfigurationRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetBucketIntelligentTieringConfiguration");
return clientHandler
.execute(new ClientExecutionParams<GetBucketIntelligentTieringConfigurationRequest, GetBucketIntelligentTieringConfigurationResponse>()
.withOperationName("GetBucketIntelligentTieringConfiguration")
.withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector)
.withInput(getBucketIntelligentTieringConfigurationRequest)
.withMarshaller(new GetBucketIntelligentTieringConfigurationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Returns an inventory configuration (identified by the inventory configuration ID) from the bucket.
*
*
* To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration action.
* The bucket owner has this permission by default and can grant this permission to others. For more information
* about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your
* Amazon S3 Resources.
*
*
* For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.
*
*
* The following operations are related to GetBucketInventoryConfiguration:
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
* @param getBucketInventoryConfigurationRequest
* @return Result of the GetBucketInventoryConfiguration operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetBucketInventoryConfiguration
*/
@Override
public GetBucketInventoryConfigurationResponse getBucketInventoryConfiguration(
GetBucketInventoryConfigurationRequest getBucketInventoryConfigurationRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<GetBucketInventoryConfigurationResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(GetBucketInventoryConfigurationResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
getBucketInventoryConfigurationRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetBucketInventoryConfiguration");
return clientHandler
.execute(new ClientExecutionParams<GetBucketInventoryConfigurationRequest, GetBucketInventoryConfigurationResponse>()
.withOperationName("GetBucketInventoryConfiguration").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getBucketInventoryConfigurationRequest)
.withMarshaller(new GetBucketInventoryConfigurationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
*
* Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or
* more object tags, or a combination of both. Accordingly, this section describes the latest API. The response
* describes the new filter element that you can use to specify a filter to select a subset of objects to which the
* rule applies. If you are using a previous version of the lifecycle configuration, it still works. For the earlier
* API description, see GetBucketLifecycle.
*
*
*
* Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration,
* see Object Lifecycle
* Management.
*
*
* To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action.
* The bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more
* information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your
* Amazon S3 Resources.
*
*
* GetBucketLifecycleConfiguration has the following special error:
*
*
* -
*
* Error code: NoSuchLifecycleConfiguration
*
*
* -
*
* Description: The lifecycle configuration does not exist.
*
*
* -
*
* HTTP Status Code: 404 Not Found
*
*
* -
*
* SOAP Fault Code Prefix: Client
*
*
*
*
*
*
* The following operations are related to GetBucketLifecycleConfiguration:
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
* @param getBucketLifecycleConfigurationRequest
* @return Result of the GetBucketLifecycleConfiguration operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetBucketLifecycleConfiguration
*/
@Override
public GetBucketLifecycleConfigurationResponse getBucketLifecycleConfiguration(
GetBucketLifecycleConfigurationRequest getBucketLifecycleConfigurationRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<GetBucketLifecycleConfigurationResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(GetBucketLifecycleConfigurationResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
getBucketLifecycleConfigurationRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetBucketLifecycleConfiguration");
return clientHandler
.execute(new ClientExecutionParams<GetBucketLifecycleConfigurationRequest, GetBucketLifecycleConfigurationResponse>()
.withOperationName("GetBucketLifecycleConfiguration").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getBucketLifecycleConfigurationRequest)
.withMarshaller(new GetBucketLifecycleConfigurationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
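// Illustrative usage sketch (placeholder bucket name). The NoSuchLifecycleConfiguration error
// described above surfaces as an S3Exception whose error code can be inspected.
//
//     S3Client s3 = S3Client.create();
//     try {
//         GetBucketLifecycleConfigurationResponse lifecycle = s3.getBucketLifecycleConfiguration(
//                 GetBucketLifecycleConfigurationRequest.builder().bucket("my-bucket").build());
//         lifecycle.rules().forEach(rule -> System.out.println(rule.id() + " -> " + rule.status()));
//     } catch (S3Exception e) {
//         if ("NoSuchLifecycleConfiguration".equals(e.awsErrorDetails().errorCode())) {
//             // no lifecycle configuration is set on the bucket (HTTP 404)
//         }
//     }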
/**
*
* Returns the Region the bucket resides in. You set the bucket's Region using the LocationConstraint
* request parameter in a CreateBucket request. For more information, see CreateBucket.
*
*
* To use this implementation of the operation, you must be the bucket owner.
*
*
* The following operations are related to GetBucketLocation:
*
*
* -
*
* GetObject
*
*
* -
*
* CreateBucket
*
*
*
*
* @param getBucketLocationRequest
* @return Result of the GetBucketLocation operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetBucketLocation
*/
@Override
public GetBucketLocationResponse getBucketLocation(GetBucketLocationRequest getBucketLocationRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<GetBucketLocationResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
GetBucketLocationResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getBucketLocationRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetBucketLocation");
return clientHandler.execute(new ClientExecutionParams<GetBucketLocationRequest, GetBucketLocationResponse>()
.withOperationName("GetBucketLocation").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getBucketLocationRequest)
.withMarshaller(new GetBucketLocationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
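// Illustrative usage sketch (placeholder bucket name): resolving the bucket's Region.
//
//     S3Client s3 = S3Client.create();
//     GetBucketLocationResponse location = s3.getBucketLocation(
//             GetBucketLocationRequest.builder().bucket("my-bucket").build());
//     System.out.println("LocationConstraint: " + location.locationConstraintAsString());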
/**
*
* Returns the logging status of a bucket and the permissions users have to view and modify that status. To use GET,
* you must be the bucket owner.
*
*
* The following operations are related to GetBucketLogging:
*
*
* -
*
* CreateBucket
*
*
* -
*
* PutBucketLogging
*
*
*
*
* @param getBucketLoggingRequest
* @return Result of the GetBucketLogging operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetBucketLogging
*/
@Override
public GetBucketLoggingResponse getBucketLogging(GetBucketLoggingRequest getBucketLoggingRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<GetBucketLoggingResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
GetBucketLoggingResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getBucketLoggingRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetBucketLogging");
return clientHandler.execute(new ClientExecutionParams<GetBucketLoggingRequest, GetBucketLoggingResponse>()
.withOperationName("GetBucketLogging").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getBucketLoggingRequest)
.withMarshaller(new GetBucketLoggingRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Gets a metrics configuration (specified by the metrics configuration ID) from the bucket. Note that this doesn't
* include the daily storage metrics.
*
*
* To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration action.
* The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more
* information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your
* Amazon S3 Resources.
*
*
* For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon
* CloudWatch.
*
*
* The following operations are related to GetBucketMetricsConfiguration:
*
*
* -
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
* @param getBucketMetricsConfigurationRequest
* @return Result of the GetBucketMetricsConfiguration operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetBucketMetricsConfiguration
*/
@Override
public GetBucketMetricsConfigurationResponse getBucketMetricsConfiguration(
GetBucketMetricsConfigurationRequest getBucketMetricsConfigurationRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<GetBucketMetricsConfigurationResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(GetBucketMetricsConfigurationResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
getBucketMetricsConfigurationRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetBucketMetricsConfiguration");
return clientHandler
.execute(new ClientExecutionParams<GetBucketMetricsConfigurationRequest, GetBucketMetricsConfigurationResponse>()
.withOperationName("GetBucketMetricsConfiguration").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getBucketMetricsConfigurationRequest)
.withMarshaller(new GetBucketMetricsConfigurationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Returns the notification configuration of a bucket.
*
*
* If notifications are not enabled on the bucket, the operation returns an empty
* NotificationConfiguration element.
*
*
* By default, you must be the bucket owner to read the notification configuration of a bucket. However, the bucket
* owner can use a bucket policy to grant permission to other users to read this configuration with the
* s3:GetBucketNotification permission.
*
*
* For more information about setting and reading the notification configuration on a bucket, see Setting Up Notification of Bucket
* Events. For more information about bucket policies, see Using Bucket Policies.
*
*
* The following operation is related to GetBucketNotification:
*
*
* -
*
*
*
*
* @param getBucketNotificationConfigurationRequest
* @return Result of the GetBucketNotificationConfiguration operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetBucketNotificationConfiguration
*/
@Override
public GetBucketNotificationConfigurationResponse getBucketNotificationConfiguration(
GetBucketNotificationConfigurationRequest getBucketNotificationConfigurationRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<GetBucketNotificationConfigurationResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(GetBucketNotificationConfigurationResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
getBucketNotificationConfigurationRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetBucketNotificationConfiguration");
return clientHandler
.execute(new ClientExecutionParams<GetBucketNotificationConfigurationRequest, GetBucketNotificationConfigurationResponse>()
.withOperationName("GetBucketNotificationConfiguration").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getBucketNotificationConfigurationRequest)
.withMarshaller(new GetBucketNotificationConfigurationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the
* s3:GetBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying Permissions in a
* Policy.
*
*
* For information about Amazon S3 Object Ownership, see Using Object Ownership.
*
*
* The following operations are related to GetBucketOwnershipControls:
*
*
* -
*
*
* -
*
*
*
*
* @param getBucketOwnershipControlsRequest
* @return Result of the GetBucketOwnershipControls operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetBucketOwnershipControls
*/
@Override
public GetBucketOwnershipControlsResponse getBucketOwnershipControls(
GetBucketOwnershipControlsRequest getBucketOwnershipControlsRequest) throws AwsServiceException, SdkClientException,
S3Exception {
HttpResponseHandler<Response<GetBucketOwnershipControlsResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(GetBucketOwnershipControlsResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getBucketOwnershipControlsRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetBucketOwnershipControls");
return clientHandler
.execute(new ClientExecutionParams<GetBucketOwnershipControlsRequest, GetBucketOwnershipControlsResponse>()
.withOperationName("GetBucketOwnershipControls").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getBucketOwnershipControlsRequest)
.withMarshaller(new GetBucketOwnershipControlsRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Returns the policy of a specified bucket. If you are using an identity other than the root user of the AWS
* account that owns the bucket, the calling identity must have the GetBucketPolicy permissions on the
* specified bucket and belong to the bucket owner's account in order to use this operation.
*
*
* If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access Denied
* error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's
* account, Amazon S3 returns a 405 Method Not Allowed error.
*
*
*
* As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even
* if the policy explicitly denies the root user the ability to perform this action.
*
*
*
* For more information about bucket policies, see Using Bucket Policies and User
* Policies.
*
*
* The following operation is related to GetBucketPolicy:
*
*
* -
*
* GetObject
*
*
*
*
* @param getBucketPolicyRequest
* @return Result of the GetBucketPolicy operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetBucketPolicy
*/
@Override
public GetBucketPolicyResponse getBucketPolicy(GetBucketPolicyRequest getBucketPolicyRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<GetBucketPolicyResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
GetBucketPolicyResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getBucketPolicyRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetBucketPolicy");
return clientHandler.execute(new ClientExecutionParams<GetBucketPolicyRequest, GetBucketPolicyResponse>()
.withOperationName("GetBucketPolicy").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getBucketPolicyRequest)
.withMarshaller(new GetBucketPolicyRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
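// Illustrative usage sketch (placeholder bucket name): the bucket policy is returned as a JSON string.
//
//     S3Client s3 = S3Client.create();
//     GetBucketPolicyResponse policy = s3.getBucketPolicy(
//             GetBucketPolicyRequest.builder().bucket("my-bucket").build());
//     System.out.println(policy.policy()); // raw JSON policy document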
/**
*
* Retrieves the policy status for an Amazon S3 bucket, indicating whether the bucket is public. In order to use
* this operation, you must have the s3:GetBucketPolicyStatus permission. For more information about
* Amazon S3 permissions, see Specifying Permissions in a
* Policy.
*
*
* For more information about when Amazon S3 considers a bucket public, see The Meaning of "Public".
*
*
* The following operations are related to GetBucketPolicyStatus:
*
*
* -
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
* @param getBucketPolicyStatusRequest
* @return Result of the GetBucketPolicyStatus operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetBucketPolicyStatus
*/
@Override
public GetBucketPolicyStatusResponse getBucketPolicyStatus(GetBucketPolicyStatusRequest getBucketPolicyStatusRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<GetBucketPolicyStatusResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(GetBucketPolicyStatusResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getBucketPolicyStatusRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetBucketPolicyStatus");
return clientHandler.execute(new ClientExecutionParams<GetBucketPolicyStatusRequest, GetBucketPolicyStatusResponse>()
.withOperationName("GetBucketPolicyStatus").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getBucketPolicyStatusRequest)
.withMarshaller(new GetBucketPolicyStatusRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Returns the replication configuration of a bucket.
*
*
*
* It can take a while to propagate the put or delete a replication configuration to all Amazon S3 systems.
* Therefore, a get request soon after put or delete can return a wrong result.
*
*
*
* For information about replication configuration, see Replication in the Amazon Simple
* Storage Service Developer Guide.
*
*
* This operation requires permissions for the s3:GetReplicationConfiguration action. For more
* information about permissions, see Using Bucket Policies and User
* Policies.
*
*
* If you include the Filter element in a replication configuration, you must also include the
* DeleteMarkerReplication and Priority elements. The response also returns those
* elements.
*
*
* For information about GetBucketReplication errors, see List of
* replication-related error codes.
*
*
* The following operations are related to GetBucketReplication:
*
*
* -
*
*
* -
*
*
*
*
* @param getBucketReplicationRequest
* @return Result of the GetBucketReplication operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetBucketReplication
*/
@Override
public GetBucketReplicationResponse getBucketReplication(GetBucketReplicationRequest getBucketReplicationRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<GetBucketReplicationResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(GetBucketReplicationResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getBucketReplicationRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetBucketReplication");
return clientHandler.execute(new ClientExecutionParams<GetBucketReplicationRequest, GetBucketReplicationResponse>()
.withOperationName("GetBucketReplication").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getBucketReplicationRequest)
.withMarshaller(new GetBucketReplicationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
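// Illustrative usage sketch (placeholder bucket name): inspecting the replication rules,
// including the DeleteMarkerReplication and Priority elements mentioned above.
//
//     S3Client s3 = S3Client.create();
//     GetBucketReplicationResponse replication = s3.getBucketReplication(
//             GetBucketReplicationRequest.builder().bucket("my-bucket").build());
//     replication.replicationConfiguration().rules().forEach(
//             rule -> System.out.println(rule.priority() + " " + rule.deleteMarkerReplication()));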
/**
*
* Returns the request payment configuration of a bucket. To use this version of the operation, you must be the
* bucket owner. For more information, see Requester Pays Buckets.
*
*
* The following operations are related to GetBucketRequestPayment:
*
*
* -
*
* ListObjects
*
*
*
*
* @param getBucketRequestPaymentRequest
* @return Result of the GetBucketRequestPayment operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetBucketRequestPayment
*/
@Override
public GetBucketRequestPaymentResponse getBucketRequestPayment(GetBucketRequestPaymentRequest getBucketRequestPaymentRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<GetBucketRequestPaymentResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(GetBucketRequestPaymentResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getBucketRequestPaymentRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetBucketRequestPayment");
return clientHandler
.execute(new ClientExecutionParams<GetBucketRequestPaymentRequest, GetBucketRequestPaymentResponse>()
.withOperationName("GetBucketRequestPayment").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getBucketRequestPaymentRequest)
.withMarshaller(new GetBucketRequestPaymentRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Returns the tag set associated with the bucket.
*
*
* To use this operation, you must have permission to perform the s3:GetBucketTagging action. By
* default, the bucket owner has this permission and can grant this permission to others.
*
*
* GetBucketTagging has the following special error:
*
*
* -
*
* Error code: NoSuchTagSetError
*
*
* -
*
* Description: There is no tag set associated with the bucket.
*
*
*
*
*
*
* The following operations are related to GetBucketTagging:
*
*
* -
*
* PutBucketTagging
*
*
* -
*
*
*
*
* @param getBucketTaggingRequest
* @return Result of the GetBucketTagging operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetBucketTagging
*/
@Override
public GetBucketTaggingResponse getBucketTagging(GetBucketTaggingRequest getBucketTaggingRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<GetBucketTaggingResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
GetBucketTaggingResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getBucketTaggingRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetBucketTagging");
return clientHandler.execute(new ClientExecutionParams<GetBucketTaggingRequest, GetBucketTaggingResponse>()
.withOperationName("GetBucketTagging").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getBucketTaggingRequest)
.withMarshaller(new GetBucketTaggingRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
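// Illustrative usage sketch (placeholder bucket name). A bucket with no tag set raises an
// S3Exception with the special error described above.
//
//     S3Client s3 = S3Client.create();
//     GetBucketTaggingResponse tagging = s3.getBucketTagging(
//             GetBucketTaggingRequest.builder().bucket("my-bucket").build());
//     tagging.tagSet().forEach(tag -> System.out.println(tag.key() + "=" + tag.value()));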
/**
*
* Returns the versioning state of a bucket.
*
*
* To retrieve the versioning state of a bucket, you must be the bucket owner.
*
*
* This implementation also returns the MFA Delete status of the versioning state. If the MFA Delete status is
* enabled, the bucket owner must use an authentication device to change the versioning state of the
* bucket.
*
*
* The following operations are related to GetBucketVersioning:
*
*
* -
*
* GetObject
*
*
* -
*
* PutObject
*
*
* -
*
* DeleteObject
*
*
*
*
* @param getBucketVersioningRequest
* @return Result of the GetBucketVersioning operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetBucketVersioning
*/
@Override
public GetBucketVersioningResponse getBucketVersioning(GetBucketVersioningRequest getBucketVersioningRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<GetBucketVersioningResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(GetBucketVersioningResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getBucketVersioningRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetBucketVersioning");
return clientHandler.execute(new ClientExecutionParams<GetBucketVersioningRequest, GetBucketVersioningResponse>()
.withOperationName("GetBucketVersioning").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getBucketVersioningRequest)
.withMarshaller(new GetBucketVersioningRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
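// Illustrative usage sketch (placeholder bucket name): reading the versioning and MFA Delete status.
//
//     S3Client s3 = S3Client.create();
//     GetBucketVersioningResponse versioning = s3.getBucketVersioning(
//             GetBucketVersioningRequest.builder().bucket("my-bucket").build());
//     System.out.println("Status: " + versioning.status() + ", MFA Delete: " + versioning.mfaDelete());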
/**
*
* Returns the website configuration for a bucket. To host a website on Amazon S3, you can configure a bucket as a
* website by adding a website configuration. For more information about hosting websites, see Hosting Websites on Amazon S3.
*
*
* This GET operation requires the S3:GetBucketWebsite permission. By default, only the bucket owner
* can read the bucket website configuration. However, bucket owners can allow other users to read the website
* configuration by writing a bucket policy granting them the S3:GetBucketWebsite permission.
*
*
* The following operations are related to DeleteBucketWebsite:
*
*
* -
*
*
* -
*
* PutBucketWebsite
*
*
*
*
* @param getBucketWebsiteRequest
* @return Result of the GetBucketWebsite operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetBucketWebsite
*/
@Override
public GetBucketWebsiteResponse getBucketWebsite(GetBucketWebsiteRequest getBucketWebsiteRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<GetBucketWebsiteResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
GetBucketWebsiteResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getBucketWebsiteRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetBucketWebsite");
return clientHandler.execute(new ClientExecutionParams<GetBucketWebsiteRequest, GetBucketWebsiteResponse>()
.withOperationName("GetBucketWebsite").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getBucketWebsiteRequest)
.withMarshaller(new GetBucketWebsiteRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Retrieves objects from Amazon S3. To use GET, you must have READ access to the object.
* If you grant READ access to the anonymous user, you can return the object without using an
* authorization header.
*
*
* An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can,
* however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead
* of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.
*
*
* To get an object from such a logical hierarchy, specify the full key name for the object in the GET
* operation. For a virtual hosted-style request example, if you have the object
* photos/2006/February/sample.jpg, specify the resource as /photos/2006/February/sample.jpg. For a
* path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named
* examplebucket, specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more
* information about request types, see HTTP Host Header Bucket Specification.
*
*
* To distribute large files to many people, you can save bandwidth costs by using BitTorrent. For more information,
* see Amazon S3 Torrent. For more
* information about returning the ACL of an object, see GetObjectAcl.
*
*
* If the object you are retrieving is stored in the S3 Glacier, S3 Glacier Deep Archive, S3 Intelligent-Tiering
* Archive, or S3 Intelligent-Tiering Deep Archive storage classes, before you can retrieve the object you must
* first restore a copy using RestoreObject. Otherwise, this
* operation returns an InvalidObjectStateError error. For information about restoring archived
* objects, see Restoring Archived
* Objects.
*
*
* Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests
* if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with
* Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400
* BadRequest error.
*
*
* If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you
* store the object in Amazon S3, then when you GET the object, you must use the following headers:
*
*
* -
*
* x-amz-server-side-encryption-customer-algorithm
*
*
* -
*
* x-amz-server-side-encryption-customer-key
*
*
* -
*
* x-amz-server-side-encryption-customer-key-MD5
*
*
*
*
* For more information about SSE-C, see Server-Side
* Encryption (Using Customer-Provided Encryption Keys).
*
*
* Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging
* action), the response also returns the x-amz-tagging-count header that provides the count of number
* of tags associated with the object. You can use GetObjectTagging to retrieve
* the tag set associated with an object.
*
*
* Permissions
*
*
* You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a
* Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also
* have the s3:ListBucket permission.
*
*
* -
*
* If you have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code
* 404 ("no such key") error.
*
*
* -
*
* If you don't have the s3:ListBucket permission, Amazon S3 will return an HTTP status code 403
* ("access denied") error.
*
*
*
*
* Versioning
*
*
* By default, the GET operation returns the current version of an object. To return a different version, use the
* versionId subresource.
*
*
*
* If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and
* includes x-amz-delete-marker: true in the response.
*
*
*
* For more information about versioning, see PutBucketVersioning.
*
*
* Overriding Response Header Values
*
*
* There are times when you want to override certain response header values in a GET response. For example, you
* might override the Content-Disposition response header value in your GET request.
*
*
* You can override values for a set of response headers using the following query parameters. These response header
* values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers
* you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an
* object. The response headers that you can override for the GET response are Content-Type,
* Content-Language, Expires, Cache-Control, Content-Disposition, and Content-Encoding. To override these
* header values in the GET response, you use the following request parameters.
*
*
*
* You must sign the request, either using an Authorization header or a presigned URL, when using these parameters.
* They cannot be used with an unsigned (anonymous) request.
*
*
*
* -
*
* response-content-type
*
*
* -
*
* response-content-language
*
*
* -
*
* response-expires
*
*
* -
*
* response-cache-control
*
*
* -
*
* response-content-disposition
*
*
* -
*
* response-content-encoding
*
*
*
*
* Additional Considerations about Request Headers
*
*
* If both of the If-Match and If-Unmodified-Since headers are present in the request as follows:
* If-Match condition evaluates to true, and If-Unmodified-Since condition evaluates to false;
* then S3 returns 200 OK and the data requested.
*
*
* If both of the If-None-Match and If-Modified-Since headers are present in the request as follows:
* If-None-Match condition evaluates to false, and If-Modified-Since condition evaluates to true;
* then S3 returns a 304 Not Modified response code.
*
*
* For more information about conditional requests, see RFC 7232.
*
*
* The following operations are related to GetObject:
*
*
* -
*
* ListBuckets
*
*
* -
*
* GetObjectAcl
*
*
*
*
* @param getObjectRequest
* @param responseTransformer
* Functional interface for processing the streamed response content. The unmarshalled GetObjectResponse and
* an InputStream to the response content are provided as parameters to the callback. The callback may return
* a transformed type which will be the return value of this method. See
* {@link software.amazon.awssdk.core.sync.ResponseTransformer} for details on implementing this interface
* and for links to pre-canned implementations for common scenarios like downloading to a file. The service
* documentation for the response content is as follows '
*
* Object data.
*
* '.
* @return The transformed result of the ResponseTransformer.
* @throws NoSuchKeyException
* The specified key does not exist.
* @throws InvalidObjectStateException
* Object is archived and inaccessible until restored.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetObject
*/
@Override
public <ReturnT> ReturnT getObject(GetObjectRequest getObjectRequest,
ResponseTransformer<GetObjectResponse, ReturnT> responseTransformer) throws NoSuchKeyException,
InvalidObjectStateException, AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<GetObjectResponse> responseHandler = protocolFactory.createResponseHandler(
GetObjectResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true));
HttpResponseHandler<AwsServiceException> errorResponseHandler = protocolFactory.createErrorResponseHandler();
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getObjectRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetObject");
return clientHandler.execute(
new ClientExecutionParams<GetObjectRequest, GetObjectResponse>().withOperationName("GetObject")
.withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler)
.withInput(getObjectRequest).withMetricCollector(apiCallMetricCollector)
.withMarshaller(new GetObjectRequestMarshaller(protocolFactory)), responseTransformer);
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
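// Illustrative usage sketch (placeholder bucket, key, and file path): downloading an object to a
// file with a pre-canned ResponseTransformer, and overriding the Content-Type returned on the
// 200 response via the response-content-type parameter described above.
//
//     S3Client s3 = S3Client.create();
//     GetObjectRequest request = GetObjectRequest.builder()
//             .bucket("my-bucket")
//             .key("photos/2006/February/sample.jpg")
//             .responseContentType("image/jpeg")
//             .build();
//     GetObjectResponse metadata = s3.getObject(request,
//             ResponseTransformer.toFile(java.nio.file.Paths.get("sample.jpg")));
//     System.out.println(metadata.contentType() + " " + metadata.eTag());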
/**
*
* Returns the access control list (ACL) of an object. To use this operation, you must have READ_ACP
* access to the object.
*
*
* This action is not supported by Amazon S3 on Outposts.
*
*
* Versioning
*
*
* By default, GET returns ACL information about the current version of an object. To return ACL information about a
* different version, use the versionId subresource.
*
*
* The following operations are related to GetObjectAcl:
*
*
* -
*
* GetObject
*
*
* -
*
* DeleteObject
*
*
* -
*
* PutObject
*
*
*
*
* @param getObjectAclRequest
* @return Result of the GetObjectAcl operation returned by the service.
* @throws NoSuchKeyException
* The specified key does not exist.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetObjectAcl
*/
@Override
public GetObjectAclResponse getObjectAcl(GetObjectAclRequest getObjectAclRequest) throws NoSuchKeyException,
AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<GetObjectAclResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
GetObjectAclResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getObjectAclRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetObjectAcl");
return clientHandler.execute(new ClientExecutionParams<GetObjectAclRequest, GetObjectAclResponse>()
.withOperationName("GetObjectAcl").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getObjectAclRequest)
.withMarshaller(new GetObjectAclRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
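// Illustrative usage sketch (placeholder bucket and key): listing the grants on an object's ACL.
//
//     S3Client s3 = S3Client.create();
//     GetObjectAclResponse acl = s3.getObjectAcl(GetObjectAclRequest.builder()
//             .bucket("my-bucket").key("my-key").build());
//     acl.grants().forEach(grant -> System.out.println(grant.grantee().type() + " " + grant.permission()));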
/**
*
* Gets an object's current Legal Hold status. For more information, see Locking Objects.
*
*
* This action is not supported by Amazon S3 on Outposts.
*
*
* @param getObjectLegalHoldRequest
* @return Result of the GetObjectLegalHold operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetObjectLegalHold
*/
@Override
public GetObjectLegalHoldResponse getObjectLegalHold(GetObjectLegalHoldRequest getObjectLegalHoldRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<GetObjectLegalHoldResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(GetObjectLegalHoldResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getObjectLegalHoldRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetObjectLegalHold");
return clientHandler.execute(new ClientExecutionParams<GetObjectLegalHoldRequest, GetObjectLegalHoldResponse>()
.withOperationName("GetObjectLegalHold").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getObjectLegalHoldRequest)
.withMarshaller(new GetObjectLegalHoldRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Gets the Object Lock configuration for a bucket. The rule specified in the Object Lock configuration will be
* applied by default to every new object placed in the specified bucket. For more information, see Locking Objects.
*
*
* @param getObjectLockConfigurationRequest
* @return Result of the GetObjectLockConfiguration operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetObjectLockConfiguration
*/
@Override
public GetObjectLockConfigurationResponse getObjectLockConfiguration(
GetObjectLockConfigurationRequest getObjectLockConfigurationRequest) throws AwsServiceException, SdkClientException,
S3Exception {
HttpResponseHandler<Response<GetObjectLockConfigurationResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(GetObjectLockConfigurationResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getObjectLockConfigurationRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetObjectLockConfiguration");
return clientHandler
.execute(new ClientExecutionParams<GetObjectLockConfigurationRequest, GetObjectLockConfigurationResponse>()
.withOperationName("GetObjectLockConfiguration").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getObjectLockConfigurationRequest)
.withMarshaller(new GetObjectLockConfigurationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
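// Illustrative usage sketch (placeholder bucket name): checking whether Object Lock is enabled
// and which default retention rule applies to new objects.
//
//     S3Client s3 = S3Client.create();
//     GetObjectLockConfigurationResponse lock = s3.getObjectLockConfiguration(
//             GetObjectLockConfigurationRequest.builder().bucket("my-bucket").build());
//     System.out.println(lock.objectLockConfiguration().objectLockEnabled());
//     System.out.println(lock.objectLockConfiguration().rule());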
/**
*
* Retrieves an object's retention settings. For more information, see Locking Objects.
*
*
* This action is not supported by Amazon S3 on Outposts.
*
*
* @param getObjectRetentionRequest
* @return Result of the GetObjectRetention operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetObjectRetention
*/
@Override
public GetObjectRetentionResponse getObjectRetention(GetObjectRetentionRequest getObjectRetentionRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<GetObjectRetentionResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(GetObjectRetentionResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getObjectRetentionRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetObjectRetention");
return clientHandler.execute(new ClientExecutionParams<GetObjectRetentionRequest, GetObjectRetentionResponse>()
.withOperationName("GetObjectRetention").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getObjectRetentionRequest)
.withMarshaller(new GetObjectRetentionRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Returns the tag-set of an object. You send the GET request against the tagging subresource associated with the
* object.
*
*
* To use this operation, you must have permission to perform the s3:GetObjectTagging action. By
* default, the GET operation returns information about the current version of an object. For a versioned bucket, you
* can have multiple versions of an object in your bucket. To retrieve tags of any other version, use the versionId
* query parameter. You also need permission for the s3:GetObjectVersionTagging action.
*
*
* By default, the bucket owner has this permission and can grant this permission to others.
*
*
* For information about the Amazon S3 object tagging feature, see Object Tagging.
*
*
* The following operation is related to GetObjectTagging:
*
*
* -
*
* PutObjectTagging
*
*
*
*
* @param getObjectTaggingRequest
* @return Result of the GetObjectTagging operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetObjectTagging
*/
@Override
public GetObjectTaggingResponse getObjectTagging(GetObjectTaggingRequest getObjectTaggingRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<GetObjectTaggingResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
GetObjectTaggingResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getObjectTaggingRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetObjectTagging");
return clientHandler.execute(new ClientExecutionParams<GetObjectTaggingRequest, GetObjectTaggingResponse>()
.withOperationName("GetObjectTagging").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getObjectTaggingRequest)
.withMarshaller(new GetObjectTaggingRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
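/**
*
* Illustrative usage sketch (not part of the generated client): reading the tag set of the current object
* version with getObjectTagging. Bucket and key names are placeholders.
*/
private void getObjectTaggingUsageExample() {
GetObjectTaggingResponse tagging = getObjectTagging(GetObjectTaggingRequest.builder()
.bucket("example-bucket") // hypothetical bucket name
.key("example-key") // hypothetical object key
.build());
// Each tag in the returned set exposes key() and value().
tagging.tagSet().forEach(tag -> System.out.println(tag.key() + "=" + tag.value()));
}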
/**
*
* Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're distributing large files. For
* more information about BitTorrent, see Using BitTorrent with Amazon S3.
*
*
*
* You can get torrent only for objects that are less than 5 GB in size, and that are not encrypted using
* server-side encryption with a customer-provided encryption key.
*
*
*
* To use GET, you must have READ access to the object.
*
*
* This action is not supported by Amazon S3 on Outposts.
*
*
* The following operation is related to GetObjectTorrent:
*
*
* -
*
* GetObject
*
*
*
*
* @param getObjectTorrentRequest
* @param responseTransformer
* Functional interface for processing the streamed response content. The unmarshalled
* GetObjectTorrentResponse and an InputStream to the response content are provided as parameters to the
* callback. The callback may return a transformed type which will be the return value of this method. See
* {@link software.amazon.awssdk.core.sync.ResponseTransformer} for details on implementing this interface
* and for links to pre-canned implementations for common scenarios like downloading to a file. The service
* documentation for the response content is as follows '
*
* A Bencoded dictionary as defined by the BitTorrent specification
*
* '.
* @return The transformed result of the ResponseTransformer.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetObjectTorrent
*/
@Override
public <ReturnT> ReturnT getObjectTorrent(GetObjectTorrentRequest getObjectTorrentRequest,
ResponseTransformer<GetObjectTorrentResponse, ReturnT> responseTransformer) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<GetObjectTorrentResponse> responseHandler = protocolFactory.createResponseHandler(
GetObjectTorrentResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(true));
HttpResponseHandler<AwsServiceException> errorResponseHandler = protocolFactory.createErrorResponseHandler();
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getObjectTorrentRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetObjectTorrent");
return clientHandler.execute(
new ClientExecutionParams<GetObjectTorrentRequest, GetObjectTorrentResponse>()
.withOperationName("GetObjectTorrent").withResponseHandler(responseHandler)
.withErrorResponseHandler(errorResponseHandler).withInput(getObjectTorrentRequest)
.withMetricCollector(apiCallMetricCollector)
.withMarshaller(new GetObjectTorrentRequestMarshaller(protocolFactory)), responseTransformer);
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
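/**
*
* Illustrative usage sketch (not part of the generated client): streaming the torrent content to a local file
* with a file-based ResponseTransformer. The bucket, key, and target path are placeholders.
*/
private void getObjectTorrentUsageExample() {
GetObjectTorrentResponse response = getObjectTorrent(GetObjectTorrentRequest.builder()
.bucket("example-bucket") // hypothetical bucket name
.key("example-key") // hypothetical object key (unencrypted and smaller than 5 GB)
.build(), ResponseTransformer.toFile(java.nio.file.Paths.get("example-key.torrent")));
// With ResponseTransformer.toFile, the transformed return value is the unmarshalled response itself.
System.out.println(response.responseMetadata().requestId());
}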
/**
*
* Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you
* must have the s3:GetBucketPublicAccessBlock permission. For more information about Amazon S3
* permissions, see Specifying Permissions in a Policy.
*
*
*
* When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an object, it checks
* the PublicAccessBlock configuration for both the bucket (or the bucket that contains the object) and
* the bucket owner's account. If the PublicAccessBlock settings are different between the bucket and
* the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.
*
*
*
* For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of "Public".
*
*
* The following operations are related to GetPublicAccessBlock:
*
*
* -
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
* @param getPublicAccessBlockRequest
* @return Result of the GetPublicAccessBlock operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.GetPublicAccessBlock
*/
@Override
public GetPublicAccessBlockResponse getPublicAccessBlock(GetPublicAccessBlockRequest getPublicAccessBlockRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<GetPublicAccessBlockResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(GetPublicAccessBlockResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, getPublicAccessBlockRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetPublicAccessBlock");
return clientHandler.execute(new ClientExecutionParams<GetPublicAccessBlockRequest, GetPublicAccessBlockResponse>()
.withOperationName("GetPublicAccessBlock").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(getPublicAccessBlockRequest)
.withMarshaller(new GetPublicAccessBlockRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
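/**
*
* Illustrative usage sketch (not part of the generated client): reading a bucket's PublicAccessBlock
* configuration. The bucket name is a placeholder.
*/
private void getPublicAccessBlockUsageExample() {
GetPublicAccessBlockResponse response = getPublicAccessBlock(GetPublicAccessBlockRequest.builder()
.bucket("example-bucket") // hypothetical bucket name
.build());
// blockPublicAcls() is one of the four flags in the returned configuration.
System.out.println(response.publicAccessBlockConfiguration().blockPublicAcls());
}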
/**
*
* This operation is useful to determine if a bucket exists and you have permission to access it. The operation
* returns a 200 OK if the bucket exists and you have permission to access it. Otherwise, the operation
* might return responses such as 404 Not Found and 403 Forbidden.
*
*
* To use this operation, you must have permissions to perform the s3:ListBucket action. The bucket
* owner has this permission by default and can grant this permission to others. For more information about
* permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your
* Amazon S3 Resources.
*
*
* @param headBucketRequest
* @return Result of the HeadBucket operation returned by the service.
* @throws NoSuchBucketException
* The specified bucket does not exist.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.HeadBucket
*/
@Override
public HeadBucketResponse headBucket(HeadBucketRequest headBucketRequest) throws NoSuchBucketException, AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<HeadBucketResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
HeadBucketResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, headBucketRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "HeadBucket");
return clientHandler.execute(new ClientExecutionParams<HeadBucketRequest, HeadBucketResponse>()
.withOperationName("HeadBucket").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(headBucketRequest)
.withMarshaller(new HeadBucketRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
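/**
*
* Illustrative usage sketch (not part of the generated client): using headBucket to check whether a bucket
* exists and is accessible to the caller. The bucket name is a placeholder.
*/
private boolean headBucketUsageExample() {
try {
headBucket(HeadBucketRequest.builder().bucket("example-bucket").build());
return true; // 200 OK: the bucket exists and the caller has permission to access it
} catch (NoSuchBucketException e) {
return false; // 404 Not Found: the bucket does not exist
} catch (S3Exception e) {
throw e; // 403 Forbidden and other service errors surface as S3Exception
}
}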
/**
*
* The HEAD operation retrieves metadata from an object without returning the object itself. This operation is
* useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.
*
*
* A HEAD request has the same options as a GET operation on an object. The response is
* identical to the GET response except that there is no response body.
*
*
* If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you
* store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following
* headers:
*
*
* -
*
* x-amz-server-side-encryption-customer-algorithm
*
*
* -
*
* x-amz-server-side-encryption-customer-key
*
*
* -
*
* x-amz-server-side-encryption-customer-key-MD5
*
*
*
*
* For more information about SSE-C, see Server-Side
* Encryption (Using Customer-Provided Encryption Keys).
*
*
*
* Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests
* if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with
* Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400
* BadRequest error.
*
*
*
* Request headers are limited to 8 KB in size. For more information, see Common Request Headers.
*
*
* Consider the following when using request headers:
*
*
* -
*
* Consideration 1 – If both of the If-Match and If-Unmodified-Since headers are present
* in the request as follows:
*
*
* -
*
* If-Match condition evaluates to true, and;
*
*
* -
*
* If-Unmodified-Since condition evaluates to false;
*
*
*
*
* Then Amazon S3 returns 200 OK and the data requested.
*
*
* -
*
* Consideration 2 – If both of the If-None-Match and If-Modified-Since headers are
* present in the request as follows:
*
*
* -
*
* If-None-Match condition evaluates to false, and;
*
*
* -
*
* If-Modified-Since condition evaluates to true;
*
*
*
*
* Then Amazon S3 returns the 304 Not Modified response code.
*
*
*
*
* For more information about conditional requests, see RFC 7232.
*
*
* Permissions
*
*
* You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a
* Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also
* have the s3:ListBucket permission.
*
*
* -
*
* If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404
* ("no such key") error.
*
*
* -
*
* If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403
* ("access denied") error.
*
*
*
*
* The following operation is related to HeadObject:
*
*
* -
*
* GetObject
*
*
*
*
* @param headObjectRequest
* @return Result of the HeadObject operation returned by the service.
* @throws NoSuchKeyException
* The specified key does not exist.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.HeadObject
*/
@Override
public HeadObjectResponse headObject(HeadObjectRequest headObjectRequest) throws NoSuchKeyException, AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<HeadObjectResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
HeadObjectResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, headObjectRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "HeadObject");
return clientHandler.execute(new ClientExecutionParams<HeadObjectRequest, HeadObjectResponse>()
.withOperationName("HeadObject").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(headObjectRequest)
.withMarshaller(new HeadObjectRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
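/**
*
* Illustrative usage sketch (not part of the generated client): fetching object metadata with headObject.
* Bucket and key names are placeholders; for SSE-C objects the request builder's sseCustomer* setters would
* carry the headers listed above.
*/
private void headObjectUsageExample() {
try {
HeadObjectResponse metadata = headObject(HeadObjectRequest.builder()
.bucket("example-bucket") // hypothetical bucket name
.key("example-key") // hypothetical object key
.build());
System.out.println("Size: " + metadata.contentLength() + ", ETag: " + metadata.eTag());
} catch (NoSuchKeyException e) {
// Surfaces as 404 when the caller has s3:ListBucket; without it the service responds with 403.
}
}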
/**
*
* Lists the analytics configurations for the bucket. You can have up to 1,000 analytics configurations per bucket.
*
*
* This operation supports list pagination and does not return more than 100 configurations at a time. You should
* always check the IsTruncated element in the response. If there are no more configurations to list,
* IsTruncated is set to false. If there are more configurations to list, IsTruncated is
* set to true, and there will be a value in NextContinuationToken. You use the
* NextContinuationToken value to continue the pagination of the list by passing the value in
* continuation-token in the request to GET the next page.
*
*
* To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action.
* The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more
* information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your
* Amazon S3 Resources.
*
*
* For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage
* Class Analysis.
*
*
* The following operations are related to ListBucketAnalyticsConfigurations:
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
* @param listBucketAnalyticsConfigurationsRequest
* @return Result of the ListBucketAnalyticsConfigurations operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.ListBucketAnalyticsConfigurations
*/
@Override
public ListBucketAnalyticsConfigurationsResponse listBucketAnalyticsConfigurations(
ListBucketAnalyticsConfigurationsRequest listBucketAnalyticsConfigurationsRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<ListBucketAnalyticsConfigurationsResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(ListBucketAnalyticsConfigurationsResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
listBucketAnalyticsConfigurationsRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "ListBucketAnalyticsConfigurations");
return clientHandler
.execute(new ClientExecutionParams<ListBucketAnalyticsConfigurationsRequest, ListBucketAnalyticsConfigurationsResponse>()
.withOperationName("ListBucketAnalyticsConfigurations").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(listBucketAnalyticsConfigurationsRequest)
.withMarshaller(new ListBucketAnalyticsConfigurationsRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
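/**
*
* Illustrative usage sketch (not part of the generated client): paging through analytics configurations by
* following NextContinuationToken, as described above. The bucket name is a placeholder.
*/
private void listBucketAnalyticsConfigurationsUsageExample() {
String token = null;
ListBucketAnalyticsConfigurationsResponse page;
do {
page = listBucketAnalyticsConfigurations(ListBucketAnalyticsConfigurationsRequest.builder()
.bucket("example-bucket") // hypothetical bucket name
.continuationToken(token).build());
page.analyticsConfigurationList().forEach(config -> System.out.println(config.id()));
token = page.nextContinuationToken();
} while (Boolean.TRUE.equals(page.isTruncated()));
}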
/**
*
* Lists the S3 Intelligent-Tiering configuration from the specified bucket.
*
*
* The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to
* the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering
* delivers automatic cost savings by moving data between access tiers, when access patterns change.
*
*
* The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at
* least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects
* can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering
* storage class.
*
*
* If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30
* days. For more information, see Storage
* class for automatically optimizing frequently and infrequently accessed objects.
*
*
* Operations related to ListBucketIntelligentTieringConfigurations include:
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
* @param listBucketIntelligentTieringConfigurationsRequest
* @return Result of the ListBucketIntelligentTieringConfigurations operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.ListBucketIntelligentTieringConfigurations
*/
@Override
public ListBucketIntelligentTieringConfigurationsResponse listBucketIntelligentTieringConfigurations(
ListBucketIntelligentTieringConfigurationsRequest listBucketIntelligentTieringConfigurationsRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<ListBucketIntelligentTieringConfigurationsResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(ListBucketIntelligentTieringConfigurationsResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
listBucketIntelligentTieringConfigurationsRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "ListBucketIntelligentTieringConfigurations");
return clientHandler
.execute(new ClientExecutionParams<ListBucketIntelligentTieringConfigurationsRequest, ListBucketIntelligentTieringConfigurationsResponse>()
.withOperationName("ListBucketIntelligentTieringConfigurations")
.withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector)
.withInput(listBucketIntelligentTieringConfigurationsRequest)
.withMarshaller(new ListBucketIntelligentTieringConfigurationsRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Returns a list of inventory configurations for the bucket. You can have up to 1,000 inventory configurations per
* bucket.
*
*
* This operation supports list pagination and does not return more than 100 configurations at a time. Always check
* the IsTruncated element in the response. If there are no more configurations to list,
* IsTruncated is set to false. If there are more configurations to list, IsTruncated is
* set to true, and there is a value in NextContinuationToken. You use the
* NextContinuationToken value to continue the pagination of the list by passing the value in
* continuation-token in the request to GET the next page.
*
*
* To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration action.
* The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more
* information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your
* Amazon S3 Resources.
*
*
* For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.
*
*
* The following operations are related to ListBucketInventoryConfigurations:
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
* @param listBucketInventoryConfigurationsRequest
* @return Result of the ListBucketInventoryConfigurations operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.ListBucketInventoryConfigurations
*/
@Override
public ListBucketInventoryConfigurationsResponse listBucketInventoryConfigurations(
ListBucketInventoryConfigurationsRequest listBucketInventoryConfigurationsRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<ListBucketInventoryConfigurationsResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(ListBucketInventoryConfigurationsResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
listBucketInventoryConfigurationsRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "ListBucketInventoryConfigurations");
return clientHandler
.execute(new ClientExecutionParams<ListBucketInventoryConfigurationsRequest, ListBucketInventoryConfigurationsResponse>()
.withOperationName("ListBucketInventoryConfigurations").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(listBucketInventoryConfigurationsRequest)
.withMarshaller(new ListBucketInventoryConfigurationsRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Lists the metrics configurations for the bucket. The metrics configurations are only for the request metrics of
* the bucket and do not provide information on daily storage metrics. You can have up to 1,000 configurations per
* bucket.
*
*
* This operation supports list pagination and does not return more than 100 configurations at a time. Always check
* the IsTruncated element in the response. If there are no more configurations to list,
* IsTruncated is set to false. If there are more configurations to list, IsTruncated is
* set to true, and there is a value in NextContinuationToken. You use the
* NextContinuationToken value to continue the pagination of the list by passing the value in
* continuation-token in the request to GET the next page.
*
*
* To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration action.
* The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more
* information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your
* Amazon S3 Resources.
*
*
* For more information about metrics configurations and CloudWatch request metrics, see Monitoring Metrics with Amazon
* CloudWatch.
*
*
* The following operations are related to ListBucketMetricsConfigurations:
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
* @param listBucketMetricsConfigurationsRequest
* @return Result of the ListBucketMetricsConfigurations operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.ListBucketMetricsConfigurations
*/
@Override
public ListBucketMetricsConfigurationsResponse listBucketMetricsConfigurations(
ListBucketMetricsConfigurationsRequest listBucketMetricsConfigurationsRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<ListBucketMetricsConfigurationsResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(ListBucketMetricsConfigurationsResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
listBucketMetricsConfigurationsRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "ListBucketMetricsConfigurations");
return clientHandler
.execute(new ClientExecutionParams<ListBucketMetricsConfigurationsRequest, ListBucketMetricsConfigurationsResponse>()
.withOperationName("ListBucketMetricsConfigurations").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(listBucketMetricsConfigurationsRequest)
.withMarshaller(new ListBucketMetricsConfigurationsRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Returns a list of all buckets owned by the authenticated sender of the request.
*
*
* @param listBucketsRequest
* @return Result of the ListBuckets operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.ListBuckets
*/
@Override
public ListBucketsResponse listBuckets(ListBucketsRequest listBucketsRequest) throws AwsServiceException, SdkClientException,
S3Exception {
HttpResponseHandler<Response<ListBucketsResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
ListBucketsResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, listBucketsRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "ListBuckets");
return clientHandler.execute(new ClientExecutionParams<ListBucketsRequest, ListBucketsResponse>()
.withOperationName("ListBuckets").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(listBucketsRequest)
.withMarshaller(new ListBucketsRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
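/**
*
* Illustrative usage sketch (not part of the generated client): listing all buckets owned by the caller.
*/
private void listBucketsUsageExample() {
ListBucketsResponse response = listBuckets(ListBucketsRequest.builder().build());
// Each bucket summary exposes name() and creationDate().
response.buckets().forEach(bucket -> System.out.println(bucket.name()));
}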
/**
*
* This operation lists in-progress multipart uploads. An in-progress multipart upload is a multipart upload that
* has been initiated using the Initiate Multipart Upload request, but has not yet been completed or aborted.
*
*
* This operation returns at most 1,000 multipart uploads in the response. 1,000 multipart uploads is the maximum
* number of uploads a response can include, which is also the default value. You can further limit the number of
* uploads in a response by specifying the max-uploads parameter in the request. If additional
* multipart uploads satisfy the list criteria, the response will contain an IsTruncated element with
* the value true. To list the additional multipart uploads, use the key-marker and
* upload-id-marker request parameters.
*
*
* In the response, the uploads are sorted by key. If your application has initiated more than one multipart upload
* using the same object key, then uploads in the response are first sorted by key. Additionally, uploads are sorted
* in ascending order within each key by the upload initiation time.
*
*
* For more information on multipart uploads, see Uploading Objects Using Multipart
* Upload.
*
*
* For information on permissions required to use the multipart upload API, see Multipart Upload API and
* Permissions.
*
*
* The following operations are related to ListMultipartUploads:
*
*
* -
*
*
* -
*
* UploadPart
*
*
* -
*
*
* -
*
* ListParts
*
*
* -
*
*
*
*
* @param listMultipartUploadsRequest
* @return Result of the ListMultipartUploads operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.ListMultipartUploads
*/
@Override
public ListMultipartUploadsResponse listMultipartUploads(ListMultipartUploadsRequest listMultipartUploadsRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<ListMultipartUploadsResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(ListMultipartUploadsResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, listMultipartUploadsRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "ListMultipartUploads");
return clientHandler.execute(new ClientExecutionParams<ListMultipartUploadsRequest, ListMultipartUploadsResponse>()
.withOperationName("ListMultipartUploads").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(listMultipartUploadsRequest)
.withMarshaller(new ListMultipartUploadsRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
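/**
*
* Illustrative usage sketch (not part of the generated client): listing in-progress multipart uploads and
* noting the markers to use when the listing is truncated, as described above. The bucket name is a
* placeholder.
*/
private void listMultipartUploadsUsageExample() {
ListMultipartUploadsResponse response = listMultipartUploads(ListMultipartUploadsRequest.builder()
.bucket("example-bucket") // hypothetical bucket name
.build());
response.uploads().forEach(upload -> System.out.println(upload.key() + " " + upload.uploadId()));
if (Boolean.TRUE.equals(response.isTruncated())) {
// Pass nextKeyMarker() and nextUploadIdMarker() as keyMarker/uploadIdMarker on the next request.
System.out.println("More uploads after " + response.nextKeyMarker());
}
}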
/**
*
* This operation lists in-progress multipart uploads. An in-progress multipart upload is a multipart upload that
* has been initiated using the Initiate Multipart Upload request, but has not yet been completed or aborted.
*
*
* This operation returns at most 1,000 multipart uploads in the response. 1,000 multipart uploads is the maximum
* number of uploads a response can include, which is also the default value. You can further limit the number of
* uploads in a response by specifying the max-uploads parameter in the request. If additional
* multipart uploads satisfy the list criteria, the response will contain an IsTruncated element with
* the value true. To list the additional multipart uploads, use the key-marker and
* upload-id-marker request parameters.
*
*
* In the response, the uploads are sorted by key. If your application has initiated more than one multipart upload
* using the same object key, then uploads in the response are first sorted by key. Additionally, uploads are sorted
* in ascending order within each key by the upload initiation time.
*
*
* For more information on multipart uploads, see Uploading Objects Using Multipart
* Upload.
*
*
* For information on permissions required to use the multipart upload API, see Multipart Upload API and
* Permissions.
*
*
* The following operations are related to ListMultipartUploads:
*
*
* -
*
*
* -
*
* UploadPart
*
*
* -
*
*
* -
*
* ListParts
*
*
* -
*
*
*
*
*
* This is a variant of
* {@link #listMultipartUploads(software.amazon.awssdk.services.s3.model.ListMultipartUploadsRequest)} operation.
* The return type is a custom iterable that can be used to iterate through all the pages. SDK will internally
* handle making service calls for you.
*
*
* When this operation is called, a custom iterable is returned but no service calls are made yet. So there is no
* guarantee that the request is valid. As you iterate through the iterable, SDK will start lazily loading response
* pages by making service calls until there are no pages left or your iteration stops. If there are errors in your
* request, you will see the failures only after you start iterating through the iterable.
*
*
*
* The following are a few ways to iterate through the response pages:
*
* 1) Using a Stream
*
*
* {@code
* software.amazon.awssdk.services.s3.paginators.ListMultipartUploadsIterable responses = client.listMultipartUploadsPaginator(request);
* responses.stream().forEach(....);
* }
*
*
* 2) Using For loop
*
*
* {
* @code
* software.amazon.awssdk.services.s3.paginators.ListMultipartUploadsIterable responses = client
* .listMultipartUploadsPaginator(request);
* for (software.amazon.awssdk.services.s3.model.ListMultipartUploadsResponse response : responses) {
* // do something;
* }
* }
*
*
* 3) Use iterator directly
*
*
* {@code
* software.amazon.awssdk.services.s3.paginators.ListMultipartUploadsIterable responses = client.listMultipartUploadsPaginator(request);
* responses.iterator().forEachRemaining(....);
* }
*
*
* Please notice that the configuration of MaxUploads won't limit the number of results you get with the
* paginator. It only limits the number of results in each page.
*
*
* Note: If you prefer to have control on service calls, use the
* {@link #listMultipartUploads(software.amazon.awssdk.services.s3.model.ListMultipartUploadsRequest)}
* operation.
*
*
* @param listMultipartUploadsRequest
* @return A custom iterable that can be used to iterate through all the response pages.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.ListMultipartUploads
*/
@Override
public ListMultipartUploadsIterable listMultipartUploadsPaginator(ListMultipartUploadsRequest listMultipartUploadsRequest)
throws AwsServiceException, SdkClientException, S3Exception {
return new ListMultipartUploadsIterable(this, applyPaginatorUserAgent(listMultipartUploadsRequest));
}
/**
*
* Returns metadata about all versions of the objects in a bucket. You can also use request parameters as selection
* criteria to return metadata about a subset of all the object versions.
*
*
*
* A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of
* the response and handle it appropriately.
*
*
*
* To use this operation, you must have READ access to the bucket.
*
*
* This action is not supported by Amazon S3 on Outposts.
*
*
* The following operations are related to ListObjectVersions:
*
*
* -
*
* ListObjectsV2
*
*
* -
*
* GetObject
*
*
* -
*
* PutObject
*
*
* -
*
* DeleteObject
*
*
*
*
* @param listObjectVersionsRequest
* @return Result of the ListObjectVersions operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.ListObjectVersions
*/
@Override
public ListObjectVersionsResponse listObjectVersions(ListObjectVersionsRequest listObjectVersionsRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<ListObjectVersionsResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(ListObjectVersionsResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, listObjectVersionsRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "ListObjectVersions");
return clientHandler.execute(new ClientExecutionParams<ListObjectVersionsRequest, ListObjectVersionsResponse>()
.withOperationName("ListObjectVersions").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(listObjectVersionsRequest)
.withMarshaller(new ListObjectVersionsRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
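/**
*
* Illustrative usage sketch (not part of the generated client): listing object versions under a key prefix.
* Bucket and prefix values are placeholders.
*/
private void listObjectVersionsUsageExample() {
ListObjectVersionsResponse response = listObjectVersions(ListObjectVersionsRequest.builder()
.bucket("example-bucket") // hypothetical bucket name
.prefix("reports/") // hypothetical key prefix
.build());
// versions() holds current and noncurrent versions; deleteMarkers() holds delete markers.
response.versions().forEach(version -> System.out.println(version.key() + " " + version.versionId()));
}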
/**
*
* Returns metadata about all versions of the objects in a bucket. You can also use request parameters as selection
* criteria to return metadata about a subset of all the object versions.
*
*
*
* A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of
* the response and handle it appropriately.
*
*
*
* To use this operation, you must have READ access to the bucket.
*
*
* This action is not supported by Amazon S3 on Outposts.
*
*
* The following operations are related to ListObjectVersions:
*
*
* -
*
* ListObjectsV2
*
*
* -
*
* GetObject
*
*
* -
*
* PutObject
*
*
* -
*
* DeleteObject
*
*
*
*
*
* This is a variant of
* {@link #listObjectVersions(software.amazon.awssdk.services.s3.model.ListObjectVersionsRequest)} operation. The
* return type is a custom iterable that can be used to iterate through all the pages. SDK will internally handle
* making service calls for you.
*
*
* When this operation is called, a custom iterable is returned but no service calls are made yet. So there is no
* guarantee that the request is valid. As you iterate through the iterable, SDK will start lazily loading response
* pages by making service calls until there are no pages left or your iteration stops. If there are errors in your
* request, you will see the failures only after you start iterating through the iterable.
*
*
*
* The following are a few ways to iterate through the response pages:
*
* 1) Using a Stream
*
*
* {@code
* software.amazon.awssdk.services.s3.paginators.ListObjectVersionsIterable responses = client.listObjectVersionsPaginator(request);
* responses.stream().forEach(....);
* }
*
*
* 2) Using For loop
*
*
* {
* @code
* software.amazon.awssdk.services.s3.paginators.ListObjectVersionsIterable responses = client
* .listObjectVersionsPaginator(request);
* for (software.amazon.awssdk.services.s3.model.ListObjectVersionsResponse response : responses) {
* // do something;
* }
* }
*
*
* 3) Use iterator directly
*
*
* {@code
* software.amazon.awssdk.services.s3.paginators.ListObjectVersionsIterable responses = client.listObjectVersionsPaginator(request);
* responses.iterator().forEachRemaining(....);
* }
*
*
* Please notice that the configuration of MaxKeys won't limit the number of results you get with the paginator.
* It only limits the number of results in each page.
*
*
* Note: If you prefer to have control on service calls, use the
* {@link #listObjectVersions(software.amazon.awssdk.services.s3.model.ListObjectVersionsRequest)} operation.
*
*
* @param listObjectVersionsRequest
* @return A custom iterable that can be used to iterate through all the response pages.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.ListObjectVersions
*/
@Override
public ListObjectVersionsIterable listObjectVersionsPaginator(ListObjectVersionsRequest listObjectVersionsRequest)
throws AwsServiceException, SdkClientException, S3Exception {
return new ListObjectVersionsIterable(this, applyPaginatorUserAgent(listObjectVersionsRequest));
}
/**
*
* Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection
* criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Be
* sure to design your application to parse the contents of the response and handle it appropriately.
*
*
*
* This API has been revised. We recommend that you use the newer version, ListObjectsV2, when developing
* applications. For backward compatibility, Amazon S3 continues to support ListObjects.
*
*
*
* The following operations are related to ListObjects:
*
*
* -
*
* ListObjectsV2
*
*
* -
*
* GetObject
*
*
* -
*
* PutObject
*
*
* -
*
* CreateBucket
*
*
* -
*
* ListBuckets
*
*
*
*
* @param listObjectsRequest
* @return Result of the ListObjects operation returned by the service.
* @throws NoSuchBucketException
* The specified bucket does not exist.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.ListObjects
*/
@Override
public ListObjectsResponse listObjects(ListObjectsRequest listObjectsRequest) throws NoSuchBucketException,
AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<ListObjectsResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
ListObjectsResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, listObjectsRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "ListObjects");
return clientHandler.execute(new ClientExecutionParams<ListObjectsRequest, ListObjectsResponse>()
.withOperationName("ListObjects").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(listObjectsRequest)
.withMarshaller(new ListObjectsRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection
* criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or
* invalid XML. Make sure to design your application to parse the contents of the response and handle it
* appropriately.
*
*
* To use this operation, you must have READ access to the bucket.
*
*
* To use this operation in an AWS Identity and Access Management (IAM) policy, you must have permissions to perform
* the s3:ListBucket action. The bucket owner has this permission by default and can grant this
* permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your
* Amazon S3 Resources.
*
*
*
* This section describes the latest revision of the API. We recommend that you use this revised API for application
* development. For backward compatibility, Amazon S3 continues to support the prior version of this API, ListObjects.
*
*
*
* To get a list of your buckets, see ListBuckets.
*
*
* The following operations are related to ListObjectsV2:
*
*
* -
*
* GetObject
*
*
* -
*
* PutObject
*
*
* -
*
* CreateBucket
*
*
*
*
* @param listObjectsV2Request
* @return Result of the ListObjectsV2 operation returned by the service.
* @throws NoSuchBucketException
* The specified bucket does not exist.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.ListObjectsV2
*/
@Override
public ListObjectsV2Response listObjectsV2(ListObjectsV2Request listObjectsV2Request) throws NoSuchBucketException,
AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<ListObjectsV2Response>> responseHandler = protocolFactory.createCombinedResponseHandler(
ListObjectsV2Response::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, listObjectsV2Request
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "ListObjectsV2");
return clientHandler.execute(new ClientExecutionParams<ListObjectsV2Request, ListObjectsV2Response>()
.withOperationName("ListObjectsV2").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(listObjectsV2Request)
.withMarshaller(new ListObjectsV2RequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
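/**
*
* Illustrative usage sketch (not part of the generated client): fetching a single listObjectsV2 page limited
* with maxKeys. Bucket, prefix, and page size are placeholders; the paginator variant below follows
* continuation tokens automatically.
*/
private void listObjectsV2UsageExample() {
ListObjectsV2Response response = listObjectsV2(ListObjectsV2Request.builder()
.bucket("example-bucket") // hypothetical bucket name
.prefix("photos/") // hypothetical key prefix
.maxKeys(100) // at most 100 keys in this page
.build());
response.contents().forEach(object -> System.out.println(object.key() + " (" + object.size() + " bytes)"));
// When isTruncated() is true, pass nextContinuationToken() as continuationToken on the next request.
}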
/**
*
* Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection
* criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or
* invalid XML. Make sure to design your application to parse the contents of the response and handle it
* appropriately.
*
*
* To use this operation, you must have READ access to the bucket.
*
*
* To use this operation in an AWS Identity and Access Management (IAM) policy, you must have permissions to perform
* the s3:ListBucket action. The bucket owner has this permission by default and can grant this
* permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your
* Amazon S3 Resources.
*
*
*
* This section describes the latest revision of the API. We recommend that you use this revised API for application
* development. For backward compatibility, Amazon S3 continues to support the prior version of this API, ListObjects.
*
*
*
* To get a list of your buckets, see ListBuckets.
*
*
* The following operations are related to ListObjectsV2:
*
*
* -
*
* GetObject
*
*
* -
*
* PutObject
*
*
* -
*
* CreateBucket
*
*
*
*
*
* This is a variant of {@link #listObjectsV2(software.amazon.awssdk.services.s3.model.ListObjectsV2Request)}
* operation. The return type is a custom iterable that can be used to iterate through all the pages. SDK will
* internally handle making service calls for you.
*
*
* When this operation is called, a custom iterable is returned but no service calls are made yet. So there is no
* guarantee that the request is valid. As you iterate through the iterable, SDK will start lazily loading response
* pages by making service calls until there are no pages left or your iteration stops. If there are errors in your
* request, you will see the failures only after you start iterating through the iterable.
*
*
*
* The following are a few ways to iterate through the response pages:
*
* 1) Using a Stream
*
*
* {@code
* software.amazon.awssdk.services.s3.paginators.ListObjectsV2Iterable responses = client.listObjectsV2Paginator(request);
* responses.stream().forEach(....);
* }
*
*
* 2) Using For loop
*
*
* {
* @code
* software.amazon.awssdk.services.s3.paginators.ListObjectsV2Iterable responses = client.listObjectsV2Paginator(request);
* for (software.amazon.awssdk.services.s3.model.ListObjectsV2Response response : responses) {
* // do something;
* }
* }
*
*
* 3) Use iterator directly
*
*
* {@code
* software.amazon.awssdk.services.s3.paginators.ListObjectsV2Iterable responses = client.listObjectsV2Paginator(request);
* responses.iterator().forEachRemaining(....);
* }
*
*
* Please notice that the configuration of MaxKeys won't limit the number of results you get with the paginator.
* It only limits the number of results in each page.
*
*
* Note: If you prefer to have control on service calls, use the
* {@link #listObjectsV2(software.amazon.awssdk.services.s3.model.ListObjectsV2Request)} operation.
*
*
* @param listObjectsV2Request
* @return A custom iterable that can be used to iterate through all the response pages.
* @throws NoSuchBucketException
* The specified bucket does not exist.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.ListObjectsV2
*/
@Override
public ListObjectsV2Iterable listObjectsV2Paginator(ListObjectsV2Request listObjectsV2Request) throws NoSuchBucketException,
AwsServiceException, SdkClientException, S3Exception {
return new ListObjectsV2Iterable(this, applyPaginatorUserAgent(listObjectsV2Request));
}
/**
*
* Lists the parts that have been uploaded for a specific multipart upload. This operation must include the upload
* ID, which you obtain by sending the initiate multipart upload request (see CreateMultipartUpload).
* This request returns a maximum of 1,000 uploaded parts. The default number of parts returned is 1,000 parts. You
* can restrict the number of parts returned by specifying the max-parts request parameter. If your
* multipart upload consists of more than 1,000 parts, the response returns an IsTruncated field with
* the value of true, and a NextPartNumberMarker element. In subsequent ListParts requests
* you can include the part-number-marker query string parameter and set its value to the
* NextPartNumberMarker field value from the previous response.
*
*
* For more information on multipart uploads, see Uploading Objects Using Multipart
* Upload.
*
*
* For information on permissions required to use the multipart upload API, see Multipart Upload API and
* Permissions.
*
*
* The following operations are related to ListParts:
*
*
* -
*
*
* -
*
* UploadPart
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
* @param listPartsRequest
* @return Result of the ListParts operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.ListParts
*/
@Override
public ListPartsResponse listParts(ListPartsRequest listPartsRequest) throws AwsServiceException, SdkClientException,
S3Exception {
HttpResponseHandler<Response<ListPartsResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
ListPartsResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, listPartsRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "ListParts");
return clientHandler.execute(new ClientExecutionParams<ListPartsRequest, ListPartsResponse>()
.withOperationName("ListParts").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(listPartsRequest)
.withMarshaller(new ListPartsRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
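/**
*
* Illustrative usage sketch (not part of the generated client): listing the parts uploaded so far for a
* multipart upload. Bucket, key, and upload ID are placeholders.
*/
private void listPartsUsageExample() {
ListPartsResponse response = listParts(ListPartsRequest.builder()
.bucket("example-bucket") // hypothetical bucket name
.key("example-key") // hypothetical object key
.uploadId("example-upload-id") // ID returned by createMultipartUpload
.build());
response.parts().forEach(part -> System.out.println("Part " + part.partNumber() + ": " + part.eTag()));
// When isTruncated() is true, set partNumberMarker to nextPartNumberMarker() on the next request.
}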
/**
*
* Lists the parts that have been uploaded for a specific multipart upload. This operation must include the upload
* ID, which you obtain by sending the initiate multipart upload request (see CreateMultipartUpload).
* This request returns a maximum of 1,000 uploaded parts. The default number of parts returned is 1,000 parts. You
* can restrict the number of parts returned by specifying the max-parts request parameter. If your
* multipart upload consists of more than 1,000 parts, the response returns an IsTruncated field with
* the value of true, and a NextPartNumberMarker element. In subsequent ListParts requests
* you can include the part-number-marker query string parameter and set its value to the
* NextPartNumberMarker field value from the previous response.
*
*
* For more information on multipart uploads, see Uploading Objects Using Multipart
* Upload.
*
*
* For information on permissions required to use the multipart upload API, see Multipart Upload API and
* Permissions.
*
*
* The following operations are related to ListParts:
*
*
* -
*
*
* -
*
* UploadPart
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
*
* This is a variant of {@link #listParts(software.amazon.awssdk.services.s3.model.ListPartsRequest)} operation. The
* return type is a custom iterable that can be used to iterate through all the pages. SDK will internally handle
* making service calls for you.
*
*
* When this operation is called, a custom iterable is returned but no service calls are made yet. So there is no
* guarantee that the request is valid. As you iterate through the iterable, SDK will start lazily loading response
* pages by making service calls until there are no pages left or your iteration stops. If there are errors in your
* request, you will see the failures only after you start iterating through the iterable.
*
*
*
* The following are a few ways to iterate through the response pages:
*
* 1) Using a Stream
*
*
* {@code
* software.amazon.awssdk.services.s3.paginators.ListPartsIterable responses = client.listPartsPaginator(request);
* responses.stream().forEach(....);
* }
*
*
* 2) Using For loop
*
*
* {
* @code
* software.amazon.awssdk.services.s3.paginators.ListPartsIterable responses = client.listPartsPaginator(request);
* for (software.amazon.awssdk.services.s3.model.ListPartsResponse response : responses) {
* // do something;
* }
* }
*
*
* 3) Use iterator directly
*
*
* {@code
* software.amazon.awssdk.services.s3.paginators.ListPartsIterable responses = client.listPartsPaginator(request);
* responses.iterator().forEachRemaining(....);
* }
*
*
* Please notice that the configuration of MaxParts won't limit the number of results you get with the paginator.
* It only limits the number of results in each page.
*
*
* Note: If you prefer to have control on service calls, use the
* {@link #listParts(software.amazon.awssdk.services.s3.model.ListPartsRequest)} operation.
*
*
* @param listPartsRequest
* @return A custom iterable that can be used to iterate through all the response pages.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.ListParts
*/
@Override
public ListPartsIterable listPartsPaginator(ListPartsRequest listPartsRequest) throws AwsServiceException,
SdkClientException, S3Exception {
return new ListPartsIterable(this, applyPaginatorUserAgent(listPartsRequest));
}
/**
*
* Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer Acceleration is a bucket-level
* feature that enables you to perform faster data transfers to Amazon S3.
*
*
* To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration action. The bucket
* owner has this permission by default. The bucket owner can grant this permission to others. For more information
* about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your
* Amazon S3 Resources.
*
*
* The Transfer Acceleration state of a bucket can be set to one of the following two values:
*
*
* -
*
* Enabled – Enables accelerated data transfers to the bucket.
*
*
* -
*
* Suspended – Disables accelerated data transfers to the bucket.
*
*
*
*
* The
* GetBucketAccelerateConfiguration operation returns the transfer acceleration state of a bucket.
*
*
* After setting the Transfer Acceleration state of a bucket to Enabled, it might take up to thirty minutes before
* the data transfer rates to the bucket increase.
*
*
* The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (".").
*
*
* For more information about transfer acceleration, see Transfer Acceleration.
*
*
     * The following operations are related to {@code PutBucketAccelerateConfiguration}:
     *
     *
     * -
     *
     * CreateBucket
*
*
*
*
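     * A minimal usage sketch (illustrative only, not generated documentation): assuming an {@code S3Client} instance
     * named {@code s3Client}, a placeholder bucket name, and model classes from
     * {@code software.amazon.awssdk.services.s3.model}.
     *
     * {@code
     * // Enables Transfer Acceleration; "example-bucket" is a placeholder.
     * s3Client.putBucketAccelerateConfiguration(PutBucketAccelerateConfigurationRequest.builder()
     *         .bucket("example-bucket")
     *         .accelerateConfiguration(AccelerateConfiguration.builder()
     *                 .status(BucketAccelerateStatus.ENABLED)
     *                 .build())
     *         .build());
     * }
     *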
* @param putBucketAccelerateConfigurationRequest
* @return Result of the PutBucketAccelerateConfiguration operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.PutBucketAccelerateConfiguration
*/
@Override
public PutBucketAccelerateConfigurationResponse putBucketAccelerateConfiguration(
PutBucketAccelerateConfigurationRequest putBucketAccelerateConfigurationRequest) throws AwsServiceException,
SdkClientException, S3Exception {
        HttpResponseHandler<Response<PutBucketAccelerateConfigurationResponse>> responseHandler = protocolFactory
                .createCombinedResponseHandler(PutBucketAccelerateConfigurationResponse::builder,
                        new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
        List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
                putBucketAccelerateConfigurationRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutBucketAccelerateConfiguration");
return clientHandler
                    .execute(new ClientExecutionParams<PutBucketAccelerateConfigurationRequest, PutBucketAccelerateConfigurationResponse>()
.withOperationName("PutBucketAccelerateConfiguration").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(putBucketAccelerateConfigurationRequest)
.withMarshaller(new PutBucketAccelerateConfigurationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
     * Sets the permissions on an existing bucket using access control lists (ACL). For more information, see Using ACLs. To set the ACL of a
     * bucket, you must have {@code WRITE_ACP} permission.
*
*
* You can use one of the following two ways to set a bucket's permissions:
*
*
* -
*
* Specify the ACL in the request body
*
*
* -
*
* Specify permissions using request headers
*
*
*
*
*
* You cannot specify access permission using both the body and the request headers.
*
*
*
* Depending on your application needs, you may choose to set the ACL on a bucket using either the request body or
* the headers. For example, if you have an existing application that updates a bucket ACL using the request body,
* then you can continue to use that approach.
*
*
* Access Permissions
*
*
* You can set access permissions using one of the following methods:
*
*
* -
*
     * Specify a canned ACL with the {@code x-amz-acl} request header. Amazon S3 supports a set of predefined ACLs,
     * known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned
     * ACL name as the value of {@code x-amz-acl}. If you use this header, you cannot use other access
     * control-specific headers in your request. For more information, see Canned ACL.
*
*
* -
*
     * Specify access permissions explicitly with the {@code x-amz-grant-read}, {@code x-amz-grant-read-acp},
     * {@code x-amz-grant-write-acp}, and {@code x-amz-grant-full-control} headers. When using these headers,
     * you specify explicit access permissions and grantees (AWS accounts or Amazon S3 groups) who will receive the
     * permission. If you use these ACL-specific headers, you cannot use the {@code x-amz-acl} header to set a
* canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more
* information, see Access Control List
* (ACL) Overview.
*
*
* You specify each grantee as a type=value pair, where the type is one of the following:
*
*
* -
*
     * {@code id} – if the value specified is the canonical user ID of an AWS account
     *
     *
     * -
     *
     * {@code uri} – if you are granting permissions to a predefined group
     *
     *
     * -
     *
     * {@code emailAddress} – if the value specified is the email address of an AWS account
*
*
*
* Using email addresses to specify a grantee is only supported in the following AWS Regions:
*
*
* -
*
* US East (N. Virginia)
*
*
* -
*
* US West (N. California)
*
*
* -
*
* US West (Oregon)
*
*
* -
*
* Asia Pacific (Singapore)
*
*
* -
*
* Asia Pacific (Sydney)
*
*
* -
*
* Asia Pacific (Tokyo)
*
*
* -
*
* Europe (Ireland)
*
*
* -
*
* South America (São Paulo)
*
*
*
*
* For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS
* General Reference.
*
*
*
*
     * For example, the following {@code x-amz-grant-write} header grants create, overwrite, and delete objects
* permission to LogDelivery group predefined by Amazon S3 and two AWS accounts identified by their email addresses.
*
*
* x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333", id="555566667777"
*
*
*
*
* You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
*
*
* Grantee Values
*
*
* You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the
* following ways:
*
*
* -
*
* By the person's ID:
*
*
* <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>
*
*
* DisplayName is optional and ignored in the request
*
*
* -
*
* By URI:
*
*
* <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
*
*
* -
*
* By Email address:
*
*
     * <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="AmazonCustomerByEmail"><EmailAddress><>[email protected]<></EmailAddress></Grantee>
*
*
* The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the
* CanonicalUser.
*
*
*
* Using email addresses to specify a grantee is only supported in the following AWS Regions:
*
*
* -
*
* US East (N. Virginia)
*
*
* -
*
* US West (N. California)
*
*
* -
*
* US West (Oregon)
*
*
* -
*
* Asia Pacific (Singapore)
*
*
* -
*
* Asia Pacific (Sydney)
*
*
* -
*
* Asia Pacific (Tokyo)
*
*
* -
*
* Europe (Ireland)
*
*
* -
*
* South America (São Paulo)
*
*
*
*
* For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS
* General Reference.
*
*
*
*
* Related Resources
*
*
* -
*
* CreateBucket
*
*
* -
*
* DeleteBucket
*
*
* -
*
* GetObjectAcl
*
*
*
*
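     * For illustration only, a minimal sketch that applies a canned ACL; the {@code s3Client} variable and bucket
     * name are placeholders, not values from this file.
     *
     * {@code
     * // Sets the canned "private" ACL on a placeholder bucket.
     * s3Client.putBucketAcl(PutBucketAclRequest.builder()
     *         .bucket("example-bucket")
     *         .acl(BucketCannedACL.PRIVATE)
     *         .build());
     * }
     *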
* @param putBucketAclRequest
* @return Result of the PutBucketAcl operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.PutBucketAcl
*/
@Override
public PutBucketAclResponse putBucketAcl(PutBucketAclRequest putBucketAclRequest) throws AwsServiceException,
SdkClientException, S3Exception {
        HttpResponseHandler<Response<PutBucketAclResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
                PutBucketAclResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
        List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, putBucketAclRequest
                .overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutBucketAcl");
            return clientHandler.execute(new ClientExecutionParams<PutBucketAclRequest, PutBucketAclResponse>()
.withOperationName("PutBucketAcl").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(putBucketAclRequest)
.putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, HttpChecksumRequired.create())
.withMarshaller(new PutBucketAclRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Sets an analytics configuration for the bucket (specified by the analytics configuration ID). You can have up to
* 1,000 analytics configurations per bucket.
*
*
* You can choose to have storage class analysis export analysis reports sent to a comma-separated values (CSV) flat
     * file. See the {@code DataExport} request element. Reports are updated daily and are based on the object
* filters that you configure. When selecting data export, you specify a destination bucket and an optional
* destination prefix where the file is written. You can export the data to a destination bucket in a different
* account. However, the destination bucket must be in the same Region as the bucket that you are making the PUT
* analytics configuration to. For more information, see Amazon S3 Analytics – Storage
* Class Analysis.
*
*
*
* You must create a bucket policy on the destination bucket where the exported file is written to grant permissions
* to Amazon S3 to write objects to the bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.
*
*
*
     * To use this operation, you must have permissions to perform the {@code s3:PutAnalyticsConfiguration} action.
* The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more
* information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your
* Amazon S3 Resources.
*
*
* Special Errors
*
*
* -
*
* -
*
* HTTP Error: HTTP 400 Bad Request
*
*
* -
*
* Code: InvalidArgument
*
*
* -
*
* Cause: Invalid argument.
*
*
*
*
* -
*
* -
*
* HTTP Error: HTTP 400 Bad Request
*
*
* -
*
* Code: TooManyConfigurations
*
*
* -
*
* Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration
* limit.
*
*
*
*
* -
*
* -
*
* HTTP Error: HTTP 403 Forbidden
*
*
* -
*
* Code: AccessDenied
*
*
* -
*
* Cause: You are not the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration
* bucket permission to set the configuration on the bucket.
*
*
*
*
*
*
* Related Resources
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
* @param putBucketAnalyticsConfigurationRequest
* @return Result of the PutBucketAnalyticsConfiguration operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.PutBucketAnalyticsConfiguration
*/
@Override
public PutBucketAnalyticsConfigurationResponse putBucketAnalyticsConfiguration(
PutBucketAnalyticsConfigurationRequest putBucketAnalyticsConfigurationRequest) throws AwsServiceException,
SdkClientException, S3Exception {
        HttpResponseHandler<Response<PutBucketAnalyticsConfigurationResponse>> responseHandler = protocolFactory
                .createCombinedResponseHandler(PutBucketAnalyticsConfigurationResponse::builder,
                        new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
        List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
                putBucketAnalyticsConfigurationRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutBucketAnalyticsConfiguration");
return clientHandler
                    .execute(new ClientExecutionParams<PutBucketAnalyticsConfigurationRequest, PutBucketAnalyticsConfigurationResponse>()
.withOperationName("PutBucketAnalyticsConfiguration").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(putBucketAnalyticsConfigurationRequest)
.withMarshaller(new PutBucketAnalyticsConfigurationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
     * Sets the {@code cors} configuration for your bucket. If the configuration exists, Amazon S3 replaces it.
*
*
     * To use this operation, you must be allowed to perform the {@code s3:PutBucketCORS} action. By default, the
* bucket owner has this permission and can grant it to others.
*
*
     * You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you
     * might want to enable a request whose origin is {@code http://www.example.com} to access your Amazon S3
     * bucket at {@code my.example.bucket.com} by using the browser's {@code XMLHttpRequest} capability.
     *
     *
     * To enable cross-origin resource sharing (CORS) on a bucket, you add the {@code cors} subresource to the
     * bucket. The {@code cors} subresource is an XML document in which you configure rules that identify origins
     * and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.
*
*
     * When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it evaluates
     * the {@code cors} configuration on the bucket and uses the first {@code CORSRule} rule that matches the
     * incoming browser request to enable a cross-origin request. For a rule to match, the following conditions must be
     * met:
     *
     *
     * -
     *
     * The request's {@code Origin} header must match {@code AllowedOrigin} elements.
     *
     *
     * -
     *
     * The request method (for example, GET, PUT, HEAD, and so on) or the {@code Access-Control-Request-Method}
     * header in case of a pre-flight {@code OPTIONS} request must be one of the {@code AllowedMethod} elements.
     *
     *
     * -
     *
     * Every header specified in the {@code Access-Control-Request-Headers} request header of a pre-flight request
     * must match an {@code AllowedHeader} element.
*
*
*
*
* For more information about CORS, go to Enabling Cross-Origin Resource Sharing in
* the Amazon Simple Storage Service Developer Guide.
*
*
* Related Resources
*
*
* -
*
* GetBucketCors
*
*
* -
*
* DeleteBucketCors
*
*
* -
*
*
*
*
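     * A hedged usage sketch: one CORS rule permitting GET and PUT from a single origin. The {@code s3Client}
     * variable, bucket name, and origin are placeholders.
     *
     * {@code
     * // One rule allowing GET and PUT from http://www.example.com; names are placeholders.
     * CORSRule rule = CORSRule.builder()
     *         .allowedOrigins("http://www.example.com")
     *         .allowedMethods("GET", "PUT")
     *         .allowedHeaders("*")
     *         .build();
     * s3Client.putBucketCors(PutBucketCorsRequest.builder()
     *         .bucket("example-bucket")
     *         .corsConfiguration(CORSConfiguration.builder().corsRules(rule).build())
     *         .build());
     * }
     *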
* @param putBucketCorsRequest
* @return Result of the PutBucketCors operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.PutBucketCors
*/
@Override
public PutBucketCorsResponse putBucketCors(PutBucketCorsRequest putBucketCorsRequest) throws AwsServiceException,
SdkClientException, S3Exception {
        HttpResponseHandler<Response<PutBucketCorsResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
                PutBucketCorsResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
        List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, putBucketCorsRequest
                .overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutBucketCors");
            return clientHandler.execute(new ClientExecutionParams<PutBucketCorsRequest, PutBucketCorsResponse>()
.withOperationName("PutBucketCors").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(putBucketCorsRequest)
.putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, HttpChecksumRequired.create())
.withMarshaller(new PutBucketCorsRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
     * This implementation of the {@code PUT} operation uses the {@code encryption} subresource to set the
     * default encryption state of an existing bucket.
*
*
     * This implementation of the {@code PUT} operation sets default encryption for a bucket using server-side
* encryption with Amazon S3-managed keys SSE-S3 or AWS KMS customer master keys (CMKs) (SSE-KMS). For information
* about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket
* Encryption.
*
*
*
* This operation requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature Version 4).
*
*
*
* To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration
* action. The bucket owner has this permission by default. The bucket owner can grant this permission to others.
* For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your
* Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
*
*
* Related Resources
*
*
* -
*
*
* -
*
*
*
*
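     * As an illustrative sketch only, enabling default SSE-S3 (AES256) encryption; the {@code s3Client} variable and
     * bucket name are placeholders.
     *
     * {@code
     * // Applies bucket-default SSE-S3 encryption; "example-bucket" is a placeholder.
     * s3Client.putBucketEncryption(PutBucketEncryptionRequest.builder()
     *         .bucket("example-bucket")
     *         .serverSideEncryptionConfiguration(ServerSideEncryptionConfiguration.builder()
     *                 .rules(ServerSideEncryptionRule.builder()
     *                         .applyServerSideEncryptionByDefault(ServerSideEncryptionByDefault.builder()
     *                                 .sseAlgorithm(ServerSideEncryption.AES256)
     *                                 .build())
     *                         .build())
     *                 .build())
     *         .build());
     * }
     *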
* @param putBucketEncryptionRequest
* @return Result of the PutBucketEncryption operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.PutBucketEncryption
*/
@Override
public PutBucketEncryptionResponse putBucketEncryption(PutBucketEncryptionRequest putBucketEncryptionRequest)
throws AwsServiceException, SdkClientException, S3Exception {
        HttpResponseHandler<Response<PutBucketEncryptionResponse>> responseHandler = protocolFactory
                .createCombinedResponseHandler(PutBucketEncryptionResponse::builder,
                        new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
        List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, putBucketEncryptionRequest
                .overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutBucketEncryption");
            return clientHandler.execute(new ClientExecutionParams<PutBucketEncryptionRequest, PutBucketEncryptionResponse>()
.withOperationName("PutBucketEncryption").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(putBucketEncryptionRequest)
.putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, HttpChecksumRequired.create())
.withMarshaller(new PutBucketEncryptionRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Puts a S3 Intelligent-Tiering configuration to the specified bucket.
*
*
* The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to
* the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering
* delivers automatic cost savings by moving data between access tiers, when access patterns change.
*
*
* The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at
* least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects
* can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering
* storage class.
*
*
* If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30
* days. For more information, see Storage
* class for automatically optimizing frequently and infrequently accessed objects.
*
*
     * Operations related to {@code PutBucketIntelligentTieringConfiguration} include:
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
* @param putBucketIntelligentTieringConfigurationRequest
* @return Result of the PutBucketIntelligentTieringConfiguration operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.PutBucketIntelligentTieringConfiguration
*/
@Override
public PutBucketIntelligentTieringConfigurationResponse putBucketIntelligentTieringConfiguration(
PutBucketIntelligentTieringConfigurationRequest putBucketIntelligentTieringConfigurationRequest)
throws AwsServiceException, SdkClientException, S3Exception {
        HttpResponseHandler<Response<PutBucketIntelligentTieringConfigurationResponse>> responseHandler = protocolFactory
                .createCombinedResponseHandler(PutBucketIntelligentTieringConfigurationResponse::builder,
                        new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
        List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
                putBucketIntelligentTieringConfigurationRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutBucketIntelligentTieringConfiguration");
return clientHandler
                    .execute(new ClientExecutionParams<PutBucketIntelligentTieringConfigurationRequest, PutBucketIntelligentTieringConfigurationResponse>()
.withOperationName("PutBucketIntelligentTieringConfiguration")
.withCombinedResponseHandler(responseHandler).withMetricCollector(apiCallMetricCollector)
.withInput(putBucketIntelligentTieringConfigurationRequest)
.withMarshaller(new PutBucketIntelligentTieringConfigurationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
     * This implementation of the {@code PUT} operation adds an inventory configuration (identified by the
* inventory ID) to the bucket. You can have up to 1,000 inventory configurations per bucket.
*
*
* Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly basis, and the
* results are published to a flat file. The bucket that is inventoried is called the source bucket, and the
* bucket where the inventory flat file is stored is called the destination bucket. The destination
* bucket must be in the same AWS Region as the source bucket.
*
*
* When you configure an inventory for a source bucket, you specify the destination bucket where you
* want the inventory to be stored, and whether to generate the inventory daily or weekly. You can also configure
* what object metadata to include and whether to inventory all object versions or only current versions. For more
* information, see Amazon S3
* Inventory in the Amazon Simple Storage Service Developer Guide.
*
*
*
* You must create a bucket policy on the destination bucket to grant permissions to Amazon S3 to write
* objects to the bucket in the defined location. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.
*
*
*
     * To use this operation, you must have permissions to perform the {@code s3:PutInventoryConfiguration} action.
* The bucket owner has this permission by default and can grant this permission to others. For more information
* about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your
* Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
*
*
* Special Errors
*
*
* -
*
* HTTP 400 Bad Request Error
*
*
* -
*
* Code: InvalidArgument
*
*
* -
*
* Cause: Invalid Argument
*
*
*
*
* -
*
* HTTP 400 Bad Request Error
*
*
* -
*
* Code: TooManyConfigurations
*
*
* -
*
* Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration
* limit.
*
*
*
*
* -
*
* HTTP 403 Forbidden Error
*
*
* -
*
* Code: AccessDenied
*
*
* -
*
* Cause: You are not the owner of the specified bucket, or you do not have the
     * {@code s3:PutInventoryConfiguration} bucket permission to set the configuration on the bucket.
*
*
*
*
*
*
* Related Resources
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
* @param putBucketInventoryConfigurationRequest
* @return Result of the PutBucketInventoryConfiguration operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.PutBucketInventoryConfiguration
*/
@Override
public PutBucketInventoryConfigurationResponse putBucketInventoryConfiguration(
PutBucketInventoryConfigurationRequest putBucketInventoryConfigurationRequest) throws AwsServiceException,
SdkClientException, S3Exception {
        HttpResponseHandler<Response<PutBucketInventoryConfigurationResponse>> responseHandler = protocolFactory
                .createCombinedResponseHandler(PutBucketInventoryConfigurationResponse::builder,
                        new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
        List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
                putBucketInventoryConfigurationRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutBucketInventoryConfiguration");
return clientHandler
                    .execute(new ClientExecutionParams<PutBucketInventoryConfigurationRequest, PutBucketInventoryConfigurationResponse>()
.withOperationName("PutBucketInventoryConfiguration").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(putBucketInventoryConfigurationRequest)
.withMarshaller(new PutBucketInventoryConfigurationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For
* information about lifecycle configuration, see Managing Access Permissions to Your
* Amazon S3 Resources.
*
*
*
* Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or
* more object tags, or a combination of both. Accordingly, this section describes the latest API. The previous
* version of the API supported filtering based only on an object key name prefix, which is supported for backward
* compatibility. For the related API description, see PutBucketLifecycle.
*
*
*
* Rules
*
*
* You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML
* consisting of one or more rules. Each rule consists of the following:
*
*
* -
*
* Filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix,
* object tags, or a combination of both.
*
*
* -
*
* Status whether the rule is in effect.
*
*
* -
*
* One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects
* identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have
* many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides
* predefined actions that you can specify for current and noncurrent object versions.
*
*
*
*
* For more information, see Object Lifecycle Management
* and Lifecycle Configuration
* Elements.
*
*
* Permissions
*
*
* By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for
* example, lifecycle configuration and website configuration). Only the resource owner (that is, the AWS account
* that created it) can access the resource. The resource owner can optionally grant access permissions to others by
* writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration permission.
*
*
* You can also explicitly deny permissions. Explicit deny also supersedes any other permissions. If you want to
* block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for
* the following actions:
*
*
* -
*
* s3:DeleteObject
*
*
* -
*
* s3:DeleteObjectVersion
*
*
* -
*
* s3:PutLifecycleConfiguration
*
*
*
*
* For more information about permissions, see Managing Access Permissions to Your
* Amazon S3 Resources.
*
*
     * The following are related to {@code PutBucketLifecycleConfiguration}:
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
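     * A minimal illustrative configuration with a single expiration rule; the {@code s3Client} variable, bucket name,
     * rule ID, and prefix are placeholders.
     *
     * {@code
     * // Expires objects under the "logs/" prefix after 30 days; all names are placeholders.
     * LifecycleRule rule = LifecycleRule.builder()
     *         .id("expire-logs")
     *         .status(ExpirationStatus.ENABLED)
     *         .filter(LifecycleRuleFilter.builder().prefix("logs/").build())
     *         .expiration(LifecycleExpiration.builder().days(30).build())
     *         .build();
     * s3Client.putBucketLifecycleConfiguration(PutBucketLifecycleConfigurationRequest.builder()
     *         .bucket("example-bucket")
     *         .lifecycleConfiguration(BucketLifecycleConfiguration.builder().rules(rule).build())
     *         .build());
     * }
     *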
* @param putBucketLifecycleConfigurationRequest
* @return Result of the PutBucketLifecycleConfiguration operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.PutBucketLifecycleConfiguration
*/
@Override
public PutBucketLifecycleConfigurationResponse putBucketLifecycleConfiguration(
PutBucketLifecycleConfigurationRequest putBucketLifecycleConfigurationRequest) throws AwsServiceException,
SdkClientException, S3Exception {
        HttpResponseHandler<Response<PutBucketLifecycleConfigurationResponse>> responseHandler = protocolFactory
                .createCombinedResponseHandler(PutBucketLifecycleConfigurationResponse::builder,
                        new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
        List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
                putBucketLifecycleConfigurationRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutBucketLifecycleConfiguration");
return clientHandler
                    .execute(new ClientExecutionParams<PutBucketLifecycleConfigurationRequest, PutBucketLifecycleConfigurationResponse>()
.withOperationName("PutBucketLifecycleConfiguration")
.withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector)
.withInput(putBucketLifecycleConfigurationRequest)
.putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED,
HttpChecksumRequired.create())
.withMarshaller(new PutBucketLifecycleConfigurationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Set the logging parameters for a bucket and to specify permissions for who can view and modify the logging
* parameters. All logs are saved to buckets in the same AWS Region as the source bucket. To set the logging status
* of a bucket, you must be the bucket owner.
*
*
     * The bucket owner is automatically granted FULL_CONTROL to all logs. You use the {@code Grantee} request
     * element to grant access to other people. The {@code Permissions} request element specifies the kind of
* access the grantee has to the logs.
*
*
* Grantee Values
*
*
* You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the
* following ways:
*
*
* -
*
* By the person's ID:
*
*
* <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>
*
*
* DisplayName is optional and ignored in the request.
*
*
* -
*
* By Email address:
*
*
* <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="AmazonCustomerByEmail"><EmailAddress><>[email protected]<></EmailAddress></Grantee>
*
*
* The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the
* CanonicalUser.
*
*
* -
*
* By URI:
*
*
* <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
*
*
*
*
* To enable logging, you use LoggingEnabled and its children request elements. To disable logging, you use an empty
* BucketLoggingStatus request element:
*
*
* <BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />
*
*
* For more information about server access logging, see Server Access Logging.
*
*
* For more information about creating a bucket, see CreateBucket. For more
* information about returning the logging status of a bucket, see GetBucketLogging.
*
*
     * The following operations are related to {@code PutBucketLogging}:
*
*
* -
*
* PutObject
*
*
* -
*
* DeleteBucket
*
*
* -
*
* CreateBucket
*
*
* -
*
* GetBucketLogging
*
*
*
*
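     * An illustrative sketch that enables access logging to a target bucket; the {@code s3Client} variable, bucket
     * names, and prefix are placeholders.
     *
     * {@code
     * // Delivers access logs to a target bucket under the "logs/" prefix; names are placeholders.
     * s3Client.putBucketLogging(PutBucketLoggingRequest.builder()
     *         .bucket("example-bucket")
     *         .bucketLoggingStatus(BucketLoggingStatus.builder()
     *                 .loggingEnabled(LoggingEnabled.builder()
     *                         .targetBucket("example-log-bucket")
     *                         .targetPrefix("logs/")
     *                         .build())
     *                 .build())
     *         .build());
     * }
     *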
* @param putBucketLoggingRequest
* @return Result of the PutBucketLogging operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.PutBucketLogging
*/
@Override
public PutBucketLoggingResponse putBucketLogging(PutBucketLoggingRequest putBucketLoggingRequest) throws AwsServiceException,
SdkClientException, S3Exception {
        HttpResponseHandler<Response<PutBucketLoggingResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
                PutBucketLoggingResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
        List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, putBucketLoggingRequest
                .overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutBucketLogging");
            return clientHandler.execute(new ClientExecutionParams<PutBucketLoggingRequest, PutBucketLoggingResponse>()
.withOperationName("PutBucketLogging").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(putBucketLoggingRequest)
.putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, HttpChecksumRequired.create())
.withMarshaller(new PutBucketLoggingRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Sets a metrics configuration (specified by the metrics configuration ID) for the bucket. You can have up to 1,000
* metrics configurations per bucket. If you're updating an existing metrics configuration, note that this is a full
* replacement of the existing metrics configuration. If you don't include the elements you want to keep, they are
* erased.
*
*
     * To use this operation, you must have permissions to perform the {@code s3:PutMetricsConfiguration} action.
* The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more
* information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your
* Amazon S3 Resources.
*
*
* For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon
* CloudWatch.
*
*
     * The following operations are related to {@code PutBucketMetricsConfiguration}:
*
*
* -
*
*
* -
*
*
* -
*
*
*
*
     * {@code GetBucketLifecycle} has the following special error:
*
*
* -
*
* Error code: TooManyConfigurations
*
*
* -
*
* Description: You are attempting to create a new configuration but have already reached the 1,000-configuration
* limit.
*
*
* -
*
* HTTP Status Code: HTTP 400 Bad Request
*
*
*
*
*
*
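     * A sketch of a bucket-wide request-metrics configuration; the {@code s3Client} variable, bucket name, and
     * configuration ID are placeholders.
     *
     * {@code
     * // The same ID is supplied on the request and on the configuration; values are placeholders.
     * s3Client.putBucketMetricsConfiguration(PutBucketMetricsConfigurationRequest.builder()
     *         .bucket("example-bucket")
     *         .id("EntireBucket")
     *         .metricsConfiguration(MetricsConfiguration.builder().id("EntireBucket").build())
     *         .build());
     * }
     *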
* @param putBucketMetricsConfigurationRequest
* @return Result of the PutBucketMetricsConfiguration operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.PutBucketMetricsConfiguration
*/
@Override
public PutBucketMetricsConfigurationResponse putBucketMetricsConfiguration(
PutBucketMetricsConfigurationRequest putBucketMetricsConfigurationRequest) throws AwsServiceException,
SdkClientException, S3Exception {
        HttpResponseHandler<Response<PutBucketMetricsConfigurationResponse>> responseHandler = protocolFactory
                .createCombinedResponseHandler(PutBucketMetricsConfigurationResponse::builder,
                        new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
        List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
                putBucketMetricsConfigurationRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutBucketMetricsConfiguration");
return clientHandler
                    .execute(new ClientExecutionParams<PutBucketMetricsConfigurationRequest, PutBucketMetricsConfigurationResponse>()
.withOperationName("PutBucketMetricsConfiguration").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(putBucketMetricsConfigurationRequest)
.withMarshaller(new PutBucketMetricsConfigurationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event
* Notifications.
*
*
* Using this API, you can replace an existing notification configuration. The configuration is an XML file that
* defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to
* publish an event notification when it detects an event of the specified type.
*
*
* By default, your bucket has no event notifications configured. That is, the notification configuration will be an
     * empty {@code NotificationConfiguration}.
*
*
* <NotificationConfiguration>
*
*
* </NotificationConfiguration>
*
*
* This operation replaces the existing notification configuration with the configuration you include in the request
* body.
*
*
* After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS)
* or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to
* publish to it by sending a test notification. In the case of AWS Lambda destinations, Amazon S3 verifies that the
* Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more
* information, see Configuring
* Notifications for Amazon S3 Events.
*
*
* You can disable notifications by adding the empty NotificationConfiguration element.
*
*
* By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a
* bucket policy to grant permission to other users to set this configuration with
     * {@code s3:PutBucketNotification} permission.
*
*
*
* The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS
* topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon
* S3 sends test messages to your SNS topic. If the message fails, the entire PUT operation will fail, and Amazon S3
* will not add the configuration to your bucket.
*
*
*
* Responses
*
*
     * If the configuration in the request body includes only one {@code TopicConfiguration} specifying only the
     * {@code s3:ReducedRedundancyLostObject} event type, the response will also include the
     * {@code x-amz-sns-test-message-id} header containing the message ID of the test notification sent to the
     * topic.
*
*
     * The following operation is related to {@code PutBucketNotificationConfiguration}:
*
*
* -
*
*
*
*
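     * A sketch that routes object-created events to an SQS queue; the {@code s3Client} variable, bucket name, and
     * queue ARN are placeholders and not values from this file.
     *
     * {@code
     * // Publishes object-created events to a placeholder SQS queue ARN.
     * s3Client.putBucketNotificationConfiguration(PutBucketNotificationConfigurationRequest.builder()
     *         .bucket("example-bucket")
     *         .notificationConfiguration(NotificationConfiguration.builder()
     *                 .queueConfigurations(QueueConfiguration.builder()
     *                         .queueArn("arn:aws:sqs:us-west-2:123456789012:example-queue")
     *                         .events(Event.S3_OBJECT_CREATED)
     *                         .build())
     *                 .build())
     *         .build());
     * }
     *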
* @param putBucketNotificationConfigurationRequest
* @return Result of the PutBucketNotificationConfiguration operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.PutBucketNotificationConfiguration
*/
@Override
public PutBucketNotificationConfigurationResponse putBucketNotificationConfiguration(
PutBucketNotificationConfigurationRequest putBucketNotificationConfigurationRequest) throws AwsServiceException,
SdkClientException, S3Exception {
        HttpResponseHandler<Response<PutBucketNotificationConfigurationResponse>> responseHandler = protocolFactory
                .createCombinedResponseHandler(PutBucketNotificationConfigurationResponse::builder,
                        new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
        List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
                putBucketNotificationConfigurationRequest.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutBucketNotificationConfiguration");
return clientHandler
                    .execute(new ClientExecutionParams<PutBucketNotificationConfigurationRequest, PutBucketNotificationConfigurationResponse>()
.withOperationName("PutBucketNotificationConfiguration").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(putBucketNotificationConfigurationRequest)
.withMarshaller(new PutBucketNotificationConfigurationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
     * Creates or modifies {@code OwnershipControls} for an Amazon S3 bucket. To use this operation, you must have
     * the {@code s3:PutBucketOwnershipControls} permission. For more information about Amazon S3 permissions, see
* Specifying Permissions in a
* Policy.
*
*
* For information about Amazon S3 Object Ownership, see Using Object Ownership.
*
*
     * The following operations are related to {@code PutBucketOwnershipControls}:
*
*
* -
*
*
* -
*
*
*
*
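     * A minimal sketch applying the BucketOwnerPreferred setting; the {@code s3Client} variable and bucket name are
     * placeholders.
     *
     * {@code
     * // Prefers bucket-owner ownership of newly uploaded objects; "example-bucket" is a placeholder.
     * s3Client.putBucketOwnershipControls(PutBucketOwnershipControlsRequest.builder()
     *         .bucket("example-bucket")
     *         .ownershipControls(OwnershipControls.builder()
     *                 .rules(OwnershipControlsRule.builder()
     *                         .objectOwnership(ObjectOwnership.BUCKET_OWNER_PREFERRED)
     *                         .build())
     *                 .build())
     *         .build());
     * }
     *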
* @param putBucketOwnershipControlsRequest
* @return Result of the PutBucketOwnershipControls operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.PutBucketOwnershipControls
*/
@Override
public PutBucketOwnershipControlsResponse putBucketOwnershipControls(
PutBucketOwnershipControlsRequest putBucketOwnershipControlsRequest) throws AwsServiceException, SdkClientException,
S3Exception {
        HttpResponseHandler<Response<PutBucketOwnershipControlsResponse>> responseHandler = protocolFactory
                .createCombinedResponseHandler(PutBucketOwnershipControlsResponse::builder,
                        new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
        List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, putBucketOwnershipControlsRequest
                .overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutBucketOwnershipControls");
return clientHandler
                    .execute(new ClientExecutionParams<PutBucketOwnershipControlsRequest, PutBucketOwnershipControlsResponse>()
.withOperationName("PutBucketOwnershipControls").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(putBucketOwnershipControlsRequest)
.withMarshaller(new PutBucketOwnershipControlsRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an identity other than the root user
* of the AWS account that owns the bucket, the calling identity must have the PutBucketPolicy
* permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
*
*
     * If you don't have {@code PutBucketPolicy} permissions, Amazon S3 returns a {@code 403 Access Denied}
     * error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's
     * account, Amazon S3 returns a {@code 405 Method Not Allowed} error.
*
*
*
* As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even
* if the policy explicitly denies the root user the ability to perform this action.
*
*
*
* For more information about bucket policies, see Using Bucket Policies and User
* Policies.
*
*
     * The following operations are related to {@code PutBucketPolicy}:
*
*
* -
*
* CreateBucket
*
*
* -
*
* DeleteBucket
*
*
*
*
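     * A sketch only; the policy JSON shown is an abbreviated placeholder, as are the {@code s3Client} variable and
     * bucket name.
     *
     * {@code
     * // Supply a complete JSON policy document in practice; this string is a placeholder.
     * String policyJson = "{\"Version\":\"2012-10-17\",\"Statement\":[]}";
     * s3Client.putBucketPolicy(PutBucketPolicyRequest.builder()
     *         .bucket("example-bucket")
     *         .policy(policyJson)
     *         .build());
     * }
     *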
* @param putBucketPolicyRequest
* @return Result of the PutBucketPolicy operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.PutBucketPolicy
*/
@Override
public PutBucketPolicyResponse putBucketPolicy(PutBucketPolicyRequest putBucketPolicyRequest) throws AwsServiceException,
SdkClientException, S3Exception {
        HttpResponseHandler<Response<PutBucketPolicyResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
                PutBucketPolicyResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
        List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, putBucketPolicyRequest
                .overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutBucketPolicy");
            return clientHandler.execute(new ClientExecutionParams<PutBucketPolicyRequest, PutBucketPolicyResponse>()
.withOperationName("PutBucketPolicy").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(putBucketPolicyRequest)
.putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, HttpChecksumRequired.create())
.withMarshaller(new PutBucketPolicyRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Creates a replication configuration or replaces an existing one. For more information, see Replication in the Amazon S3
* Developer Guide.
*
*
*
* To perform this operation, the user or role performing the operation must have the iam:PassRole permission.
*
*
*
* Specify the replication configuration in the request body. In the replication configuration, you provide the name
* of the destination bucket where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume
* to replicate objects on your behalf, and other relevant information.
*
*
* A replication configuration must include at least one rule, and can contain a maximum of 1,000. Each rule
* identifies a subset of objects to replicate by filtering the objects in the source bucket. To choose additional
* subsets of objects to replicate, add a rule for each subset. All rules must specify the same destination bucket.
*
*
* To specify a subset of the objects in the source bucket to apply a replication rule to, add the Filter element as
* a child of the Rule element. You can filter objects based on an object key prefix, one or more object tags, or
* both. When you add the Filter element in the configuration, you must also add the following elements:
     * {@code DeleteMarkerReplication}, {@code Status}, and {@code Priority}.
*
*
*
* The latest version of the replication configuration XML is V2. XML V2 replication configurations are those that
     * contain the {@code Filter} element for rules, and rules that specify S3 Replication Time Control (S3 RTC).
     * In XML V2 replication configurations, Amazon S3 doesn't replicate delete markers. Therefore, you must set the
     * {@code DeleteMarkerReplication} element to {@code Disabled}. For backward compatibility, Amazon S3
* continues to support the XML V1 replication configuration.
*
*
*
* For information about enabling versioning on a bucket, see Using Versioning.
*
*
* By default, a resource owner, in this case the AWS account that created the bucket, can perform this operation.
* The resource owner can also grant others permissions to perform the operation. For more information about
* permissions, see Specifying
* Permissions in a Policy and Managing Access Permissions to Your
* Amazon S3 Resources.
*
*
* Handling Replication of Encrypted Objects
*
*
* By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side encryption with CMKs
* stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following:
     * {@code SourceSelectionCriteria}, {@code SseKmsEncryptedObjects}, {@code Status},
     * {@code EncryptionConfiguration}, and {@code ReplicaKmsKeyID}. For information about replication
* configuration, see Replicating
* Objects Created with SSE Using CMKs stored in AWS KMS.
*
*
     * For information on {@code PutBucketReplication} errors, see List of
* replication-related error codes
*
*
     * The following operations are related to {@code PutBucketReplication}:
*
*
* -
*
*
* -
*
*
*
*
* @param putBucketReplicationRequest
* @return Result of the PutBucketReplication operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.PutBucketReplication
*/
@Override
public PutBucketReplicationResponse putBucketReplication(PutBucketReplicationRequest putBucketReplicationRequest)
throws AwsServiceException, SdkClientException, S3Exception {
        HttpResponseHandler<Response<PutBucketReplicationResponse>> responseHandler = protocolFactory
                .createCombinedResponseHandler(PutBucketReplicationResponse::builder,
                        new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
        List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, putBucketReplicationRequest
                .overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutBucketReplication");
            return clientHandler.execute(new ClientExecutionParams<PutBucketReplicationRequest, PutBucketReplicationResponse>()
.withOperationName("PutBucketReplication").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(putBucketReplicationRequest)
.putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, HttpChecksumRequired.create())
.withMarshaller(new PutBucketReplicationRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the
* bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the
* download will be charged for the download. For more information, see Requester Pays Buckets.
*
*
     * The following operations are related to {@code PutBucketRequestPayment}:
*
*
* -
*
* CreateBucket
*
*
* -
*
*
*
*
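     * A minimal sketch enabling Requester Pays; the {@code s3Client} variable and bucket name are placeholders.
     *
     * {@code
     * // Makes the requester pay for downloads; "example-bucket" is a placeholder.
     * s3Client.putBucketRequestPayment(PutBucketRequestPaymentRequest.builder()
     *         .bucket("example-bucket")
     *         .requestPaymentConfiguration(RequestPaymentConfiguration.builder()
     *                 .payer(Payer.REQUESTER)
     *                 .build())
     *         .build());
     * }
     *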
* @param putBucketRequestPaymentRequest
* @return Result of the PutBucketRequestPayment operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.PutBucketRequestPayment
*/
@Override
public PutBucketRequestPaymentResponse putBucketRequestPayment(PutBucketRequestPaymentRequest putBucketRequestPaymentRequest)
throws AwsServiceException, SdkClientException, S3Exception {
        HttpResponseHandler<Response<PutBucketRequestPaymentResponse>> responseHandler = protocolFactory
                .createCombinedResponseHandler(PutBucketRequestPaymentResponse::builder,
                        new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
        List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, putBucketRequestPaymentRequest
                .overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutBucketRequestPayment");
return clientHandler
.execute(new ClientExecutionParams<PutBucketRequestPaymentRequest, PutBucketRequestPaymentResponse>()
.withOperationName("PutBucketRequestPayment")
.withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector)
.withInput(putBucketRequestPaymentRequest)
.putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED,
HttpChecksumRequired.create())
.withMarshaller(new PutBucketRequestPaymentRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Sets the tags for a bucket.
*
*
* Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS
* account bill with tag key values included. Then, to see the cost of combined resources, organize your billing
* information according to resources with the same tag key values. For example, you can tag several resources with
* a specific application name, and then organize your billing information to see the total cost of that application
* across several services. For more information, see Cost Allocation and
* Tagging.
*
*
*
* Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old
* value. For more information, see Using Cost Allocation in Amazon S3
* Bucket Tags.
*
*
*
* To use this operation, you must have permissions to perform the s3:PutBucketTagging action. The
* bucket owner has this permission by default and can grant this permission to others. For more information about
* permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your
* Amazon S3 Resources.
*
*
* PutBucketTagging has the following special errors:
*
*
* - Error code: InvalidTagError
*
*   Description: The tag provided was not a valid tag. This error can occur if the tag did not pass input
*   validation. For information about tag restrictions, see User-Defined Tag Restrictions and AWS-Generated Cost
*   Allocation Tag Restrictions.
*
* - Error code: MalformedXMLError
*
*   Description: The XML provided does not match the schema.
*
* - Error code: OperationAbortedError
*
*   Description: A conflicting conditional operation is currently in progress against this resource. Please try
*   again.
*
* - Error code: InternalError
*
*   Description: The service was unable to apply the provided tag to the bucket.
*
* The following operations are related to PutBucketTagging:
*
* - GetBucketTagging
*
* - DeleteBucketTagging
*
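* For illustration only, a minimal usage sketch (not part of the generated documentation): assuming an existing
* S3Client named s3 and a bucket named amzn-s3-demo-bucket, a single cost-allocation tag could be applied as
* follows.
*
* {@code
* s3.putBucketTagging(PutBucketTaggingRequest.builder()
*         .bucket("amzn-s3-demo-bucket")
*         .tagging(Tagging.builder()
*                 .tagSet(Tag.builder().key("project").value("analytics").build())
*                 .build())
*         .build());
* }
*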
* @param putBucketTaggingRequest
* @return Result of the PutBucketTagging operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.PutBucketTagging
*/
@Override
public PutBucketTaggingResponse putBucketTagging(PutBucketTaggingRequest putBucketTaggingRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<PutBucketTaggingResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
PutBucketTaggingResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, putBucketTaggingRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutBucketTagging");
return clientHandler.execute(new ClientExecutionParams<PutBucketTaggingRequest, PutBucketTaggingResponse>()
.withOperationName("PutBucketTagging").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(putBucketTaggingRequest)
.putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, HttpChecksumRequired.create())
.withMarshaller(new PutBucketTaggingRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Sets the versioning state of an existing bucket. To set the versioning state, you must be the bucket owner.
*
*
* You can set the versioning state with one of the following values:
*
*
* Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique
* version ID.
*
*
* Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the
* version ID null.
*
*
* If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning
* request does not return a versioning state value.
*
*
* If the bucket owner enables MFA Delete in the bucket versioning configuration, the bucket owner must include the
* x-amz-mfa request header and the Status and the MfaDelete request elements in a request to set the versioning
* state of the bucket.
*
*
*
* If you have an object expiration lifecycle policy in your non-versioned bucket and you want to maintain the same
* permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent
* expiration lifecycle policy will manage the deletes of the noncurrent object versions in the version-enabled
* bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more
* information, see Lifecycle and Versioning.
*
*
*
* Related Resources
*
*
* - CreateBucket
*
* - DeleteBucket
*
* - GetBucketVersioning
*
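* For illustration only, a minimal usage sketch (not part of the generated documentation): assuming an existing
* S3Client named s3 and a bucket named amzn-s3-demo-bucket, versioning could be enabled as follows.
*
* {@code
* s3.putBucketVersioning(PutBucketVersioningRequest.builder()
*         .bucket("amzn-s3-demo-bucket")
*         .versioningConfiguration(VersioningConfiguration.builder()
*                 .status(BucketVersioningStatus.ENABLED)
*                 .build())
*         .build());
* }
*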
* @param putBucketVersioningRequest
* @return Result of the PutBucketVersioning operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.PutBucketVersioning
*/
@Override
public PutBucketVersioningResponse putBucketVersioning(PutBucketVersioningRequest putBucketVersioningRequest)
throws AwsServiceException, SdkClientException, S3Exception {
HttpResponseHandler<Response<PutBucketVersioningResponse>> responseHandler = protocolFactory
.createCombinedResponseHandler(PutBucketVersioningResponse::builder,
new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, putBucketVersioningRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutBucketVersioning");
return clientHandler.execute(new ClientExecutionParams<PutBucketVersioningRequest, PutBucketVersioningResponse>()
.withOperationName("PutBucketVersioning").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(putBucketVersioningRequest)
.putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, HttpChecksumRequired.create())
.withMarshaller(new PutBucketVersioningRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Sets the configuration of the website that is specified in the website subresource. To configure a
* bucket as a website, you can add this subresource on the bucket with website configuration information such as
* the file name of the index document and any redirect rules. For more information, see Hosting Websites on Amazon S3.
*
*
* This PUT operation requires the S3:PutBucketWebsite permission. By default, only the bucket owner
* can configure the website attached to a bucket; however, bucket owners can allow other users to set the website
* configuration by writing a bucket policy that grants them the S3:PutBucketWebsite permission.
*
*
* To redirect all website requests sent to the bucket's website endpoint, you add a website configuration with the
* following elements. Because all requests are sent to another website, you don't need to provide an index document
* name for the bucket.
*
*
* - WebsiteConfiguration
*
* - RedirectAllRequestsTo
*
* - HostName
*
* - Protocol
*
* If you want granular control over redirects, you can use the following elements to add routing rules that
* describe conditions for redirecting requests and information about the redirect destination. In this case, the
* website configuration must provide an index document for the bucket, because some requests might not be
* redirected.
*
*
* - WebsiteConfiguration
*
* - IndexDocument
*
* - Suffix
*
* - ErrorDocument
*
* - Key
*
* - RoutingRules
*
* - RoutingRule
*
* - Condition
*
* - HttpErrorCodeReturnedEquals
*
* - KeyPrefixEquals
*
* - Redirect
*
* - Protocol
*
* - HostName
*
* - ReplaceKeyPrefixWith
*
* - ReplaceKeyWith
*
* - HttpRedirectCode
*
* Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more than 50 routing
* rules, you can use object redirect. For more information, see Configuring an Object
* Redirect in the Amazon Simple Storage Service Developer Guide.
*
*
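* For illustration only, a minimal usage sketch (not part of the generated documentation): assuming an existing
* S3Client named s3 and a bucket named amzn-s3-demo-bucket, a basic website configuration with an index and error
* document could be set as follows.
*
* {@code
* s3.putBucketWebsite(PutBucketWebsiteRequest.builder()
*         .bucket("amzn-s3-demo-bucket")
*         .websiteConfiguration(WebsiteConfiguration.builder()
*                 .indexDocument(IndexDocument.builder().suffix("index.html").build())
*                 .errorDocument(ErrorDocument.builder().key("error.html").build())
*                 .build())
*         .build());
* }
*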
* @param putBucketWebsiteRequest
* @return Result of the PutBucketWebsite operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.PutBucketWebsite
*/
@Override
public PutBucketWebsiteResponse putBucketWebsite(PutBucketWebsiteRequest putBucketWebsiteRequest) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<PutBucketWebsiteResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
PutBucketWebsiteResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, putBucketWebsiteRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutBucketWebsite");
return clientHandler.execute(new ClientExecutionParams<PutBucketWebsiteRequest, PutBucketWebsiteResponse>()
.withOperationName("PutBucketWebsite").withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector).withInput(putBucketWebsiteRequest)
.putExecutionAttribute(SdkInternalExecutionAttribute.HTTP_CHECKSUM_REQUIRED, HttpChecksumRequired.create())
.withMarshaller(new PutBucketWebsiteRequestMarshaller(protocolFactory)));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.
*
*
* Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the
* bucket.
*
*
* Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it
* overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make
* sure to build it into your application layer or use versioning instead.
*
*
* To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you
* use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an
* error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag
* to the calculated MD5 value.
*
*
*
* The Content-MD5 header is required for any request to upload an object with a retention period
* configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock
* Overview in the Amazon Simple Storage Service Developer Guide.
*
*
*
* Server-side Encryption
*
*
* You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as
* it writes it to disks in its data centers and decrypts the data when you access it. You have the option to
* provide your own encryption key or use AWS managed encryption keys. For more information, see Using Server-Side
* Encryption.
*
*
* Access Control List (ACL)-Specific Request Headers
*
*
* You can use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full
* access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined
* groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see
* Access Control List (ACL)
* Overview and Managing ACLs
* Using the REST API.
*
*
* Storage Class Options
*
*
* By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class
* provides high durability and high availability. Depending on performance needs, you can specify a different
* Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the
* Amazon S3 Service Developer Guide.
*
*
* Versioning
*
*
* If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being
* stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives
* multiple write requests for the same object simultaneously, it stores all of the objects.
*
*
* For more information about versioning, see Adding
* Objects to Versioning Enabled Buckets. For information about returning the versioning state of a bucket, see
* GetBucketVersioning.
*
*
* Related Resources
*
*
* - CopyObject
*
* - DeleteObject
*
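* For illustration only, a minimal usage sketch (not part of the generated documentation): assuming an existing
* S3Client named s3, a bucket named amzn-s3-demo-bucket, and a local file myfile.txt, an object could be uploaded
* as follows.
*
* {@code
* s3.putObject(PutObjectRequest.builder()
*         .bucket("amzn-s3-demo-bucket")
*         .key("myfile.txt")
*         .build(),
*         RequestBody.fromFile(new File("myfile.txt")));
* }
*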
* @param putObjectRequest
* @param requestBody
* The content to send to the service. A {@link RequestBody} can be created using one of several factory
* methods for various sources of data. For example, to create a request body from a file you can do the
* following.
*
*
* {@code RequestBody.fromFile(new File("myfile.txt"))}
*
*
* See documentation in {@link RequestBody} for additional details and which sources of data are supported.
* The service documentation for the request content is as follows '
*
* Object data.
*
* '
* @return Result of the PutObject operation returned by the service.
* @throws SdkException
* Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for
* catch all scenarios.
* @throws SdkClientException
* If any client side error occurs such as an IO related failure, failure to get credentials, etc.
* @throws S3Exception
* Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type.
* @sample S3Client.PutObject
*/
@Override
public PutObjectResponse putObject(PutObjectRequest putObjectRequest, RequestBody requestBody) throws AwsServiceException,
SdkClientException, S3Exception {
HttpResponseHandler<Response<PutObjectResponse>> responseHandler = protocolFactory.createCombinedResponseHandler(
PutObjectResponse::builder, new XmlOperationMetadata().withHasStreamingSuccessResponse(false));
List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration, putObjectRequest
.overrideConfiguration().orElse(null));
MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create() : MetricCollector
.create("ApiCall");
try {
apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "S3");
apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "PutObject");
return clientHandler.execute(new ClientExecutionParams<PutObjectRequest, PutObjectResponse>()
.withOperationName("PutObject")
.withCombinedResponseHandler(responseHandler)
.withMetricCollector(apiCallMetricCollector)
.withInput(putObjectRequest)
.withRequestBody(requestBody)
.withMarshaller(
StreamingRequestMarshaller.<PutObjectRequest> builder()
.delegateMarshaller(new PutObjectRequestMarshaller(protocolFactory)).requestBody(requestBody)
.build()));
} finally {
metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
}
}
/**
*
* Uses the