// All Downloads are FREE. Search and download functionalities are using the official Maven repository.
//
// software.amazon.awssdk.services.s3.S3AsyncClient Maven / Gradle / Ivy

/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * 
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 * 
 * http://aws.amazon.com/apache2.0
 * 
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */

package software.amazon.awssdk.services.s3;

import java.nio.file.Path;
import java.util.concurrent.CompletableFuture;
import java.util.function.Consumer;
import software.amazon.awssdk.annotations.Generated;
import software.amazon.awssdk.core.SdkClient;
import software.amazon.awssdk.core.async.AsyncRequestBody;
import software.amazon.awssdk.core.async.AsyncResponseTransformer;
import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest;
import software.amazon.awssdk.services.s3.model.AbortMultipartUploadResponse;
import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest;
import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse;
import software.amazon.awssdk.services.s3.model.CopyObjectRequest;
import software.amazon.awssdk.services.s3.model.CopyObjectResponse;
import software.amazon.awssdk.services.s3.model.CreateBucketRequest;
import software.amazon.awssdk.services.s3.model.CreateBucketResponse;
import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketAnalyticsConfigurationRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketAnalyticsConfigurationResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketCorsRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketCorsResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketEncryptionRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketEncryptionResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketIntelligentTieringConfigurationRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketIntelligentTieringConfigurationResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketInventoryConfigurationRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketInventoryConfigurationResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketLifecycleRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketLifecycleResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketMetricsConfigurationRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketMetricsConfigurationResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketOwnershipControlsRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketOwnershipControlsResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketPolicyRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketPolicyResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketReplicationRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketReplicationResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketTaggingRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketTaggingResponse;
import software.amazon.awssdk.services.s3.model.DeleteBucketWebsiteRequest;
import software.amazon.awssdk.services.s3.model.DeleteBucketWebsiteResponse;
import software.amazon.awssdk.services.s3.model.DeleteObjectRequest;
import software.amazon.awssdk.services.s3.model.DeleteObjectResponse;
import software.amazon.awssdk.services.s3.model.DeleteObjectTaggingRequest;
import software.amazon.awssdk.services.s3.model.DeleteObjectTaggingResponse;
import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest;
import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse;
import software.amazon.awssdk.services.s3.model.DeletePublicAccessBlockRequest;
import software.amazon.awssdk.services.s3.model.DeletePublicAccessBlockResponse;
import software.amazon.awssdk.services.s3.model.GetBucketAccelerateConfigurationRequest;
import software.amazon.awssdk.services.s3.model.GetBucketAccelerateConfigurationResponse;
import software.amazon.awssdk.services.s3.model.GetBucketAclRequest;
import software.amazon.awssdk.services.s3.model.GetBucketAclResponse;
import software.amazon.awssdk.services.s3.model.GetBucketAnalyticsConfigurationRequest;
import software.amazon.awssdk.services.s3.model.GetBucketAnalyticsConfigurationResponse;
import software.amazon.awssdk.services.s3.model.GetBucketCorsRequest;
import software.amazon.awssdk.services.s3.model.GetBucketCorsResponse;
import software.amazon.awssdk.services.s3.model.GetBucketEncryptionRequest;
import software.amazon.awssdk.services.s3.model.GetBucketEncryptionResponse;
import software.amazon.awssdk.services.s3.model.GetBucketIntelligentTieringConfigurationRequest;
import software.amazon.awssdk.services.s3.model.GetBucketIntelligentTieringConfigurationResponse;
import software.amazon.awssdk.services.s3.model.GetBucketInventoryConfigurationRequest;
import software.amazon.awssdk.services.s3.model.GetBucketInventoryConfigurationResponse;
import software.amazon.awssdk.services.s3.model.GetBucketLifecycleConfigurationRequest;
import software.amazon.awssdk.services.s3.model.GetBucketLifecycleConfigurationResponse;
import software.amazon.awssdk.services.s3.model.GetBucketLocationRequest;
import software.amazon.awssdk.services.s3.model.GetBucketLocationResponse;
import software.amazon.awssdk.services.s3.model.GetBucketLoggingRequest;
import software.amazon.awssdk.services.s3.model.GetBucketLoggingResponse;
import software.amazon.awssdk.services.s3.model.GetBucketMetricsConfigurationRequest;
import software.amazon.awssdk.services.s3.model.GetBucketMetricsConfigurationResponse;
import software.amazon.awssdk.services.s3.model.GetBucketNotificationConfigurationRequest;
import software.amazon.awssdk.services.s3.model.GetBucketNotificationConfigurationResponse;
import software.amazon.awssdk.services.s3.model.GetBucketOwnershipControlsRequest;
import software.amazon.awssdk.services.s3.model.GetBucketOwnershipControlsResponse;
import software.amazon.awssdk.services.s3.model.GetBucketPolicyRequest;
import software.amazon.awssdk.services.s3.model.GetBucketPolicyResponse;
import software.amazon.awssdk.services.s3.model.GetBucketPolicyStatusRequest;
import software.amazon.awssdk.services.s3.model.GetBucketPolicyStatusResponse;
import software.amazon.awssdk.services.s3.model.GetBucketReplicationRequest;
import software.amazon.awssdk.services.s3.model.GetBucketReplicationResponse;
import software.amazon.awssdk.services.s3.model.GetBucketRequestPaymentRequest;
import software.amazon.awssdk.services.s3.model.GetBucketRequestPaymentResponse;
import software.amazon.awssdk.services.s3.model.GetBucketTaggingRequest;
import software.amazon.awssdk.services.s3.model.GetBucketTaggingResponse;
import software.amazon.awssdk.services.s3.model.GetBucketVersioningRequest;
import software.amazon.awssdk.services.s3.model.GetBucketVersioningResponse;
import software.amazon.awssdk.services.s3.model.GetBucketWebsiteRequest;
import software.amazon.awssdk.services.s3.model.GetBucketWebsiteResponse;
import software.amazon.awssdk.services.s3.model.GetObjectAclRequest;
import software.amazon.awssdk.services.s3.model.GetObjectAclResponse;
import software.amazon.awssdk.services.s3.model.GetObjectLegalHoldRequest;
import software.amazon.awssdk.services.s3.model.GetObjectLegalHoldResponse;
import software.amazon.awssdk.services.s3.model.GetObjectLockConfigurationRequest;
import software.amazon.awssdk.services.s3.model.GetObjectLockConfigurationResponse;
import software.amazon.awssdk.services.s3.model.GetObjectRequest;
import software.amazon.awssdk.services.s3.model.GetObjectResponse;
import software.amazon.awssdk.services.s3.model.GetObjectRetentionRequest;
import software.amazon.awssdk.services.s3.model.GetObjectRetentionResponse;
import software.amazon.awssdk.services.s3.model.GetObjectTaggingRequest;
import software.amazon.awssdk.services.s3.model.GetObjectTaggingResponse;
import software.amazon.awssdk.services.s3.model.GetObjectTorrentRequest;
import software.amazon.awssdk.services.s3.model.GetObjectTorrentResponse;
import software.amazon.awssdk.services.s3.model.GetPublicAccessBlockRequest;
import software.amazon.awssdk.services.s3.model.GetPublicAccessBlockResponse;
import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
import software.amazon.awssdk.services.s3.model.HeadBucketResponse;
import software.amazon.awssdk.services.s3.model.HeadObjectRequest;
import software.amazon.awssdk.services.s3.model.HeadObjectResponse;
import software.amazon.awssdk.services.s3.model.ListBucketAnalyticsConfigurationsRequest;
import software.amazon.awssdk.services.s3.model.ListBucketAnalyticsConfigurationsResponse;
import software.amazon.awssdk.services.s3.model.ListBucketIntelligentTieringConfigurationsRequest;
import software.amazon.awssdk.services.s3.model.ListBucketIntelligentTieringConfigurationsResponse;
import software.amazon.awssdk.services.s3.model.ListBucketInventoryConfigurationsRequest;
import software.amazon.awssdk.services.s3.model.ListBucketInventoryConfigurationsResponse;
import software.amazon.awssdk.services.s3.model.ListBucketMetricsConfigurationsRequest;
import software.amazon.awssdk.services.s3.model.ListBucketMetricsConfigurationsResponse;
import software.amazon.awssdk.services.s3.model.ListBucketsRequest;
import software.amazon.awssdk.services.s3.model.ListBucketsResponse;
import software.amazon.awssdk.services.s3.model.ListMultipartUploadsRequest;
import software.amazon.awssdk.services.s3.model.ListMultipartUploadsResponse;
import software.amazon.awssdk.services.s3.model.ListObjectVersionsRequest;
import software.amazon.awssdk.services.s3.model.ListObjectVersionsResponse;
import software.amazon.awssdk.services.s3.model.ListObjectsRequest;
import software.amazon.awssdk.services.s3.model.ListObjectsResponse;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Response;
import software.amazon.awssdk.services.s3.model.ListPartsRequest;
import software.amazon.awssdk.services.s3.model.ListPartsResponse;
import software.amazon.awssdk.services.s3.model.PutBucketAccelerateConfigurationRequest;
import software.amazon.awssdk.services.s3.model.PutBucketAccelerateConfigurationResponse;
import software.amazon.awssdk.services.s3.model.PutBucketAclRequest;
import software.amazon.awssdk.services.s3.model.PutBucketAclResponse;
import software.amazon.awssdk.services.s3.model.PutBucketAnalyticsConfigurationRequest;
import software.amazon.awssdk.services.s3.model.PutBucketAnalyticsConfigurationResponse;
import software.amazon.awssdk.services.s3.model.PutBucketCorsRequest;
import software.amazon.awssdk.services.s3.model.PutBucketCorsResponse;
import software.amazon.awssdk.services.s3.model.PutBucketEncryptionRequest;
import software.amazon.awssdk.services.s3.model.PutBucketEncryptionResponse;
import software.amazon.awssdk.services.s3.model.PutBucketIntelligentTieringConfigurationRequest;
import software.amazon.awssdk.services.s3.model.PutBucketIntelligentTieringConfigurationResponse;
import software.amazon.awssdk.services.s3.model.PutBucketInventoryConfigurationRequest;
import software.amazon.awssdk.services.s3.model.PutBucketInventoryConfigurationResponse;
import software.amazon.awssdk.services.s3.model.PutBucketLifecycleConfigurationRequest;
import software.amazon.awssdk.services.s3.model.PutBucketLifecycleConfigurationResponse;
import software.amazon.awssdk.services.s3.model.PutBucketLoggingRequest;
import software.amazon.awssdk.services.s3.model.PutBucketLoggingResponse;
import software.amazon.awssdk.services.s3.model.PutBucketMetricsConfigurationRequest;
import software.amazon.awssdk.services.s3.model.PutBucketMetricsConfigurationResponse;
import software.amazon.awssdk.services.s3.model.PutBucketNotificationConfigurationRequest;
import software.amazon.awssdk.services.s3.model.PutBucketNotificationConfigurationResponse;
import software.amazon.awssdk.services.s3.model.PutBucketOwnershipControlsRequest;
import software.amazon.awssdk.services.s3.model.PutBucketOwnershipControlsResponse;
import software.amazon.awssdk.services.s3.model.PutBucketPolicyRequest;
import software.amazon.awssdk.services.s3.model.PutBucketPolicyResponse;
import software.amazon.awssdk.services.s3.model.PutBucketReplicationRequest;
import software.amazon.awssdk.services.s3.model.PutBucketReplicationResponse;
import software.amazon.awssdk.services.s3.model.PutBucketRequestPaymentRequest;
import software.amazon.awssdk.services.s3.model.PutBucketRequestPaymentResponse;
import software.amazon.awssdk.services.s3.model.PutBucketTaggingRequest;
import software.amazon.awssdk.services.s3.model.PutBucketTaggingResponse;
import software.amazon.awssdk.services.s3.model.PutBucketVersioningRequest;
import software.amazon.awssdk.services.s3.model.PutBucketVersioningResponse;
import software.amazon.awssdk.services.s3.model.PutBucketWebsiteRequest;
import software.amazon.awssdk.services.s3.model.PutBucketWebsiteResponse;
import software.amazon.awssdk.services.s3.model.PutObjectAclRequest;
import software.amazon.awssdk.services.s3.model.PutObjectAclResponse;
import software.amazon.awssdk.services.s3.model.PutObjectLegalHoldRequest;
import software.amazon.awssdk.services.s3.model.PutObjectLegalHoldResponse;
import software.amazon.awssdk.services.s3.model.PutObjectLockConfigurationRequest;
import software.amazon.awssdk.services.s3.model.PutObjectLockConfigurationResponse;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import software.amazon.awssdk.services.s3.model.PutObjectResponse;
import software.amazon.awssdk.services.s3.model.PutObjectRetentionRequest;
import software.amazon.awssdk.services.s3.model.PutObjectRetentionResponse;
import software.amazon.awssdk.services.s3.model.PutObjectTaggingRequest;
import software.amazon.awssdk.services.s3.model.PutObjectTaggingResponse;
import software.amazon.awssdk.services.s3.model.PutPublicAccessBlockRequest;
import software.amazon.awssdk.services.s3.model.PutPublicAccessBlockResponse;
import software.amazon.awssdk.services.s3.model.RestoreObjectRequest;
import software.amazon.awssdk.services.s3.model.RestoreObjectResponse;
import software.amazon.awssdk.services.s3.model.UploadPartCopyRequest;
import software.amazon.awssdk.services.s3.model.UploadPartCopyResponse;
import software.amazon.awssdk.services.s3.model.UploadPartRequest;
import software.amazon.awssdk.services.s3.model.UploadPartResponse;
import software.amazon.awssdk.services.s3.paginators.ListMultipartUploadsPublisher;
import software.amazon.awssdk.services.s3.paginators.ListObjectVersionsPublisher;
import software.amazon.awssdk.services.s3.paginators.ListObjectsV2Publisher;
import software.amazon.awssdk.services.s3.paginators.ListPartsPublisher;
import software.amazon.awssdk.services.s3.waiters.S3AsyncWaiter;

/**
 * Service client for accessing Amazon S3 asynchronously. This can be created using the static {@link #builder()}
 * method.
 */
@Generated("software.amazon.awssdk:codegen")
public interface S3AsyncClient extends SdkClient {
    /** Service name used by the SDK's client metadata and configuration. */
    String SERVICE_NAME = "s3";

    /**
     * Create a {@link S3AsyncClient} with the region loaded from the
     * {@link software.amazon.awssdk.regions.providers.DefaultAwsRegionProviderChain} and credentials loaded from the
     * {@link software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider}.
     */
    static S3AsyncClient create() {
        return builder().build();
    }

    /**
     * Create a builder that can be used to configure and create a {@link S3AsyncClient}.
     */
    static S3AsyncClientBuilder builder() {
        return new DefaultS3AsyncClientBuilder();
    }

    /**
     * <p>
     * This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be
     * uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However,
     * if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it
     * might be necessary to abort a given multipart upload multiple times in order to completely free all storage
     * consumed by all parts.
     * </p>
     * <p>
     * To verify that all parts have been removed, so you don't get charged for the part storage, you should call
     * the ListParts operation and ensure that the parts list is empty.
     * </p>
     * <p>
     * For information about permissions required to use the multipart upload API, see Multipart Upload API and
     * Permissions.
     * </p>
     *
     * @param abortMultipartUploadRequest
     * @return A Java Future containing the result of the AbortMultipartUpload operation returned by the service.<br/>
     *         The CompletableFuture returned by this method can be completed exceptionally with the following
     *         exceptions.
     *         <ul>
     *         <li>NoSuchUploadException The specified multipart upload does not exist.</li>
     *         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
     *         client). Can be used for catch all scenarios.</li>
     *         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
     *         credentials, etc.</li>
     *         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
     *         instance of this type.</li>
     *         </ul>
     * @sample S3AsyncClient.AbortMultipartUpload
     */
    // NOTE(review): the generic type arguments were stripped when this file was scraped (angle brackets eaten as
    // HTML tags); restored per the SDK v2 codegen convention of CompletableFuture<OperationResponse>.
    default CompletableFuture<AbortMultipartUploadResponse> abortMultipartUpload(
            AbortMultipartUploadRequest abortMultipartUploadRequest) {
        throw new UnsupportedOperationException();
    }

    /**

* This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be * uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if * any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might * be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by * all parts. *

*

* To verify that all parts have been removed, so you don't get charged for the part storage, you should call the ListParts operation and ensure that * the parts list is empty. *

*

* For information about permissions required to use the multipart upload API, see Multipart Upload API and * Permissions. *

*

* The following operations are related to AbortMultipartUpload: *

* *
*

* This is a convenience which creates an instance of the {@link AbortMultipartUploadRequest.Builder} avoiding the * need to create one manually via {@link AbortMultipartUploadRequest#builder()} *

* * @param abortMultipartUploadRequest * A {@link Consumer} that will call methods on {@link AbortMultipartUploadRequest.Builder} to create a * request. * @return A Java Future containing the result of the AbortMultipartUpload operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • NoSuchUploadException The specified multipart upload does not exist.
  • *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.AbortMultipartUpload */ default CompletableFuture abortMultipartUpload( Consumer abortMultipartUploadRequest) { return abortMultipartUpload(AbortMultipartUploadRequest.builder().applyMutation(abortMultipartUploadRequest).build()); } /** *

* Completes a multipart upload by assembling previously uploaded parts. *

*

* You first initiate the multipart upload and then upload all parts using the UploadPart operation. After * successfully uploading all relevant parts of an upload, you call this operation to complete the upload. Upon * receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new * object. In the Complete Multipart Upload request, you must provide the parts list. You must ensure that the parts * list is complete. This operation concatenates the parts that you provide in the list. For each part in the list, * you must provide the part number and the ETag value, returned after that part was uploaded. *

*

* Processing of a Complete Multipart Upload request could take several minutes to complete. After Amazon S3 begins * processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in * progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. Because a * request could fail after the initial 200 OK response has been sent, it is important that you check the response * body to determine whether the request succeeded. *

*

* Note that if CompleteMultipartUpload fails, applications should be prepared to retry the failed * requests. For more information, see Amazon S3 Error Best * Practices. *

*

* For more information about multipart uploads, see Uploading Objects Using Multipart * Upload. *

*

* For information about permissions required to use the multipart upload API, see Multipart Upload API and * Permissions. *

*

* CompleteMultipartUpload has the following special errors: *

*
    *
  • *

    * Error code: EntityTooSmall *

    *
      *
    • *

      * Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 * MB in size, except the last part. *

      *
    • *
    • *

      * 400 Bad Request *

      *
    • *
    *
  • *
  • *

    * Error code: InvalidPart *

    *
      *
    • *

      * Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the * specified entity tag might not have matched the part's entity tag. *

      *
    • *
    • *

      * 400 Bad Request *

      *
    • *
    *
  • *
  • *

    * Error code: InvalidPartOrder *

    *
      *
    • *

      * Description: The list of parts was not in ascending order. The parts list must be specified in order by part * number. *

      *
    • *
    • *

      * 400 Bad Request *

      *
    • *
    *
  • *
  • *

    * Error code: NoSuchUpload *

    *
      *
    • *

      * Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart * upload might have been aborted or completed. *

      *
    • *
    • *

      * 404 Not Found *

      *
    • *
    *
  • *
*

* The following operations are related to CompleteMultipartUpload: *

* * * @param completeMultipartUploadRequest * @return A Java Future containing the result of the CompleteMultipartUpload operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.CompleteMultipartUpload */ default CompletableFuture completeMultipartUpload( CompleteMultipartUploadRequest completeMultipartUploadRequest) { throw new UnsupportedOperationException(); } /** *

* Completes a multipart upload by assembling previously uploaded parts. *

*

* You first initiate the multipart upload and then upload all parts using the UploadPart operation. After * successfully uploading all relevant parts of an upload, you call this operation to complete the upload. Upon * receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new * object. In the Complete Multipart Upload request, you must provide the parts list. You must ensure that the parts * list is complete. This operation concatenates the parts that you provide in the list. For each part in the list, * you must provide the part number and the ETag value, returned after that part was uploaded. *

*

* Processing of a Complete Multipart Upload request could take several minutes to complete. After Amazon S3 begins * processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in * progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. Because a * request could fail after the initial 200 OK response has been sent, it is important that you check the response * body to determine whether the request succeeded. *

*

* Note that if CompleteMultipartUpload fails, applications should be prepared to retry the failed * requests. For more information, see Amazon S3 Error Best * Practices. *

*

* For more information about multipart uploads, see Uploading Objects Using Multipart * Upload. *

*

* For information about permissions required to use the multipart upload API, see Multipart Upload API and * Permissions. *

*

* CompleteMultipartUpload has the following special errors: *

*
    *
  • *

    * Error code: EntityTooSmall *

    *
      *
    • *

      * Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 * MB in size, except the last part. *

      *
    • *
    • *

      * 400 Bad Request *

      *
    • *
    *
  • *
  • *

    * Error code: InvalidPart *

    *
      *
    • *

      * Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the * specified entity tag might not have matched the part's entity tag. *

      *
    • *
    • *

      * 400 Bad Request *

      *
    • *
    *
  • *
  • *

    * Error code: InvalidPartOrder *

    *
      *
    • *

      * Description: The list of parts was not in ascending order. The parts list must be specified in order by part * number. *

      *
    • *
    • *

      * 400 Bad Request *

      *
    • *
    *
  • *
  • *

    * Error code: NoSuchUpload *

    *
      *
    • *

      * Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart * upload might have been aborted or completed. *

      *
    • *
    • *

      * 404 Not Found *

      *
    • *
    *
  • *
*

* The following operations are related to CompleteMultipartUpload: *

* *
*

* This is a convenience which creates an instance of the {@link CompleteMultipartUploadRequest.Builder} avoiding * the need to create one manually via {@link CompleteMultipartUploadRequest#builder()} *

* * @param completeMultipartUploadRequest * A {@link Consumer} that will call methods on {@link CompleteMultipartUploadRequest.Builder} to create a * request. * @return A Java Future containing the result of the CompleteMultipartUpload operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.CompleteMultipartUpload */ default CompletableFuture completeMultipartUpload( Consumer completeMultipartUploadRequest) { return completeMultipartUpload(CompleteMultipartUploadRequest.builder().applyMutation(completeMultipartUploadRequest) .build()); } /** *

* Creates a copy of an object that is already stored in Amazon S3. *

* *

* You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size * in a single atomic operation using this API. However, to copy an object greater than 5 GB, you must use the * multipart upload Upload Part - Copy API. For more information, see Copy Object Using the * REST Multipart Upload API. *

*
*

* All copy requests must be authenticated. Additionally, you must have read access to the source object and * write access to the destination bucket. For more information, see REST Authentication. Both the * Region that you want to copy the object from and the Region that you want to copy the object to must be enabled * for your account. *

*

* A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the * files. If the error occurs before the copy operation starts, you receive a standard Amazon S3 error. If the error * occurs during the copy operation, the error response is embedded in the 200 OK response. This means * that a 200 OK response can contain either a success or an error. Design your application to parse * the contents of the response and handle it appropriately. *

*

* If the copy is successful, you receive a response with information about the copied object. *

* *

* If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the * content-length, and you would need to read the entire body. *

*
*

* The copy request charge is based on the storage class and Region that you specify for the destination object. For * pricing information, see Amazon S3 pricing. *

* *

* Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a * transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration. *

*
*

* Metadata *

*

* When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not * preserved and is set to private for the user making the request. To override the default ACL setting, specify a * new ACL when generating a copy request. For more information, see Using ACLs. *

*

* To specify whether you want the object metadata copied from the source object or replaced with metadata provided * in the request, you can optionally add the x-amz-metadata-directive header. When you grant * permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata * behavior when objects are uploaded. For more information, see Specifying Conditions in a * Policy in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific condition keys, see * Actions, Resources, and Condition * Keys for Amazon S3. *

*

* x-amz-copy-source-if Headers *

*

* To only copy an object under certain conditions, such as whether the Etag matches or whether the * object was modified before or after a specified date, use the following request parameters: *

*
    *
  • *

    * x-amz-copy-source-if-match *

    *
  • *
  • *

    * x-amz-copy-source-if-none-match *

    *
  • *
  • *

    * x-amz-copy-source-if-unmodified-since *

    *
  • *
  • *

    * x-amz-copy-source-if-modified-since *

    *
  • *
*

* If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since * headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the * data: *

*
 * <ul>
 * <li><code>x-amz-copy-source-if-match</code> condition evaluates to true</li>
 * <li><code>x-amz-copy-source-if-unmodified-since</code> condition evaluates to false</li>
 * </ul>
*

* If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since * headers are present in the request and evaluate as follows, Amazon S3 returns the * 412 Precondition Failed response code: *

*
 * <ul>
 * <li><code>x-amz-copy-source-if-none-match</code> condition evaluates to false</li>
 * <li><code>x-amz-copy-source-if-modified-since</code> condition evaluates to true</li>
 * </ul>
* *

* All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. *

*
*

* Encryption *

*

* The source object that you are copying can be encrypted or unencrypted. The source object can be encrypted with * server-side encryption using AWS managed encryption keys (SSE-S3 or SSE-KMS) or by using a customer-provided * encryption key. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data * centers and decrypts the data when you access it. *

*

* You can optionally use the appropriate encryption-related headers to request server-side encryption for the * target object. You have the option to provide your own encryption key or use SSE-S3 or SSE-KMS, regardless of the * form of server-side encryption that was used to encrypt the source object. You can even request encryption if the * source object was not encrypted. For more information about server-side encryption, see Using Server-Side * Encryption. *

*

* Access Control List (ACL)-Specific Request Headers *

*

* When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects * are private. Only the owner has full access control. When adding a new object, you can grant permissions to * individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL * on the object. For more information, see Access Control List (ACL) Overview * and Managing ACLs Using the * REST API. *

*

* Storage Class Options *

*

* You can use the CopyObject operation to change the storage class of an object that is already stored * in Amazon S3 using the StorageClass parameter. For more information, see Storage Classes in the * Amazon S3 Service Developer Guide. *

*

* Versioning *

*

* By default, x-amz-copy-source identifies the current version of an object to copy. If the current * version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the * versionId subresource. *

*

* If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being * copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID * of the copied object in the x-amz-version-id response header in the response. *

*

* If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is * always null. *

*

* If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as * a source object for the copy operation. For more information, see RestoreObject. *

*

* The following operations are related to CopyObject: *

* *

* For more information, see Copying Objects. *

* * @param copyObjectRequest * @return A Java Future containing the result of the CopyObject operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • ObjectNotInActiveTierErrorException The source object of the COPY operation is not in the active tier * and is only stored in Amazon S3 Glacier.
  • *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.CopyObject */ default CompletableFuture copyObject(CopyObjectRequest copyObjectRequest) { throw new UnsupportedOperationException(); } /** *

* Creates a copy of an object that is already stored in Amazon S3. *

* *

* You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size * in a single atomic operation using this API. However, to copy an object greater than 5 GB, you must use the * multipart upload Upload Part - Copy API. For more information, see Copy Object Using the * REST Multipart Upload API. *

*
*

* All copy requests must be authenticated. Additionally, you must have read access to the source object and * write access to the destination bucket. For more information, see REST Authentication. Both the * Region that you want to copy the object from and the Region that you want to copy the object to must be enabled * for your account. *

*

* A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the * files. If the error occurs before the copy operation starts, you receive a standard Amazon S3 error. If the error * occurs during the copy operation, the error response is embedded in the 200 OK response. This means * that a 200 OK response can contain either a success or an error. Design your application to parse * the contents of the response and handle it appropriately. *

*

* If the copy is successful, you receive a response with information about the copied object. *

* *

* If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the * content-length, and you would need to read the entire body. *

*
*

* The copy request charge is based on the storage class and Region that you specify for the destination object. For * pricing information, see Amazon S3 pricing. *

* *

* Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a * transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration. *

*
*

* Metadata *

*

* When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not * preserved and is set to private for the user making the request. To override the default ACL setting, specify a * new ACL when generating a copy request. For more information, see Using ACLs. *

*

* To specify whether you want the object metadata copied from the source object or replaced with metadata provided * in the request, you can optionally add the x-amz-metadata-directive header. When you grant * permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata * behavior when objects are uploaded. For more information, see Specifying Conditions in a * Policy in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific condition keys, see * Actions, Resources, and Condition * Keys for Amazon S3. *

*

* x-amz-copy-source-if Headers *

*

* To only copy an object under certain conditions, such as whether the Etag matches or whether the * object was modified before or after a specified date, use the following request parameters: *

*
 * <ul>
 * <li><code>x-amz-copy-source-if-match</code></li>
 * <li><code>x-amz-copy-source-if-none-match</code></li>
 * <li><code>x-amz-copy-source-if-unmodified-since</code></li>
 * <li><code>x-amz-copy-source-if-modified-since</code></li>
 * </ul>
*

* If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since * headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the * data: *

*
 * <ul>
 * <li><code>x-amz-copy-source-if-match</code> condition evaluates to true</li>
 * <li><code>x-amz-copy-source-if-unmodified-since</code> condition evaluates to false</li>
 * </ul>
*

* If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since * headers are present in the request and evaluate as follows, Amazon S3 returns the * 412 Precondition Failed response code: *

*
 * <ul>
 * <li><code>x-amz-copy-source-if-none-match</code> condition evaluates to false</li>
 * <li><code>x-amz-copy-source-if-modified-since</code> condition evaluates to true</li>
 * </ul>
* *

* All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. *

*
*

* Encryption *

*

* The source object that you are copying can be encrypted or unencrypted. The source object can be encrypted with * server-side encryption using AWS managed encryption keys (SSE-S3 or SSE-KMS) or by using a customer-provided * encryption key. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data * centers and decrypts the data when you access it. *

*

* You can optionally use the appropriate encryption-related headers to request server-side encryption for the * target object. You have the option to provide your own encryption key or use SSE-S3 or SSE-KMS, regardless of the * form of server-side encryption that was used to encrypt the source object. You can even request encryption if the * source object was not encrypted. For more information about server-side encryption, see Using Server-Side * Encryption. *

*

* Access Control List (ACL)-Specific Request Headers *

*

* When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects * are private. Only the owner has full access control. When adding a new object, you can grant permissions to * individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL * on the object. For more information, see Access Control List (ACL) Overview * and Managing ACLs Using the * REST API. *

*

* Storage Class Options *

*

* You can use the CopyObject operation to change the storage class of an object that is already stored * in Amazon S3 using the StorageClass parameter. For more information, see Storage Classes in the * Amazon S3 Service Developer Guide. *

*

* Versioning *

*

* By default, x-amz-copy-source identifies the current version of an object to copy. If the current * version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the * versionId subresource. *

*

* If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being * copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID * of the copied object in the x-amz-version-id response header in the response. *

*

* If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is * always null. *

*

* If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as * a source object for the copy operation. For more information, see RestoreObject. *

*

* The following operations are related to CopyObject: *

* *

* For more information, see Copying Objects. *

*
*

* This is a convenience which creates an instance of the {@link CopyObjectRequest.Builder} avoiding the need to * create one manually via {@link CopyObjectRequest#builder()} *

* * @param copyObjectRequest * A {@link Consumer} that will call methods on {@link CopyObjectRequest.Builder} to create a request. * @return A Java Future containing the result of the CopyObject operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • ObjectNotInActiveTierErrorException The source object of the COPY operation is not in the active tier * and is only stored in Amazon S3 Glacier.
  • *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.CopyObject */ default CompletableFuture copyObject(Consumer copyObjectRequest) { return copyObject(CopyObjectRequest.builder().applyMutation(copyObjectRequest).build()); } /** *

* Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a valid AWS Access Key ID * to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you * become the bucket owner. *

*

* Not every string is an acceptable bucket name. For information about bucket naming restrictions, see Working with Amazon S3 buckets. *

*

* If you want to create an Amazon S3 on Outposts bucket, see Create Bucket. *

*

* By default, the bucket is created in the US East (N. Virginia) Region. You can optionally specify a Region in the * request body. You might choose a Region to optimize latency, minimize costs, or address regulatory requirements. * For example, if you reside in Europe, you will probably find it advantageous to create buckets in the Europe * (Ireland) Region. For more information, see Accessing a * bucket. *

* *

* If you send your create bucket request to the s3.amazonaws.com endpoint, the request goes to the * us-east-1 Region. Accordingly, the signature calculations in Signature Version 4 must use us-east-1 as the * Region, even if the location constraint in the request specifies another Region where the bucket is to be * created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to * handle 307 redirect. For more information, see Virtual hosting of buckets. *

*
*

* When creating a bucket using this operation, you can optionally specify the accounts or groups that should be * granted specific permissions on the bucket. There are two ways to grant the appropriate permissions using the * request headers. *

*
    *
  • *

    * Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports a set of predefined * ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more * information, see Canned * ACL. *

    *
  • *
  • *

    * Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write, * x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control * headers. These headers map to the set of permissions Amazon S3 supports in an ACL. For more information, see Access control list (ACL) overview. *

    *

    * You specify each grantee as a type=value pair, where the type is one of the following: *

    *
      *
    • *

      * id – if the value specified is the canonical user ID of an AWS account *

      *
    • *
    • *

      * uri – if you are granting permissions to a predefined group *

      *
    • *
    • *

      * emailAddress – if the value specified is the email address of an AWS account *

      * *

      * Using email addresses to specify a grantee is only supported in the following AWS Regions: *

      * <ul>
      * <li>US East (N. Virginia)</li>
      * <li>US West (N. California)</li>
      * <li>US West (Oregon)</li>
      * <li>Asia Pacific (Singapore)</li>
      * <li>Asia Pacific (Sydney)</li>
      * <li>Asia Pacific (Tokyo)</li>
      * <li>Europe (Ireland)</li>
      * <li>South America (São Paulo)</li>
      * </ul>
      *

      * For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS * General Reference. *

      *
    • *
    *

    * For example, the following x-amz-grant-read header grants the AWS accounts identified by account IDs * permissions to read object data and its metadata: *

    *

    * x-amz-grant-read: id="11112222333", id="444455556666" *

    *
  • *
* *

* You can use either a canned ACL or specify access permissions explicitly. You cannot do both. *

*
*

* The following operations are related to CreateBucket: *

* * * @param createBucketRequest * @return A Java Future containing the result of the CreateBucket operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • BucketAlreadyExistsException The requested bucket name is not available. The bucket namespace is * shared by all users of the system. Select a different name and try again.
  • *
  • BucketAlreadyOwnedByYouException The bucket you tried to create already exists, and you own it. * Amazon S3 returns this error in all AWS Regions except in the North Virginia Region. For legacy * compatibility, if you re-create an existing bucket that you already own in the North Virginia Region, * Amazon S3 returns 200 OK and resets the bucket access control lists (ACLs).
  • *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.CreateBucket */ default CompletableFuture createBucket(CreateBucketRequest createBucketRequest) { throw new UnsupportedOperationException(); } /** *

* Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a valid AWS Access Key ID * to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you * become the bucket owner. *

*

* Not every string is an acceptable bucket name. For information about bucket naming restrictions, see Working with Amazon S3 buckets. *

*

* If you want to create an Amazon S3 on Outposts bucket, see Create Bucket. *

*

* By default, the bucket is created in the US East (N. Virginia) Region. You can optionally specify a Region in the * request body. You might choose a Region to optimize latency, minimize costs, or address regulatory requirements. * For example, if you reside in Europe, you will probably find it advantageous to create buckets in the Europe * (Ireland) Region. For more information, see Accessing a * bucket. *

* *

* If you send your create bucket request to the s3.amazonaws.com endpoint, the request goes to the * us-east-1 Region. Accordingly, the signature calculations in Signature Version 4 must use us-east-1 as the * Region, even if the location constraint in the request specifies another Region where the bucket is to be * created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to * handle 307 redirect. For more information, see Virtual hosting of buckets. *

*
*

* When creating a bucket using this operation, you can optionally specify the accounts or groups that should be * granted specific permissions on the bucket. There are two ways to grant the appropriate permissions using the * request headers. *

*
    *
  • *

    * Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports a set of predefined * ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more * information, see Canned * ACL. *

    *
  • *
  • *

    * Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write, * x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control * headers. These headers map to the set of permissions Amazon S3 supports in an ACL. For more information, see Access control list (ACL) overview. *

    *

    * You specify each grantee as a type=value pair, where the type is one of the following: *

    *
      *
    • *

      * id – if the value specified is the canonical user ID of an AWS account *

      *
    • *
    • *

      * uri – if you are granting permissions to a predefined group *

      *
    • *
    • *

      * emailAddress – if the value specified is the email address of an AWS account *

      * *

      * Using email addresses to specify a grantee is only supported in the following AWS Regions: *

      *
        *
      • *

        * US East (N. Virginia) *

        *
      • *
      • *

        * US West (N. California) *

        *
      • *
      • *

        * US West (Oregon) *

        *
      • *
      • *

        * Asia Pacific (Singapore) *

        *
      • *
      • *

        * Asia Pacific (Sydney) *

        *
      • *
      • *

        * Asia Pacific (Tokyo) *

        *
      • *
      • *

        * Europe (Ireland) *

        *
      • *
      • *

        * South America (São Paulo) *

        *
      • *
      *

      * For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS * General Reference. *

      *
    • *
    *

    * For example, the following x-amz-grant-read header grants the AWS accounts identified by account IDs * permissions to read object data and its metadata: *

    *

    * x-amz-grant-read: id="11112222333", id="444455556666" *

    *
  • *
* *

* You can use either a canned ACL or specify access permissions explicitly. You cannot do both. *

*
*

* The following operations are related to CreateBucket: *

* *
*

* This is a convenience which creates an instance of the {@link CreateBucketRequest.Builder} avoiding the need to * create one manually via {@link CreateBucketRequest#builder()} *

* * @param createBucketRequest * A {@link Consumer} that will call methods on {@link CreateBucketRequest.Builder} to create a request. * @return A Java Future containing the result of the CreateBucket operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • BucketAlreadyExistsException The requested bucket name is not available. The bucket namespace is * shared by all users of the system. Select a different name and try again.
  • *
  • BucketAlreadyOwnedByYouException The bucket you tried to create already exists, and you own it. * Amazon S3 returns this error in all AWS Regions except in the North Virginia Region. For legacy * compatibility, if you re-create an existing bucket that you already own in the North Virginia Region, * Amazon S3 returns 200 OK and resets the bucket access control lists (ACLs).
  • *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.CreateBucket */ default CompletableFuture createBucket(Consumer createBucketRequest) { return createBucket(CreateBucketRequest.builder().applyMutation(createBucketRequest).build()); } /** *

* This operation initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of * the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part * requests (see UploadPart). You * also include this upload ID in the final request to either complete or abort the multipart upload request. *

*

* For more information about multipart uploads, see Multipart Upload Overview. *

*

* If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within * the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload * becomes eligible for an abort operation and Amazon S3 aborts the multipart upload. For more information, see * Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy. *

*

* For information about the permissions required to use the multipart upload API, see Multipart Upload API and * Permissions. *

*

* For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send * one or more requests to upload parts, and then complete the multipart upload process. You sign each request * individually. There is nothing special about signing multipart upload requests. For more information about * signing, see Authenticating * Requests (AWS Signature Version 4). *

* *

* After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the * uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to * store the parts and stop charging you for storing them only after you either complete or abort a multipart * upload. *

*
*

* You can optionally request server-side encryption. For server-side encryption, Amazon S3 encrypts your data as it * writes it to disks in its data centers and decrypts it when you access it. You can provide your own encryption * key, or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or Amazon S3-managed encryption * keys. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must * match the headers you used in the request to initiate the upload by using CreateMultipartUpload. *

*

* To perform a multipart upload with encryption using an AWS KMS CMK, the requester must have permission to the * kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, * kms:GenerateDataKey*, and kms:DescribeKey actions on the key. These permissions are * required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the * multipart upload. *

*

* If your AWS Identity and Access Management (IAM) user or role is in the same AWS account as the AWS KMS CMK, then * you must have these permissions on the key policy. If your IAM user or role belongs to a different account than * the key, then you must have the permissions on both the key policy and your IAM user or role. *

*

* For more information, see Protecting Data Using * Server-Side Encryption. *

*
*
Access Permissions
*
*

* When copying an object, you can optionally specify the accounts or groups that should be granted specific * permissions on the new object. There are two ways to grant the permissions using the request headers: *

*
    *
  • *

    * Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL. *

    *
  • *
  • *

    * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, * x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to * the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. *

    *
  • *
*

* You can use either a canned ACL or specify access permissions explicitly. You cannot do both. *

*
*
Server-Side-Encryption-Specific Request Headers
*
*

* You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is * for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and * decrypts it when you access it. The option you use depends on whether you want to use AWS managed encryption keys * or provide your own encryption key. *

*
    *
  • *

    * Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (AWS * KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request. *

    *
      *
    • *

      * x-amz-server-side-encryption *

      *
    • *
    • *

      * x-amz-server-side-encryption-aws-kms-key-id *

      *
    • *
    • *

      * x-amz-server-side-encryption-context *

      *
    • *
    * *

    * If you specify x-amz-server-side-encryption:aws:kms, but don't provide * x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to * protect the data. *

    *
    *

    * All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using * SigV4. *

    *
    *

    * For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side * Encryption with CMKs stored in AWS KMS. *

    *
  • *
  • *

    * Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following * headers in the request. *

    *
      *
    • *

      * x-amz-server-side-encryption-customer-algorithm *

      *
    • *
    • *

      * x-amz-server-side-encryption-customer-key *

      *
    • *
    • *

      * x-amz-server-side-encryption-customer-key-MD5 *

      *
    • *
    *

    * For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side * Encryption with CMKs stored in AWS KMS. *

    *
  • *
*
*
Access-Control-List (ACL)-Specific Request Headers
*
*

* You also can use the following access control–related headers with this operation. By default, all objects are * private. Only the owner has full access control. When adding a new object, you can grant permissions to * individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the * access control list (ACL) on the object. For more information, see Using ACLs. With this * operation, you can grant access permissions using one of the following two methods: *

*
    *
  • *

    * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned * ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL. *

    *
  • *
  • *

    * Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or * groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. * For more information, see Access * Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. * To grant permissions explicitly, use: *

    *
      *
    • *

      * x-amz-grant-read *

      *
    • *
    • *

      * x-amz-grant-write *

      *
    • *
    • *

      * x-amz-grant-read-acp *

      *
    • *
    • *

      * x-amz-grant-write-acp *

      *
    • *
    • *

      * x-amz-grant-full-control *

      *
    • *
    *

    * You specify each grantee as a type=value pair, where the type is one of the following: *

    *
      *
    • *

      * id – if the value specified is the canonical user ID of an AWS account *

      *
    • *
    • *

      * uri – if you are granting permissions to a predefined group *

      *
    • *
    • *

      * emailAddress – if the value specified is the email address of an AWS account *

      * *

      * Using email addresses to specify a grantee is only supported in the following AWS Regions: *

      *
        *
      • *

        * US East (N. Virginia) *

        *
      • *
      • *

        * US West (N. California) *

        *
      • *
      • *

        * US West (Oregon) *

        *
      • *
      • *

        * Asia Pacific (Singapore) *

        *
      • *
      • *

        * Asia Pacific (Sydney) *

        *
      • *
      • *

        * Asia Pacific (Tokyo) *

        *
      • *
      • *

        * Europe (Ireland) *

        *
      • *
      • *

        * South America (São Paulo) *

        *
      • *
      *

      * For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS * General Reference. *

      *
    • *
    *

    * For example, the following x-amz-grant-read header grants the AWS accounts identified by account IDs * permissions to read object data and its metadata: *

    *

    * x-amz-grant-read: id="11112222333", id="444455556666" *

    *
  • *
*
*
*

* The following operations are related to CreateMultipartUpload: *

* * * @param createMultipartUploadRequest * @return A Java Future containing the result of the CreateMultipartUpload operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.CreateMultipartUpload
*/
default CompletableFuture<CreateMultipartUploadResponse> createMultipartUpload(
        CreateMultipartUploadRequest createMultipartUploadRequest) {
    // Generated default: concrete async clients built by the SDK override this;
    // calling it on the bare interface is always an error.
    throw new UnsupportedOperationException();
}

/**
 *

* This operation initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of * the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part * requests (see UploadPart). You * also include this upload ID in the final request to either complete or abort the multipart upload request. *

*

* For more information about multipart uploads, see Multipart Upload Overview. *

*

* If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within * the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload * becomes eligible for an abort operation and Amazon S3 aborts the multipart upload. For more information, see * Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy. *

*

* For information about the permissions required to use the multipart upload API, see Multipart Upload API and * Permissions. *

*

* For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send * one or more requests to upload parts, and then complete the multipart upload process. You sign each request * individually. There is nothing special about signing multipart upload requests. For more information about * signing, see Authenticating * Requests (AWS Signature Version 4). *

* *

* After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the * uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to * store the parts and stop charging you for storing them only after you either complete or abort a multipart * upload. *

*
*

* You can optionally request server-side encryption. For server-side encryption, Amazon S3 encrypts your data as it * writes it to disks in its data centers and decrypts it when you access it. You can provide your own encryption * key, or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or Amazon S3-managed encryption * keys. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must * match the headers you used in the request to initiate the upload by using CreateMultipartUpload. *

*

* To perform a multipart upload with encryption using an AWS KMS CMK, the requester must have permission to the * kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, * kms:GenerateDataKey*, and kms:DescribeKey actions on the key. These permissions are * required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the * multipart upload. *

*

* If your AWS Identity and Access Management (IAM) user or role is in the same AWS account as the AWS KMS CMK, then * you must have these permissions on the key policy. If your IAM user or role belongs to a different account than * the key, then you must have the permissions on both the key policy and your IAM user or role. *

*

* For more information, see Protecting Data Using * Server-Side Encryption. *

*
*
Access Permissions
*
*

* When copying an object, you can optionally specify the accounts or groups that should be granted specific * permissions on the new object. There are two ways to grant the permissions using the request headers: *

*
    *
  • *

    * Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL. *

    *
  • *
  • *

    * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, * x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to * the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. *

    *
  • *
*

* You can use either a canned ACL or specify access permissions explicitly. You cannot do both. *

*
*
Server-Side-Encryption-Specific Request Headers
*
*

* You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is * for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and * decrypts it when you access it. The option you use depends on whether you want to use AWS managed encryption keys * or provide your own encryption key. *

*
    *
  • *

    * Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (AWS * KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request. *

    *
      *
    • *

      * x-amz-server-side-encryption *

      *
    • *
    • *

      * x-amz-server-side-encryption-aws-kms-key-id *

      *
    • *
    • *

      * x-amz-server-side-encryption-context *

      *
    • *
    * *

    * If you specify x-amz-server-side-encryption:aws:kms, but don't provide * x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to * protect the data. *

    *
    *

    * All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using * SigV4. *

    *
    *

    * For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side * Encryption with CMKs stored in AWS KMS. *

    *
  • *
  • *

    * Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following * headers in the request. *

    *
      *
    • *

      * x-amz-server-side-encryption-customer-algorithm *

      *
    • *
    • *

      * x-amz-server-side-encryption-customer-key *

      *
    • *
    • *

      * x-amz-server-side-encryption-customer-key-MD5 *

      *
    • *
    *

    * For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side * Encryption with CMKs stored in AWS KMS. *

    *
  • *
*
*
Access-Control-List (ACL)-Specific Request Headers
*
*

* You also can use the following access control–related headers with this operation. By default, all objects are * private. Only the owner has full access control. When adding a new object, you can grant permissions to * individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the * access control list (ACL) on the object. For more information, see Using ACLs. With this * operation, you can grant access permissions using one of the following two methods: *

*
    *
  • *

    * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned * ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL. *

    *
  • *
  • *

    * Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or * groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. * For more information, see Access * Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. * To grant permissions explicitly, use: *

    *
      *
    • *

      * x-amz-grant-read *

      *
    • *
    • *

      * x-amz-grant-write *

      *
    • *
    • *

      * x-amz-grant-read-acp *

      *
    • *
    • *

      * x-amz-grant-write-acp *

      *
    • *
    • *

      * x-amz-grant-full-control *

      *
    • *
    *

    * You specify each grantee as a type=value pair, where the type is one of the following: *

    *
      *
    • *

      * id – if the value specified is the canonical user ID of an AWS account *

      *
    • *
    • *

      * uri – if you are granting permissions to a predefined group *

      *
    • *
    • *

      * emailAddress – if the value specified is the email address of an AWS account *

      * *

      * Using email addresses to specify a grantee is only supported in the following AWS Regions: *

      *
        *
      • *

        * US East (N. Virginia) *

        *
      • *
      • *

        * US West (N. California) *

        *
      • *
      • *

        * US West (Oregon) *

        *
      • *
      • *

        * Asia Pacific (Singapore) *

        *
      • *
      • *

        * Asia Pacific (Sydney) *

        *
      • *
      • *

        * Asia Pacific (Tokyo) *

        *
      • *
      • *

        * Europe (Ireland) *

        *
      • *
      • *

        * South America (São Paulo) *

        *
      • *
      *

      * For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS * General Reference. *

      *
    • *
    *

    * For example, the following x-amz-grant-read header grants the AWS accounts identified by account IDs * permissions to read object data and its metadata: *

    *

    * x-amz-grant-read: id="11112222333", id="444455556666" *

    *
  • *
*
*
*

* The following operations are related to CreateMultipartUpload: *

* *
*

* This is a convenience which creates an instance of the {@link CreateMultipartUploadRequest.Builder} avoiding the * need to create one manually via {@link CreateMultipartUploadRequest#builder()} *

* * @param createMultipartUploadRequest * A {@link Consumer} that will call methods on {@link CreateMultipartUploadRequest.Builder} to create a * request. * @return A Java Future containing the result of the CreateMultipartUpload operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.CreateMultipartUpload
*/
default CompletableFuture<CreateMultipartUploadResponse> createMultipartUpload(
        Consumer<CreateMultipartUploadRequest.Builder> createMultipartUploadRequest) {
    // Convenience overload: materializes the request from the consumer-configured
    // builder and delegates to the request-object variant.
    return createMultipartUpload(CreateMultipartUploadRequest.builder()
            .applyMutation(createMultipartUploadRequest).build());
}

/**
 *

* Deletes the S3 bucket. All objects (including all object versions and delete markers) in the bucket must be * deleted before the bucket itself can be deleted. *

*

* Related Resources *

* * * @param deleteBucketRequest * @return A Java Future containing the result of the DeleteBucket operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucket
*/
default CompletableFuture<DeleteBucketResponse> deleteBucket(DeleteBucketRequest deleteBucketRequest) {
    // Generated default: concrete async clients built by the SDK override this.
    throw new UnsupportedOperationException();
}

/**
 *

* Deletes the S3 bucket. All objects (including all object versions and delete markers) in the bucket must be * deleted before the bucket itself can be deleted. *

*

* Related Resources *

* *
*

* This is a convenience which creates an instance of the {@link DeleteBucketRequest.Builder} avoiding the need to * create one manually via {@link DeleteBucketRequest#builder()} *

* * @param deleteBucketRequest * A {@link Consumer} that will call methods on {@link DeleteBucketRequest.Builder} to create a request. * @return A Java Future containing the result of the DeleteBucket operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucket
*/
default CompletableFuture<DeleteBucketResponse> deleteBucket(Consumer<DeleteBucketRequest.Builder> deleteBucketRequest) {
    // Convenience overload: builds the request from the consumer-configured builder.
    return deleteBucket(DeleteBucketRequest.builder().applyMutation(deleteBucketRequest).build());
}

/**
 *

* Deletes an analytics configuration for the bucket (specified by the analytics configuration ID). *

*

* To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. * The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more * information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage * Class Analysis. *

*

* The following operations are related to DeleteBucketAnalyticsConfiguration: *

* * * @param deleteBucketAnalyticsConfigurationRequest * @return A Java Future containing the result of the DeleteBucketAnalyticsConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketAnalyticsConfiguration
*/
default CompletableFuture<DeleteBucketAnalyticsConfigurationResponse> deleteBucketAnalyticsConfiguration(
        DeleteBucketAnalyticsConfigurationRequest deleteBucketAnalyticsConfigurationRequest) {
    // Generated default: concrete async clients built by the SDK override this.
    throw new UnsupportedOperationException();
}

/**
 *

* Deletes an analytics configuration for the bucket (specified by the analytics configuration ID). *

*

* To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. * The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more * information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage * Class Analysis. *

*

* The following operations are related to DeleteBucketAnalyticsConfiguration: *

* *
*

* This is a convenience which creates an instance of the {@link DeleteBucketAnalyticsConfigurationRequest.Builder} * avoiding the need to create one manually via {@link DeleteBucketAnalyticsConfigurationRequest#builder()} *

* * @param deleteBucketAnalyticsConfigurationRequest * A {@link Consumer} that will call methods on {@link DeleteBucketAnalyticsConfigurationRequest.Builder} to * create a request. * @return A Java Future containing the result of the DeleteBucketAnalyticsConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketAnalyticsConfiguration
*/
default CompletableFuture<DeleteBucketAnalyticsConfigurationResponse> deleteBucketAnalyticsConfiguration(
        Consumer<DeleteBucketAnalyticsConfigurationRequest.Builder> deleteBucketAnalyticsConfigurationRequest) {
    // Convenience overload: builds the request from the consumer-configured builder.
    return deleteBucketAnalyticsConfiguration(DeleteBucketAnalyticsConfigurationRequest.builder()
            .applyMutation(deleteBucketAnalyticsConfigurationRequest).build());
}

/**
 *

* Deletes the cors configuration information set for the bucket. *

*

* To use this operation, you must have permission to perform the s3:PutBucketCORS action. The bucket * owner has this permission by default and can grant this permission to others. *

*

* For information about cors, see Enabling Cross-Origin Resource Sharing in * the Amazon Simple Storage Service Developer Guide. *

*

* Related Resources: *

* * * @param deleteBucketCorsRequest * @return A Java Future containing the result of the DeleteBucketCors operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketCors
*/
default CompletableFuture<DeleteBucketCorsResponse> deleteBucketCors(DeleteBucketCorsRequest deleteBucketCorsRequest) {
    // Generated default: concrete async clients built by the SDK override this.
    throw new UnsupportedOperationException();
}

/**
 *

* Deletes the cors configuration information set for the bucket. *

*

* To use this operation, you must have permission to perform the s3:PutBucketCORS action. The bucket * owner has this permission by default and can grant this permission to others. *

*

* For information about cors, see Enabling Cross-Origin Resource Sharing in * the Amazon Simple Storage Service Developer Guide. *

*

* Related Resources: *

* *
*

* This is a convenience which creates an instance of the {@link DeleteBucketCorsRequest.Builder} avoiding the need * to create one manually via {@link DeleteBucketCorsRequest#builder()} *

* * @param deleteBucketCorsRequest * A {@link Consumer} that will call methods on {@link DeleteBucketCorsRequest.Builder} to create a request. * @return A Java Future containing the result of the DeleteBucketCors operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketCors
*/
default CompletableFuture<DeleteBucketCorsResponse> deleteBucketCors(
        Consumer<DeleteBucketCorsRequest.Builder> deleteBucketCorsRequest) {
    // Convenience overload: builds the request from the consumer-configured builder.
    return deleteBucketCors(DeleteBucketCorsRequest.builder().applyMutation(deleteBucketCorsRequest).build());
}

/**
 *

* This implementation of the DELETE operation removes default encryption from the bucket. For information about the * Amazon S3 default encryption feature, see Amazon S3 Default Bucket * Encryption in the Amazon Simple Storage Service Developer Guide. *

*

* To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration * action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. * For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your * Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide. *

*

* Related Resources *

* * * @param deleteBucketEncryptionRequest * @return A Java Future containing the result of the DeleteBucketEncryption operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketEncryption
*/
default CompletableFuture<DeleteBucketEncryptionResponse> deleteBucketEncryption(
        DeleteBucketEncryptionRequest deleteBucketEncryptionRequest) {
    // Generated default: concrete async clients built by the SDK override this.
    throw new UnsupportedOperationException();
}

/**
 *

* This implementation of the DELETE operation removes default encryption from the bucket. For information about the * Amazon S3 default encryption feature, see Amazon S3 Default Bucket * Encryption in the Amazon Simple Storage Service Developer Guide. *

*

* To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration * action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. * For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your * Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide. *

*

* Related Resources *

* *
*

* This is a convenience which creates an instance of the {@link DeleteBucketEncryptionRequest.Builder} avoiding the * need to create one manually via {@link DeleteBucketEncryptionRequest#builder()} *

* * @param deleteBucketEncryptionRequest * A {@link Consumer} that will call methods on {@link DeleteBucketEncryptionRequest.Builder} to create a * request. * @return A Java Future containing the result of the DeleteBucketEncryption operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketEncryption
*/
default CompletableFuture<DeleteBucketEncryptionResponse> deleteBucketEncryption(
        Consumer<DeleteBucketEncryptionRequest.Builder> deleteBucketEncryptionRequest) {
    // Convenience overload: builds the request from the consumer-configured builder.
    return deleteBucketEncryption(DeleteBucketEncryptionRequest.builder()
            .applyMutation(deleteBucketEncryptionRequest).build());
}

/**
 *

* Deletes the S3 Intelligent-Tiering configuration from the specified bucket. *

*

* The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to * the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering * delivers automatic cost savings by moving data between access tiers, when access patterns change. *

*

* The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at * least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects * can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering * storage class. *

*

* If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 * days. For more information, see Storage * class for automatically optimizing frequently and infrequently accessed objects. *

*

* Operations related to DeleteBucketIntelligentTieringConfiguration include: *

* * * @param deleteBucketIntelligentTieringConfigurationRequest * @return A Java Future containing the result of the DeleteBucketIntelligentTieringConfiguration operation returned * by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketIntelligentTieringConfiguration
*/
default CompletableFuture<DeleteBucketIntelligentTieringConfigurationResponse> deleteBucketIntelligentTieringConfiguration(
        DeleteBucketIntelligentTieringConfigurationRequest deleteBucketIntelligentTieringConfigurationRequest) {
    // Generated default: concrete async clients built by the SDK override this.
    throw new UnsupportedOperationException();
}

/**
 *

* Deletes the S3 Intelligent-Tiering configuration from the specified bucket. *

*

* The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to * the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering * delivers automatic cost savings by moving data between access tiers, when access patterns change. *

*

* The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at * least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects * can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering * storage class. *

*

* If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 * days. For more information, see Storage * class for automatically optimizing frequently and infrequently accessed objects. *

*

* Operations related to DeleteBucketIntelligentTieringConfiguration include: *

* *
*

* This is a convenience which creates an instance of the * {@link DeleteBucketIntelligentTieringConfigurationRequest.Builder} avoiding the need to create one manually via * {@link DeleteBucketIntelligentTieringConfigurationRequest#builder()} *

* * @param deleteBucketIntelligentTieringConfigurationRequest * A {@link Consumer} that will call methods on * {@link DeleteBucketIntelligentTieringConfigurationRequest.Builder} to create a request. * @return A Java Future containing the result of the DeleteBucketIntelligentTieringConfiguration operation returned * by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketIntelligentTieringConfiguration
*/
default CompletableFuture<DeleteBucketIntelligentTieringConfigurationResponse> deleteBucketIntelligentTieringConfiguration(
        Consumer<DeleteBucketIntelligentTieringConfigurationRequest.Builder> deleteBucketIntelligentTieringConfigurationRequest) {
    // Convenience overload: builds the request from the consumer-configured builder.
    return deleteBucketIntelligentTieringConfiguration(DeleteBucketIntelligentTieringConfigurationRequest.builder()
            .applyMutation(deleteBucketIntelligentTieringConfigurationRequest).build());
}

/**
 *

* Deletes an inventory configuration (identified by the inventory ID) from the bucket. *

*

* To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. * The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more * information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* For information about the Amazon S3 inventory feature, see Amazon S3 Inventory. *

*

* Operations related to DeleteBucketInventoryConfiguration include: *

* * * @param deleteBucketInventoryConfigurationRequest * @return A Java Future containing the result of the DeleteBucketInventoryConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketInventoryConfiguration
*/
default CompletableFuture<DeleteBucketInventoryConfigurationResponse> deleteBucketInventoryConfiguration(
        DeleteBucketInventoryConfigurationRequest deleteBucketInventoryConfigurationRequest) {
    // Generated default: concrete async clients built by the SDK override this.
    throw new UnsupportedOperationException();
}

/**
 *

* Deletes an inventory configuration (identified by the inventory ID) from the bucket. *

*

* To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. * The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more * information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* For information about the Amazon S3 inventory feature, see Amazon S3 Inventory. *

*

* Operations related to DeleteBucketInventoryConfiguration include: *

* *
*

* This is a convenience which creates an instance of the {@link DeleteBucketInventoryConfigurationRequest.Builder} * avoiding the need to create one manually via {@link DeleteBucketInventoryConfigurationRequest#builder()} *

* * @param deleteBucketInventoryConfigurationRequest * A {@link Consumer} that will call methods on {@link DeleteBucketInventoryConfigurationRequest.Builder} to * create a request. * @return A Java Future containing the result of the DeleteBucketInventoryConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketInventoryConfiguration */ default CompletableFuture deleteBucketInventoryConfiguration( Consumer deleteBucketInventoryConfigurationRequest) { return deleteBucketInventoryConfiguration(DeleteBucketInventoryConfigurationRequest.builder() .applyMutation(deleteBucketInventoryConfigurationRequest).build()); } /** *

* Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes all the lifecycle configuration * rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 no longer * automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration. *

*

* To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration action. * By default, the bucket owner has this permission and the bucket owner can grant this permission to others. *

*

* There is usually some time lag before lifecycle configuration deletion is fully propagated to all the Amazon S3 * systems. *

*

* For more information about the object expiration, see Elements to Describe Lifecycle Actions. *

*

* Related actions include: *

* * * @param deleteBucketLifecycleRequest * @return A Java Future containing the result of the DeleteBucketLifecycle operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketLifecycle */ default CompletableFuture deleteBucketLifecycle( DeleteBucketLifecycleRequest deleteBucketLifecycleRequest) { throw new UnsupportedOperationException(); } /** *

* Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes all the lifecycle configuration * rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 no longer * automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration. *

*

* To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration action. * By default, the bucket owner has this permission and the bucket owner can grant this permission to others. *

*

* There is usually some time lag before lifecycle configuration deletion is fully propagated to all the Amazon S3 * systems. *

*

* For more information about the object expiration, see Elements to Describe Lifecycle Actions. *

*

* Related actions include: *

* *
*

* This is a convenience which creates an instance of the {@link DeleteBucketLifecycleRequest.Builder} avoiding the * need to create one manually via {@link DeleteBucketLifecycleRequest#builder()} *

* * @param deleteBucketLifecycleRequest * A {@link Consumer} that will call methods on {@link DeleteBucketLifecycleRequest.Builder} to create a * request. * @return A Java Future containing the result of the DeleteBucketLifecycle operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketLifecycle */ default CompletableFuture deleteBucketLifecycle( Consumer deleteBucketLifecycleRequest) { return deleteBucketLifecycle(DeleteBucketLifecycleRequest.builder().applyMutation(deleteBucketLifecycleRequest).build()); } /** *

* Deletes a metrics configuration for the Amazon CloudWatch request metrics (specified by the metrics configuration * ID) from the bucket. Note that this doesn't include the daily storage metrics. *

*

* To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. * The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more * information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon * CloudWatch. *

*

* The following operations are related to DeleteBucketMetricsConfiguration: *

* * * @param deleteBucketMetricsConfigurationRequest * @return A Java Future containing the result of the DeleteBucketMetricsConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketMetricsConfiguration */ default CompletableFuture deleteBucketMetricsConfiguration( DeleteBucketMetricsConfigurationRequest deleteBucketMetricsConfigurationRequest) { throw new UnsupportedOperationException(); } /** *

* Deletes a metrics configuration for the Amazon CloudWatch request metrics (specified by the metrics configuration * ID) from the bucket. Note that this doesn't include the daily storage metrics. *

*

* To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. * The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more * information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon * CloudWatch. *

*

* The following operations are related to DeleteBucketMetricsConfiguration: *

* *
*

* This is a convenience which creates an instance of the {@link DeleteBucketMetricsConfigurationRequest.Builder} * avoiding the need to create one manually via {@link DeleteBucketMetricsConfigurationRequest#builder()} *

* * @param deleteBucketMetricsConfigurationRequest * A {@link Consumer} that will call methods on {@link DeleteBucketMetricsConfigurationRequest.Builder} to * create a request. * @return A Java Future containing the result of the DeleteBucketMetricsConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketMetricsConfiguration */ default CompletableFuture deleteBucketMetricsConfiguration( Consumer deleteBucketMetricsConfigurationRequest) { return deleteBucketMetricsConfiguration(DeleteBucketMetricsConfigurationRequest.builder() .applyMutation(deleteBucketMetricsConfigurationRequest).build()); } /** *

* Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the * s3:PutBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying Permissions in a * Policy. *

*

* For information about Amazon S3 Object Ownership, see Using Object Ownership. *

*

* The following operations are related to DeleteBucketOwnershipControls: *

* * * @param deleteBucketOwnershipControlsRequest * @return A Java Future containing the result of the DeleteBucketOwnershipControls operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketOwnershipControls */ default CompletableFuture deleteBucketOwnershipControls( DeleteBucketOwnershipControlsRequest deleteBucketOwnershipControlsRequest) { throw new UnsupportedOperationException(); } /** *

* Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the * s3:PutBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying Permissions in a * Policy. *

*

* For information about Amazon S3 Object Ownership, see Using Object Ownership. *

*

* The following operations are related to DeleteBucketOwnershipControls: *

* *
*

* This is a convenience which creates an instance of the {@link DeleteBucketOwnershipControlsRequest.Builder} * avoiding the need to create one manually via {@link DeleteBucketOwnershipControlsRequest#builder()} *

* * @param deleteBucketOwnershipControlsRequest * A {@link Consumer} that will call methods on {@link DeleteBucketOwnershipControlsRequest.Builder} to * create a request. * @return A Java Future containing the result of the DeleteBucketOwnershipControls operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketOwnershipControls */ default CompletableFuture deleteBucketOwnershipControls( Consumer deleteBucketOwnershipControlsRequest) { return deleteBucketOwnershipControls(DeleteBucketOwnershipControlsRequest.builder() .applyMutation(deleteBucketOwnershipControlsRequest).build()); } /** *

* This implementation of the DELETE operation uses the policy subresource to delete the policy of a specified * bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the * calling identity must have the DeleteBucketPolicy permissions on the specified bucket and belong to * the bucket owner's account to use this operation. *

*

* If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied * error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's * account, Amazon S3 returns a 405 Method Not Allowed error. *

* *

* As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even * if the policy explicitly denies the root user the ability to perform this action. *

*
*

* For more information about bucket policies, see Using Bucket Policies and * UserPolicies. *

*

* The following operations are related to DeleteBucketPolicy *

* * * @param deleteBucketPolicyRequest * @return A Java Future containing the result of the DeleteBucketPolicy operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketPolicy */ default CompletableFuture deleteBucketPolicy(DeleteBucketPolicyRequest deleteBucketPolicyRequest) { throw new UnsupportedOperationException(); } /** *

* This implementation of the DELETE operation uses the policy subresource to delete the policy of a specified * bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the * calling identity must have the DeleteBucketPolicy permissions on the specified bucket and belong to * the bucket owner's account to use this operation. *

*

* If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied * error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's * account, Amazon S3 returns a 405 Method Not Allowed error. *

* *

* As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even * if the policy explicitly denies the root user the ability to perform this action. *

*
*

* For more information about bucket policies, see Using Bucket Policies and * UserPolicies. *

*

* The following operations are related to DeleteBucketPolicy *

* *
*

* This is a convenience which creates an instance of the {@link DeleteBucketPolicyRequest.Builder} avoiding the * need to create one manually via {@link DeleteBucketPolicyRequest#builder()} *

* * @param deleteBucketPolicyRequest * A {@link Consumer} that will call methods on {@link DeleteBucketPolicyRequest.Builder} to create a * request. * @return A Java Future containing the result of the DeleteBucketPolicy operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketPolicy */ default CompletableFuture deleteBucketPolicy( Consumer deleteBucketPolicyRequest) { return deleteBucketPolicy(DeleteBucketPolicyRequest.builder().applyMutation(deleteBucketPolicyRequest).build()); } /** *

* Deletes the replication configuration from the bucket. *

*

* To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration * action. The bucket owner has these permissions by default and can grant it to others. For more information about * permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

* *

* It can take a while for the deletion of a replication configuration to fully propagate. *

*
*

* For information about replication configuration, see Replication in the Amazon S3 * Developer Guide. *

*

* The following operations are related to DeleteBucketReplication: *

* * * @param deleteBucketReplicationRequest * @return A Java Future containing the result of the DeleteBucketReplication operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketReplication */ default CompletableFuture deleteBucketReplication( DeleteBucketReplicationRequest deleteBucketReplicationRequest) { throw new UnsupportedOperationException(); } /** *

* Deletes the replication configuration from the bucket. *

*

* To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration * action. The bucket owner has these permissions by default and can grant it to others. For more information about * permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

* *

* It can take a while for the deletion of a replication configuration to fully propagate. *

*
*

* For information about replication configuration, see Replication in the Amazon S3 * Developer Guide. *

*

* The following operations are related to DeleteBucketReplication: *

* *
*

* This is a convenience which creates an instance of the {@link DeleteBucketReplicationRequest.Builder} avoiding * the need to create one manually via {@link DeleteBucketReplicationRequest#builder()} *

* * @param deleteBucketReplicationRequest * A {@link Consumer} that will call methods on {@link DeleteBucketReplicationRequest.Builder} to create a * request. * @return A Java Future containing the result of the DeleteBucketReplication operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketReplication */ default CompletableFuture deleteBucketReplication( Consumer deleteBucketReplicationRequest) { return deleteBucketReplication(DeleteBucketReplicationRequest.builder().applyMutation(deleteBucketReplicationRequest) .build()); } /** *

* Deletes the tags from the bucket. *

*

* To use this operation, you must have permission to perform the s3:PutBucketTagging action. By * default, the bucket owner has this permission and can grant this permission to others. *

*

* The following operations are related to DeleteBucketTagging: *

* * * @param deleteBucketTaggingRequest * @return A Java Future containing the result of the DeleteBucketTagging operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketTagging */ default CompletableFuture deleteBucketTagging( DeleteBucketTaggingRequest deleteBucketTaggingRequest) { throw new UnsupportedOperationException(); } /** *

* Deletes the tags from the bucket. *

*

* To use this operation, you must have permission to perform the s3:PutBucketTagging action. By * default, the bucket owner has this permission and can grant this permission to others. *

*

* The following operations are related to DeleteBucketTagging: *

* *
*

* This is a convenience which creates an instance of the {@link DeleteBucketTaggingRequest.Builder} avoiding the * need to create one manually via {@link DeleteBucketTaggingRequest#builder()} *

* * @param deleteBucketTaggingRequest * A {@link Consumer} that will call methods on {@link DeleteBucketTaggingRequest.Builder} to create a * request. * @return A Java Future containing the result of the DeleteBucketTagging operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketTagging */ default CompletableFuture deleteBucketTagging( Consumer deleteBucketTaggingRequest) { return deleteBucketTagging(DeleteBucketTaggingRequest.builder().applyMutation(deleteBucketTaggingRequest).build()); } /** *

* This operation removes the website configuration for a bucket. Amazon S3 returns a 200 OK response * upon successfully deleting a website configuration on the specified bucket. You will get a 200 OK * response if the website configuration you are trying to delete does not exist on the bucket. Amazon S3 returns a * 404 response if the bucket specified in the request does not exist. *

*

* This DELETE operation requires the S3:DeleteBucketWebsite permission. By default, only the bucket * owner can delete the website configuration attached to a bucket. However, bucket owners can grant other users * permission to delete the website configuration by writing a bucket policy granting them the * S3:DeleteBucketWebsite permission. *

*

* For more information about hosting websites, see Hosting Websites on Amazon S3. *

*

* The following operations are related to DeleteBucketWebsite: *

* * * @param deleteBucketWebsiteRequest * @return A Java Future containing the result of the DeleteBucketWebsite operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketWebsite */ default CompletableFuture deleteBucketWebsite( DeleteBucketWebsiteRequest deleteBucketWebsiteRequest) { throw new UnsupportedOperationException(); } /** *

* This operation removes the website configuration for a bucket. Amazon S3 returns a 200 OK response * upon successfully deleting a website configuration on the specified bucket. You will get a 200 OK * response if the website configuration you are trying to delete does not exist on the bucket. Amazon S3 returns a * 404 response if the bucket specified in the request does not exist. *

*

* This DELETE operation requires the S3:DeleteBucketWebsite permission. By default, only the bucket * owner can delete the website configuration attached to a bucket. However, bucket owners can grant other users * permission to delete the website configuration by writing a bucket policy granting them the * S3:DeleteBucketWebsite permission. *

*

* For more information about hosting websites, see Hosting Websites on Amazon S3. *

*

* The following operations are related to DeleteBucketWebsite: *

* *
*

* This is a convenience which creates an instance of the {@link DeleteBucketWebsiteRequest.Builder} avoiding the * need to create one manually via {@link DeleteBucketWebsiteRequest#builder()} *

* * @param deleteBucketWebsiteRequest * A {@link Consumer} that will call methods on {@link DeleteBucketWebsiteRequest.Builder} to create a * request. * @return A Java Future containing the result of the DeleteBucketWebsite operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteBucketWebsite */ default CompletableFuture deleteBucketWebsite( Consumer deleteBucketWebsiteRequest) { return deleteBucketWebsite(DeleteBucketWebsiteRequest.builder().applyMutation(deleteBucketWebsiteRequest).build()); } /** *

* Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest * version of the object. If there isn't a null version, Amazon S3 does not remove any objects. *

*

* To remove a specific version, you must be the bucket owner and you must use the version Id subresource. Using * this subresource permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the * response header, x-amz-delete-marker, to true. *

*

* If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, * you must include the x-amz-mfa request header in the DELETE versionId request. Requests * that include x-amz-mfa must use HTTPS. *

*

* For more information about MFA Delete, see Using MFA Delete. To see sample * requests that use versioning, see Sample * Request. *

*

* You can delete objects by explicitly calling the DELETE Object API or configure its lifecycle (PutBucketLifecycle) to * enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects * from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, and * s3:PutLifeCycleConfiguration actions. *

*

* The following operation is related to DeleteObject: *

* * * @param deleteObjectRequest * @return A Java Future containing the result of the DeleteObject operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteObject */ default CompletableFuture deleteObject(DeleteObjectRequest deleteObjectRequest) { throw new UnsupportedOperationException(); } /** *

* Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest * version of the object. If there isn't a null version, Amazon S3 does not remove any objects. *

*

* To remove a specific version, you must be the bucket owner and you must use the version Id subresource. Using * this subresource permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the * response header, x-amz-delete-marker, to true. *

*

* If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, * you must include the x-amz-mfa request header in the DELETE versionId request. Requests * that include x-amz-mfa must use HTTPS. *

*

* For more information about MFA Delete, see Using MFA Delete. To see sample * requests that use versioning, see Sample * Request. *

*

* You can delete objects by explicitly calling the DELETE Object API or configure its lifecycle (PutBucketLifecycle) to * enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects * from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, and * s3:PutLifeCycleConfiguration actions. *

*

* The following operation is related to DeleteObject: *

* *
*

* This is a convenience which creates an instance of the {@link DeleteObjectRequest.Builder} avoiding the need to * create one manually via {@link DeleteObjectRequest#builder()} *

* * @param deleteObjectRequest * A {@link Consumer} that will call methods on {@link DeleteObjectRequest.Builder} to create a request. * @return A Java Future containing the result of the DeleteObject operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteObject */ default CompletableFuture deleteObject(Consumer deleteObjectRequest) { return deleteObject(DeleteObjectRequest.builder().applyMutation(deleteObjectRequest).build()); } /** *

* Removes the entire tag set from the specified object. For more information about managing object tags, see Object Tagging. *

*

* To use this operation, you must have permission to perform the s3:DeleteObjectTagging action. *

*

* To delete tags of a specific object version, add the versionId query parameter in the request. You * will need permission for the s3:DeleteObjectVersionTagging action. *

*

* The following operations are related to DeleteBucketMetricsConfiguration: *

* * * @param deleteObjectTaggingRequest * @return A Java Future containing the result of the DeleteObjectTagging operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteObjectTagging */ default CompletableFuture deleteObjectTagging( DeleteObjectTaggingRequest deleteObjectTaggingRequest) { throw new UnsupportedOperationException(); } /** *

* Removes the entire tag set from the specified object. For more information about managing object tags, see Object Tagging. *

*

* To use this operation, you must have permission to perform the s3:DeleteObjectTagging action. *

*

* To delete tags of a specific object version, add the versionId query parameter in the request. You * will need permission for the s3:DeleteObjectVersionTagging action. *

*

* The following operations are related to DeleteBucketMetricsConfiguration: *

* *
*

* This is a convenience which creates an instance of the {@link DeleteObjectTaggingRequest.Builder} avoiding the * need to create one manually via {@link DeleteObjectTaggingRequest#builder()} *

* * @param deleteObjectTaggingRequest * A {@link Consumer} that will call methods on {@link DeleteObjectTaggingRequest.Builder} to create a * request. * @return A Java Future containing the result of the DeleteObjectTagging operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteObjectTagging */ default CompletableFuture deleteObjectTagging( Consumer deleteObjectTaggingRequest) { return deleteObjectTagging(DeleteObjectTaggingRequest.builder().applyMutation(deleteObjectTaggingRequest).build()); } /** *

* This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the * object keys that you want to delete, then this operation provides a suitable alternative to sending individual * delete requests, reducing per-request overhead. *

*

* The request contains a list of up to 1000 keys that you want to delete. In the XML, you provide the object key * names, and optionally, version IDs if you want to delete a specific version of the object from a * versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that * delete, success, or failure, in the response. Note that if the object specified in the request is not found, * Amazon S3 returns the result as deleted. *

*

* The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode * in which the response includes the result of deletion of each key in your request. In quiet mode the response * includes only keys where the delete operation encountered an error. For a successful deletion, the operation does * not return any information about the delete in the response body. *

*

* When performing this operation on an MFA Delete enabled bucket, that attempts to delete any versioned objects, * you must include an MFA token. If you do not provide one, the entire request will fail, even if there are * non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys * in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA * Delete. *

*

* Finally, the Content-MD5 header is required for all Multi-Object Delete requests. Amazon S3 uses the header value * to ensure that your request body has not been altered in transit. *

*

* The following operations are related to DeleteObjects: *

* * * @param deleteObjectsRequest * @return A Java Future containing the result of the DeleteObjects operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteObjects */ default CompletableFuture deleteObjects(DeleteObjectsRequest deleteObjectsRequest) { throw new UnsupportedOperationException(); } /** *

* This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the * object keys that you want to delete, then this operation provides a suitable alternative to sending individual * delete requests, reducing per-request overhead. *

*

* The request contains a list of up to 1000 keys that you want to delete. In the XML, you provide the object key * names, and optionally, version IDs if you want to delete a specific version of the object from a * versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that * delete, success, or failure, in the response. Note that if the object specified in the request is not found, * Amazon S3 returns the result as deleted. *

*

* The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode * in which the response includes the result of deletion of each key in your request. In quiet mode the response * includes only keys where the delete operation encountered an error. For a successful deletion, the operation does * not return any information about the delete in the response body. *

*

* When performing this operation on an MFA Delete enabled bucket, that attempts to delete any versioned objects, * you must include an MFA token. If you do not provide one, the entire request will fail, even if there are * non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys * in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA * Delete. *

*

* Finally, the Content-MD5 header is required for all Multi-Object Delete requests. Amazon S3 uses the header value * to ensure that your request body has not been altered in transit. *

*

* The following operations are related to DeleteObjects: *

* *
*

* This is a convenience which creates an instance of the {@link DeleteObjectsRequest.Builder} avoiding the need to * create one manually via {@link DeleteObjectsRequest#builder()} *

* * @param deleteObjectsRequest * A {@link Consumer} that will call methods on {@link DeleteObjectsRequest.Builder} to create a request. * @return A Java Future containing the result of the DeleteObjects operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeleteObjects */ default CompletableFuture deleteObjects(Consumer deleteObjectsRequest) { return deleteObjects(DeleteObjectsRequest.builder().applyMutation(deleteObjectsRequest).build()); } /** *

* Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must * have the s3:PutBucketPublicAccessBlock permission. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* The following operations are related to DeletePublicAccessBlock: *

* * * @param deletePublicAccessBlockRequest * @return A Java Future containing the result of the DeletePublicAccessBlock operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeletePublicAccessBlock */ default CompletableFuture deletePublicAccessBlock( DeletePublicAccessBlockRequest deletePublicAccessBlockRequest) { throw new UnsupportedOperationException(); } /** *

* Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must * have the s3:PutBucketPublicAccessBlock permission. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* The following operations are related to DeletePublicAccessBlock: *

* *
*

* This is a convenience which creates an instance of the {@link DeletePublicAccessBlockRequest.Builder} avoiding * the need to create one manually via {@link DeletePublicAccessBlockRequest#builder()} *

* * @param deletePublicAccessBlockRequest * A {@link Consumer} that will call methods on {@link DeletePublicAccessBlockRequest.Builder} to create a * request. * @return A Java Future containing the result of the DeletePublicAccessBlock operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.DeletePublicAccessBlock */ default CompletableFuture deletePublicAccessBlock( Consumer deletePublicAccessBlockRequest) { return deletePublicAccessBlock(DeletePublicAccessBlockRequest.builder().applyMutation(deletePublicAccessBlockRequest) .build()); } /** *

* This implementation of the GET operation uses the accelerate subresource to return the Transfer * Acceleration state of a bucket, which is either Enabled or Suspended. Amazon S3 * Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to and from * Amazon S3. *

*

* To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration action. * The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more * information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your * Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide. *

*

* You set the Transfer Acceleration state of an existing bucket to Enabled or Suspended * by using the * PutBucketAccelerateConfiguration operation. *

*

* A GET accelerate request does not return a state value for a bucket that has no transfer * acceleration state. A bucket has no Transfer Acceleration state if a state has never been set on the bucket. *

*

* For more information about transfer acceleration, see Transfer Acceleration in * the Amazon Simple Storage Service Developer Guide. *

*

* Related Resources *

* * * @param getBucketAccelerateConfigurationRequest * @return A Java Future containing the result of the GetBucketAccelerateConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketAccelerateConfiguration */ default CompletableFuture getBucketAccelerateConfiguration( GetBucketAccelerateConfigurationRequest getBucketAccelerateConfigurationRequest) { throw new UnsupportedOperationException(); } /** *

* This implementation of the GET operation uses the accelerate subresource to return the Transfer * Acceleration state of a bucket, which is either Enabled or Suspended. Amazon S3 * Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to and from * Amazon S3. *

*

* To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration action. * The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more * information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your * Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide. *

*

* You set the Transfer Acceleration state of an existing bucket to Enabled or Suspended * by using the * PutBucketAccelerateConfiguration operation. *

*

* A GET accelerate request does not return a state value for a bucket that has no transfer * acceleration state. A bucket has no Transfer Acceleration state if a state has never been set on the bucket. *

*

* For more information about transfer acceleration, see Transfer Acceleration in * the Amazon Simple Storage Service Developer Guide. *

*

* Related Resources *

* *
*

* This is a convenience which creates an instance of the {@link GetBucketAccelerateConfigurationRequest.Builder} * avoiding the need to create one manually via {@link GetBucketAccelerateConfigurationRequest#builder()} *

* * @param getBucketAccelerateConfigurationRequest * A {@link Consumer} that will call methods on {@link GetBucketAccelerateConfigurationRequest.Builder} to * create a request. * @return A Java Future containing the result of the GetBucketAccelerateConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketAccelerateConfiguration */ default CompletableFuture getBucketAccelerateConfiguration( Consumer getBucketAccelerateConfigurationRequest) { return getBucketAccelerateConfiguration(GetBucketAccelerateConfigurationRequest.builder() .applyMutation(getBucketAccelerateConfigurationRequest).build()); } /** *

* This implementation of the GET operation uses the acl subresource to return the access * control list (ACL) of a bucket. To use GET to return the ACL of the bucket, you must have * READ_ACP access to the bucket. If READ_ACP permission is granted to the anonymous user, * you can return the ACL of the bucket without using an authorization header. *

*

* Related Resources *

* * * @param getBucketAclRequest * @return A Java Future containing the result of the GetBucketAcl operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketAcl */ default CompletableFuture getBucketAcl(GetBucketAclRequest getBucketAclRequest) { throw new UnsupportedOperationException(); } /** *

* This implementation of the GET operation uses the acl subresource to return the access * control list (ACL) of a bucket. To use GET to return the ACL of the bucket, you must have * READ_ACP access to the bucket. If READ_ACP permission is granted to the anonymous user, * you can return the ACL of the bucket without using an authorization header. *

*

* Related Resources *

* *
*

* This is a convenience which creates an instance of the {@link GetBucketAclRequest.Builder} avoiding the need to * create one manually via {@link GetBucketAclRequest#builder()} *

* * @param getBucketAclRequest * A {@link Consumer} that will call methods on {@link GetBucketAclRequest.Builder} to create a request. * @return A Java Future containing the result of the GetBucketAcl operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketAcl */ default CompletableFuture getBucketAcl(Consumer getBucketAclRequest) { return getBucketAcl(GetBucketAclRequest.builder().applyMutation(getBucketAclRequest).build()); } /** *

* This implementation of the GET operation returns an analytics configuration (identified by the analytics * configuration ID) from the bucket. *

*

* To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. * The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more * information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide. *

*

* For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage * Class Analysis in the Amazon Simple Storage Service Developer Guide. *

*

* Related Resources *

* * * @param getBucketAnalyticsConfigurationRequest * @return A Java Future containing the result of the GetBucketAnalyticsConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketAnalyticsConfiguration */ default CompletableFuture getBucketAnalyticsConfiguration( GetBucketAnalyticsConfigurationRequest getBucketAnalyticsConfigurationRequest) { throw new UnsupportedOperationException(); } /** *

* This implementation of the GET operation returns an analytics configuration (identified by the analytics * configuration ID) from the bucket. *

*

* To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. * The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more * information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide. *

*

* For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage * Class Analysis in the Amazon Simple Storage Service Developer Guide. *

*

* Related Resources *

* *
*

* This is a convenience which creates an instance of the {@link GetBucketAnalyticsConfigurationRequest.Builder} * avoiding the need to create one manually via {@link GetBucketAnalyticsConfigurationRequest#builder()} *

* * @param getBucketAnalyticsConfigurationRequest * A {@link Consumer} that will call methods on {@link GetBucketAnalyticsConfigurationRequest.Builder} to * create a request. * @return A Java Future containing the result of the GetBucketAnalyticsConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketAnalyticsConfiguration */ default CompletableFuture getBucketAnalyticsConfiguration( Consumer getBucketAnalyticsConfigurationRequest) { return getBucketAnalyticsConfiguration(GetBucketAnalyticsConfigurationRequest.builder() .applyMutation(getBucketAnalyticsConfigurationRequest).build()); } /** *

* Returns the cors configuration information set for the bucket. *

*

* To use this operation, you must have permission to perform the s3:GetBucketCORS action. By default, the bucket * owner has this permission and can grant it to others. *

*

* For more information about cors, see * Enabling Cross-Origin Resource Sharing. *

*

* The following operations are related to GetBucketCors: *

* * * @param getBucketCorsRequest * @return A Java Future containing the result of the GetBucketCors operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketCors */ default CompletableFuture getBucketCors(GetBucketCorsRequest getBucketCorsRequest) { throw new UnsupportedOperationException(); } /** *

* Returns the cors configuration information set for the bucket. *

*

* To use this operation, you must have permission to perform the s3:GetBucketCORS action. By default, the bucket * owner has this permission and can grant it to others. *

*

* For more information about cors, see * Enabling Cross-Origin Resource Sharing. *

*

* The following operations are related to GetBucketCors: *

* *
*

* This is a convenience which creates an instance of the {@link GetBucketCorsRequest.Builder} avoiding the need to * create one manually via {@link GetBucketCorsRequest#builder()} *

* * @param getBucketCorsRequest * A {@link Consumer} that will call methods on {@link GetBucketCorsRequest.Builder} to create a request. * @return A Java Future containing the result of the GetBucketCors operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketCors */ default CompletableFuture getBucketCors(Consumer getBucketCorsRequest) { return getBucketCors(GetBucketCorsRequest.builder().applyMutation(getBucketCorsRequest).build()); } /** *

* Returns the default encryption configuration for an Amazon S3 bucket. For information about the Amazon S3 default * encryption feature, see Amazon * S3 Default Bucket Encryption. *

*

* To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration action. * The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more * information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* The following operations are related to GetBucketEncryption: *

* * * @param getBucketEncryptionRequest * @return A Java Future containing the result of the GetBucketEncryption operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketEncryption */ default CompletableFuture getBucketEncryption( GetBucketEncryptionRequest getBucketEncryptionRequest) { throw new UnsupportedOperationException(); } /** *

* Returns the default encryption configuration for an Amazon S3 bucket. For information about the Amazon S3 default * encryption feature, see Amazon * S3 Default Bucket Encryption. *

*

* To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration action. * The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more * information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* The following operations are related to GetBucketEncryption: *

* *
*

* This is a convenience which creates an instance of the {@link GetBucketEncryptionRequest.Builder} avoiding the * need to create one manually via {@link GetBucketEncryptionRequest#builder()} *

* * @param getBucketEncryptionRequest * A {@link Consumer} that will call methods on {@link GetBucketEncryptionRequest.Builder} to create a * request. * @return A Java Future containing the result of the GetBucketEncryption operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketEncryption */ default CompletableFuture getBucketEncryption( Consumer getBucketEncryptionRequest) { return getBucketEncryption(GetBucketEncryptionRequest.builder().applyMutation(getBucketEncryptionRequest).build()); } /** *

* Gets the S3 Intelligent-Tiering configuration from the specified bucket. *

*

* The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to * the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering * delivers automatic cost savings by moving data between access tiers, when access patterns change. *

*

* The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at * least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects * can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering * storage class. *

*

* If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 * days. For more information, see Storage * class for automatically optimizing frequently and infrequently accessed objects. *

*

* Operations related to GetBucketIntelligentTieringConfiguration include: *

* * * @param getBucketIntelligentTieringConfigurationRequest * @return A Java Future containing the result of the GetBucketIntelligentTieringConfiguration operation returned by * the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketIntelligentTieringConfiguration */ default CompletableFuture getBucketIntelligentTieringConfiguration( GetBucketIntelligentTieringConfigurationRequest getBucketIntelligentTieringConfigurationRequest) { throw new UnsupportedOperationException(); } /** *

* Gets the S3 Intelligent-Tiering configuration from the specified bucket. *

*

* The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to * the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering * delivers automatic cost savings by moving data between access tiers, when access patterns change. *

*

* The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at * least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects * can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering * storage class. *

*

* If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 * days. For more information, see Storage * class for automatically optimizing frequently and infrequently accessed objects. *

*

* Operations related to GetBucketIntelligentTieringConfiguration include: *

* *
*

* This is a convenience which creates an instance of the * {@link GetBucketIntelligentTieringConfigurationRequest.Builder} avoiding the need to create one manually via * {@link GetBucketIntelligentTieringConfigurationRequest#builder()} *

* * @param getBucketIntelligentTieringConfigurationRequest * A {@link Consumer} that will call methods on * {@link GetBucketIntelligentTieringConfigurationRequest.Builder} to create a request. * @return A Java Future containing the result of the GetBucketIntelligentTieringConfiguration operation returned by * the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketIntelligentTieringConfiguration */ default CompletableFuture getBucketIntelligentTieringConfiguration( Consumer getBucketIntelligentTieringConfigurationRequest) { return getBucketIntelligentTieringConfiguration(GetBucketIntelligentTieringConfigurationRequest.builder() .applyMutation(getBucketIntelligentTieringConfigurationRequest).build()); } /** *

* Returns an inventory configuration (identified by the inventory configuration ID) from the bucket. *

*

* To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration action. * The bucket owner has this permission by default and can grant this permission to others. For more information * about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* For information about the Amazon S3 inventory feature, see Amazon S3 Inventory. *

*

* The following operations are related to GetBucketInventoryConfiguration: *

* * * @param getBucketInventoryConfigurationRequest * @return A Java Future containing the result of the GetBucketInventoryConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketInventoryConfiguration */ default CompletableFuture getBucketInventoryConfiguration( GetBucketInventoryConfigurationRequest getBucketInventoryConfigurationRequest) { throw new UnsupportedOperationException(); } /** *

* Returns an inventory configuration (identified by the inventory configuration ID) from the bucket. *

*

* To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration action. * The bucket owner has this permission by default and can grant this permission to others. For more information * about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* For information about the Amazon S3 inventory feature, see Amazon S3 Inventory. *

*

* The following operations are related to GetBucketInventoryConfiguration: *

* *
*

* This is a convenience which creates an instance of the {@link GetBucketInventoryConfigurationRequest.Builder} * avoiding the need to create one manually via {@link GetBucketInventoryConfigurationRequest#builder()} *

* * @param getBucketInventoryConfigurationRequest * A {@link Consumer} that will call methods on {@link GetBucketInventoryConfigurationRequest.Builder} to * create a request. * @return A Java Future containing the result of the GetBucketInventoryConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketInventoryConfiguration */ default CompletableFuture getBucketInventoryConfiguration( Consumer getBucketInventoryConfigurationRequest) { return getBucketInventoryConfiguration(GetBucketInventoryConfigurationRequest.builder() .applyMutation(getBucketInventoryConfigurationRequest).build()); } /** * *

* Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or * more object tags, or a combination of both. Accordingly, this section describes the latest API. The response * describes the new filter element that you can use to specify a filter to select a subset of objects to which the * rule applies. If you are using a previous version of the lifecycle configuration, it still works. For the earlier * API description, see GetBucketLifecycle. *

*
*

* Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, * see Object Lifecycle * Management. *

*

* To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action. * The bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more * information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* GetBucketLifecycleConfiguration has the following special error: *

*
    *
  • *

    * Error code: NoSuchLifecycleConfiguration *

    *
      *
    • *

      * Description: The lifecycle configuration does not exist. *

      *
    • *
    • *

      * HTTP Status Code: 404 Not Found *

      *
    • *
    • *

      * SOAP Fault Code Prefix: Client *

      *
    • *
    *
  • *
*

* The following operations are related to GetBucketLifecycleConfiguration: *

* * * @param getBucketLifecycleConfigurationRequest * @return A Java Future containing the result of the GetBucketLifecycleConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketLifecycleConfiguration */ default CompletableFuture getBucketLifecycleConfiguration( GetBucketLifecycleConfigurationRequest getBucketLifecycleConfigurationRequest) { throw new UnsupportedOperationException(); } /** * *

* Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or * more object tags, or a combination of both. Accordingly, this section describes the latest API. The response * describes the new filter element that you can use to specify a filter to select a subset of objects to which the * rule applies. If you are using a previous version of the lifecycle configuration, it still works. For the earlier * API description, see GetBucketLifecycle. *

*
*

* Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, * see Object Lifecycle * Management. *

*

* To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action. * The bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more * information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* GetBucketLifecycleConfiguration has the following special error: *

*
    *
  • *

    * Error code: NoSuchLifecycleConfiguration *

    *
      *
    • *

      * Description: The lifecycle configuration does not exist. *

      *
    • *
    • *

      * HTTP Status Code: 404 Not Found *

      *
    • *
    • *

      * SOAP Fault Code Prefix: Client *

      *
    • *
    *
  • *
*

* The following operations are related to GetBucketLifecycleConfiguration: *

* *
*

* This is a convenience which creates an instance of the {@link GetBucketLifecycleConfigurationRequest.Builder} * avoiding the need to create one manually via {@link GetBucketLifecycleConfigurationRequest#builder()} *

* * @param getBucketLifecycleConfigurationRequest * A {@link Consumer} that will call methods on {@link GetBucketLifecycleConfigurationRequest.Builder} to * create a request. * @return A Java Future containing the result of the GetBucketLifecycleConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketLifecycleConfiguration */ default CompletableFuture getBucketLifecycleConfiguration( Consumer getBucketLifecycleConfigurationRequest) { return getBucketLifecycleConfiguration(GetBucketLifecycleConfigurationRequest.builder() .applyMutation(getBucketLifecycleConfigurationRequest).build()); } /** *

* Returns the Region the bucket resides in. You set the bucket's Region using the LocationConstraint * request parameter in a CreateBucket request. For more information, see CreateBucket. *

*

* To use this implementation of the operation, you must be the bucket owner. *

*

* The following operations are related to GetBucketLocation: *

* * * @param getBucketLocationRequest * @return A Java Future containing the result of the GetBucketLocation operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketLocation */ default CompletableFuture getBucketLocation(GetBucketLocationRequest getBucketLocationRequest) { throw new UnsupportedOperationException(); } /** *

* Returns the Region the bucket resides in. You set the bucket's Region using the LocationConstraint * request parameter in a CreateBucket request. For more information, see CreateBucket. *

*

* To use this implementation of the operation, you must be the bucket owner. *

*

* The following operations are related to GetBucketLocation: *

* *
*

* This is a convenience which creates an instance of the {@link GetBucketLocationRequest.Builder} avoiding the need * to create one manually via {@link GetBucketLocationRequest#builder()} *

* * @param getBucketLocationRequest * A {@link Consumer} that will call methods on {@link GetBucketLocationRequest.Builder} to create a request. * @return A Java Future containing the result of the GetBucketLocation operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketLocation */ default CompletableFuture getBucketLocation( Consumer getBucketLocationRequest) { return getBucketLocation(GetBucketLocationRequest.builder().applyMutation(getBucketLocationRequest).build()); } /** *

* Returns the logging status of a bucket and the permissions users have to view and modify that status. To use GET, * you must be the bucket owner. *

*

* The following operations are related to GetBucketLogging: *

* * * @param getBucketLoggingRequest * @return A Java Future containing the result of the GetBucketLogging operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketLogging */ default CompletableFuture getBucketLogging(GetBucketLoggingRequest getBucketLoggingRequest) { throw new UnsupportedOperationException(); } /** *

* Returns the logging status of a bucket and the permissions users have to view and modify that status. To use GET, * you must be the bucket owner. *

*

* The following operations are related to GetBucketLogging: *

* *
*

* This is a convenience which creates an instance of the {@link GetBucketLoggingRequest.Builder} avoiding the need * to create one manually via {@link GetBucketLoggingRequest#builder()} *

* * @param getBucketLoggingRequest * A {@link Consumer} that will call methods on {@link GetBucketLoggingRequest.Builder} to create a request. * @return A Java Future containing the result of the GetBucketLogging operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketLogging */ default CompletableFuture getBucketLogging( Consumer getBucketLoggingRequest) { return getBucketLogging(GetBucketLoggingRequest.builder().applyMutation(getBucketLoggingRequest).build()); } /** *

* Gets a metrics configuration (specified by the metrics configuration ID) from the bucket. Note that this doesn't * include the daily storage metrics. *

*

* To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration action. * The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more * information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon * CloudWatch. *

*

* The following operations are related to GetBucketMetricsConfiguration: *

* * * @param getBucketMetricsConfigurationRequest * @return A Java Future containing the result of the GetBucketMetricsConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketMetricsConfiguration */ default CompletableFuture getBucketMetricsConfiguration( GetBucketMetricsConfigurationRequest getBucketMetricsConfigurationRequest) { throw new UnsupportedOperationException(); } /** *

* Gets a metrics configuration (specified by the metrics configuration ID) from the bucket. Note that this doesn't * include the daily storage metrics. *

*

* To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration action. * The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more * information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon * CloudWatch. *

*

* The following operations are related to GetBucketMetricsConfiguration: *

* *
*

* This is a convenience which creates an instance of the {@link GetBucketMetricsConfigurationRequest.Builder} * avoiding the need to create one manually via {@link GetBucketMetricsConfigurationRequest#builder()} *

* * @param getBucketMetricsConfigurationRequest * A {@link Consumer} that will call methods on {@link GetBucketMetricsConfigurationRequest.Builder} to * create a request. * @return A Java Future containing the result of the GetBucketMetricsConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketMetricsConfiguration */ default CompletableFuture getBucketMetricsConfiguration( Consumer getBucketMetricsConfigurationRequest) { return getBucketMetricsConfiguration(GetBucketMetricsConfigurationRequest.builder() .applyMutation(getBucketMetricsConfigurationRequest).build()); } /** *

* Returns the notification configuration of a bucket. *

*

* If notifications are not enabled on the bucket, the operation returns an empty * NotificationConfiguration element. *

*

* By default, you must be the bucket owner to read the notification configuration of a bucket. However, the bucket * owner can use a bucket policy to grant permission to other users to read this configuration with the * s3:GetBucketNotification permission. *

*

* For more information about setting and reading the notification configuration on a bucket, see Setting Up Notification of Bucket * Events. For more information about bucket policies, see Using Bucket Policies. *

*

* The following operation is related to GetBucketNotification: *

* * * @param getBucketNotificationConfigurationRequest * @return A Java Future containing the result of the GetBucketNotificationConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketNotificationConfiguration */ default CompletableFuture getBucketNotificationConfiguration( GetBucketNotificationConfigurationRequest getBucketNotificationConfigurationRequest) { throw new UnsupportedOperationException(); } /** *

* Returns the notification configuration of a bucket. *

*

* If notifications are not enabled on the bucket, the operation returns an empty * NotificationConfiguration element. *

*

* By default, you must be the bucket owner to read the notification configuration of a bucket. However, the bucket * owner can use a bucket policy to grant permission to other users to read this configuration with the * s3:GetBucketNotification permission. *

*

* For more information about setting and reading the notification configuration on a bucket, see Setting Up Notification of Bucket * Events. For more information about bucket policies, see Using Bucket Policies. *

*

* The following operation is related to GetBucketNotification: *

* *
*

* This is a convenience which creates an instance of the {@link GetBucketNotificationConfigurationRequest.Builder} * avoiding the need to create one manually via {@link GetBucketNotificationConfigurationRequest#builder()} *

* * @param getBucketNotificationConfigurationRequest * A {@link Consumer} that will call methods on {@link GetBucketNotificationConfigurationRequest.Builder} to * create a request. * @return A Java Future containing the result of the GetBucketNotificationConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketNotificationConfiguration */ default CompletableFuture getBucketNotificationConfiguration( Consumer getBucketNotificationConfigurationRequest) { return getBucketNotificationConfiguration(GetBucketNotificationConfigurationRequest.builder() .applyMutation(getBucketNotificationConfigurationRequest).build()); } /** *

* Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the * s3:GetBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying Permissions in a * Policy. *

*

* For information about Amazon S3 Object Ownership, see Using Object Ownership. *

*

* The following operations are related to GetBucketOwnershipControls: *

* * * @param getBucketOwnershipControlsRequest * @return A Java Future containing the result of the GetBucketOwnershipControls operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketOwnershipControls */ default CompletableFuture getBucketOwnershipControls( GetBucketOwnershipControlsRequest getBucketOwnershipControlsRequest) { throw new UnsupportedOperationException(); } /** *

* Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the * s3:GetBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying Permissions in a * Policy. *

*

* For information about Amazon S3 Object Ownership, see Using Object Ownership. *

*

* The following operations are related to GetBucketOwnershipControls: *

* *
*

* This is a convenience which creates an instance of the {@link GetBucketOwnershipControlsRequest.Builder} avoiding * the need to create one manually via {@link GetBucketOwnershipControlsRequest#builder()} *

* * @param getBucketOwnershipControlsRequest * A {@link Consumer} that will call methods on {@link GetBucketOwnershipControlsRequest.Builder} to create a * request. * @return A Java Future containing the result of the GetBucketOwnershipControls operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketOwnershipControls */ default CompletableFuture getBucketOwnershipControls( Consumer getBucketOwnershipControlsRequest) { return getBucketOwnershipControls(GetBucketOwnershipControlsRequest.builder() .applyMutation(getBucketOwnershipControlsRequest).build()); } /** *

* Returns the policy of a specified bucket. If you are using an identity other than the root user of the AWS * account that owns the bucket, the calling identity must have the GetBucketPolicy permissions on the * specified bucket and belong to the bucket owner's account in order to use this operation. *

*

* If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access Denied * error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's * account, Amazon S3 returns a 405 Method Not Allowed error. *

* *

* As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even * if the policy explicitly denies the root user the ability to perform this action. *

*
*

* For more information about bucket policies, see Using Bucket Policies and User * Policies. *

*

* The following operation is related to GetBucketPolicy: *

* * * @param getBucketPolicyRequest * @return A Java Future containing the result of the GetBucketPolicy operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketPolicy */ default CompletableFuture getBucketPolicy(GetBucketPolicyRequest getBucketPolicyRequest) { throw new UnsupportedOperationException(); } /** *

* Returns the policy of a specified bucket. If you are using an identity other than the root user of the AWS * account that owns the bucket, the calling identity must have the GetBucketPolicy permissions on the * specified bucket and belong to the bucket owner's account in order to use this operation. *

*

* If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access Denied * error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's * account, Amazon S3 returns a 405 Method Not Allowed error. *

* *

* As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even * if the policy explicitly denies the root user the ability to perform this action. *

*
*

* For more information about bucket policies, see Using Bucket Policies and User * Policies. *

*

* The following operation is related to GetBucketPolicy: *

* *
*

* This is a convenience which creates an instance of the {@link GetBucketPolicyRequest.Builder} avoiding the need * to create one manually via {@link GetBucketPolicyRequest#builder()} *

* * @param getBucketPolicyRequest * A {@link Consumer} that will call methods on {@link GetBucketPolicyRequest.Builder} to create a request. * @return A Java Future containing the result of the GetBucketPolicy operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketPolicy */ default CompletableFuture getBucketPolicy( Consumer getBucketPolicyRequest) { return getBucketPolicy(GetBucketPolicyRequest.builder().applyMutation(getBucketPolicyRequest).build()); } /** *

* Retrieves the policy status for an Amazon S3 bucket, indicating whether the bucket is public. In order to use * this operation, you must have the s3:GetBucketPolicyStatus permission. For more information about * Amazon S3 permissions, see Specifying Permissions in a * Policy. *

*

* For more information about when Amazon S3 considers a bucket public, see The Meaning of "Public". *

*

* The following operations are related to GetBucketPolicyStatus: *

* * * @param getBucketPolicyStatusRequest * @return A Java Future containing the result of the GetBucketPolicyStatus operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketPolicyStatus */ default CompletableFuture getBucketPolicyStatus( GetBucketPolicyStatusRequest getBucketPolicyStatusRequest) { throw new UnsupportedOperationException(); } /** *

* Retrieves the policy status for an Amazon S3 bucket, indicating whether the bucket is public. In order to use * this operation, you must have the s3:GetBucketPolicyStatus permission. For more information about * Amazon S3 permissions, see Specifying Permissions in a * Policy. *

*

* For more information about when Amazon S3 considers a bucket public, see The Meaning of "Public". *

*

* The following operations are related to GetBucketPolicyStatus: *

* *
*

* This is a convenience which creates an instance of the {@link GetBucketPolicyStatusRequest.Builder} avoiding the * need to create one manually via {@link GetBucketPolicyStatusRequest#builder()} *

* * @param getBucketPolicyStatusRequest * A {@link Consumer} that will call methods on {@link GetBucketPolicyStatusRequest.Builder} to create a * request. * @return A Java Future containing the result of the GetBucketPolicyStatus operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketPolicyStatus */ default CompletableFuture getBucketPolicyStatus( Consumer getBucketPolicyStatusRequest) { return getBucketPolicyStatus(GetBucketPolicyStatusRequest.builder().applyMutation(getBucketPolicyStatusRequest).build()); } /** *

* Returns the replication configuration of a bucket. *

* *

* It can take a while to propagate the put or delete a replication configuration to all Amazon S3 systems. * Therefore, a get request soon after put or delete can return a wrong result. *

*
*

* For information about replication configuration, see Replication in the Amazon Simple * Storage Service Developer Guide. *

*

* This operation requires permissions for the s3:GetReplicationConfiguration action. For more * information about permissions, see Using Bucket Policies and User * Policies. *

*

* If you include the Filter element in a replication configuration, you must also include the * DeleteMarkerReplication and Priority elements. The response also returns those * elements. *

*

* For information about GetBucketReplication errors, see List of * replication-related error codes *

*

* The following operations are related to GetBucketReplication: *

* * * @param getBucketReplicationRequest * @return A Java Future containing the result of the GetBucketReplication operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketReplication */ default CompletableFuture getBucketReplication( GetBucketReplicationRequest getBucketReplicationRequest) { throw new UnsupportedOperationException(); } /** *

* Returns the replication configuration of a bucket. *

* *

* It can take a while to propagate the put or delete a replication configuration to all Amazon S3 systems. * Therefore, a get request soon after put or delete can return a wrong result. *

*
*

* For information about replication configuration, see Replication in the Amazon Simple * Storage Service Developer Guide. *

*

* This operation requires permissions for the s3:GetReplicationConfiguration action. For more * information about permissions, see Using Bucket Policies and User * Policies. *

*

* If you include the Filter element in a replication configuration, you must also include the * DeleteMarkerReplication and Priority elements. The response also returns those * elements. *

*

* For information about GetBucketReplication errors, see List of * replication-related error codes *

*

* The following operations are related to GetBucketReplication: *

* *
*

* This is a convenience which creates an instance of the {@link GetBucketReplicationRequest.Builder} avoiding the * need to create one manually via {@link GetBucketReplicationRequest#builder()} *

* * @param getBucketReplicationRequest * A {@link Consumer} that will call methods on {@link GetBucketReplicationRequest.Builder} to create a * request. * @return A Java Future containing the result of the GetBucketReplication operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketReplication */ default CompletableFuture getBucketReplication( Consumer getBucketReplicationRequest) { return getBucketReplication(GetBucketReplicationRequest.builder().applyMutation(getBucketReplicationRequest).build()); } /** *

* Returns the request payment configuration of a bucket. To use this version of the operation, you must be the * bucket owner. For more information, see Requester Pays Buckets. *

*

* The following operations are related to GetBucketRequestPayment: *

* * * @param getBucketRequestPaymentRequest * @return A Java Future containing the result of the GetBucketRequestPayment operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketRequestPayment */ default CompletableFuture getBucketRequestPayment( GetBucketRequestPaymentRequest getBucketRequestPaymentRequest) { throw new UnsupportedOperationException(); } /** *

* Returns the request payment configuration of a bucket. To use this version of the operation, you must be the * bucket owner. For more information, see Requester Pays Buckets. *

*

* The following operations are related to GetBucketRequestPayment: *

* *
*

* This is a convenience which creates an instance of the {@link GetBucketRequestPaymentRequest.Builder} avoiding * the need to create one manually via {@link GetBucketRequestPaymentRequest#builder()} *

* * @param getBucketRequestPaymentRequest * A {@link Consumer} that will call methods on {@link GetBucketRequestPaymentRequest.Builder} to create a * request. * @return A Java Future containing the result of the GetBucketRequestPayment operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketRequestPayment */ default CompletableFuture getBucketRequestPayment( Consumer getBucketRequestPaymentRequest) { return getBucketRequestPayment(GetBucketRequestPaymentRequest.builder().applyMutation(getBucketRequestPaymentRequest) .build()); } /** *

* Returns the tag set associated with the bucket. *

*

* To use this operation, you must have permission to perform the s3:GetBucketTagging action. By * default, the bucket owner has this permission and can grant this permission to others. *

*

* GetBucketTagging has the following special error: *

*
    *
  • *

    * Error code: NoSuchTagSetError *

    *
      *
    • *

      * Description: There is no tag set associated with the bucket. *

      *
    • *
    *
  • *
*

* The following operations are related to GetBucketTagging: *

* * * @param getBucketTaggingRequest * @return A Java Future containing the result of the GetBucketTagging operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketTagging */ default CompletableFuture getBucketTagging(GetBucketTaggingRequest getBucketTaggingRequest) { throw new UnsupportedOperationException(); } /** *

* Returns the tag set associated with the bucket. *

*

* To use this operation, you must have permission to perform the s3:GetBucketTagging action. By * default, the bucket owner has this permission and can grant this permission to others. *

*

* GetBucketTagging has the following special error: *

*
    *
  • *

    * Error code: NoSuchTagSetError *

    *
      *
    • *

      * Description: There is no tag set associated with the bucket. *

      *
    • *
    *
  • *
*

* The following operations are related to GetBucketTagging: *

* *
*

* This is a convenience which creates an instance of the {@link GetBucketTaggingRequest.Builder} avoiding the need * to create one manually via {@link GetBucketTaggingRequest#builder()} *

* * @param getBucketTaggingRequest * A {@link Consumer} that will call methods on {@link GetBucketTaggingRequest.Builder} to create a request. * @return A Java Future containing the result of the GetBucketTagging operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketTagging */ default CompletableFuture getBucketTagging( Consumer getBucketTaggingRequest) { return getBucketTagging(GetBucketTaggingRequest.builder().applyMutation(getBucketTaggingRequest).build()); } /** *

* Returns the versioning state of a bucket. *

*

* To retrieve the versioning state of a bucket, you must be the bucket owner. *

*

* This implementation also returns the MFA Delete status of the versioning state. If the MFA Delete status is * enabled, the bucket owner must use an authentication device to change the versioning state of the * bucket. *

*

* The following operations are related to GetBucketVersioning: *

* * * @param getBucketVersioningRequest * @return A Java Future containing the result of the GetBucketVersioning operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketVersioning */ default CompletableFuture getBucketVersioning( GetBucketVersioningRequest getBucketVersioningRequest) { throw new UnsupportedOperationException(); } /** *

* Returns the versioning state of a bucket. *

*

* To retrieve the versioning state of a bucket, you must be the bucket owner. *

*

* This implementation also returns the MFA Delete status of the versioning state. If the MFA Delete status is * enabled, the bucket owner must use an authentication device to change the versioning state of the * bucket. *

*

* The following operations are related to GetBucketVersioning: *

* *
*

* This is a convenience which creates an instance of the {@link GetBucketVersioningRequest.Builder} avoiding the * need to create one manually via {@link GetBucketVersioningRequest#builder()} *

* * @param getBucketVersioningRequest * A {@link Consumer} that will call methods on {@link GetBucketVersioningRequest.Builder} to create a * request. * @return A Java Future containing the result of the GetBucketVersioning operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketVersioning */ default CompletableFuture getBucketVersioning( Consumer getBucketVersioningRequest) { return getBucketVersioning(GetBucketVersioningRequest.builder().applyMutation(getBucketVersioningRequest).build()); } /** *

* Returns the website configuration for a bucket. To host website on Amazon S3, you can configure a bucket as * website by adding a website configuration. For more information about hosting websites, see Hosting Websites on Amazon S3. *

*

* This GET operation requires the S3:GetBucketWebsite permission. By default, only the bucket owner * can read the bucket website configuration. However, bucket owners can allow other users to read the website * configuration by writing a bucket policy granting them the S3:GetBucketWebsite permission. *

*

* The following operations are related to DeleteBucketWebsite: *

* * * @param getBucketWebsiteRequest * @return A Java Future containing the result of the GetBucketWebsite operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketWebsite */ default CompletableFuture getBucketWebsite(GetBucketWebsiteRequest getBucketWebsiteRequest) { throw new UnsupportedOperationException(); } /** *

* Returns the website configuration for a bucket. To host website on Amazon S3, you can configure a bucket as * website by adding a website configuration. For more information about hosting websites, see Hosting Websites on Amazon S3. *

*

* This GET operation requires the S3:GetBucketWebsite permission. By default, only the bucket owner * can read the bucket website configuration. However, bucket owners can allow other users to read the website * configuration by writing a bucket policy granting them the S3:GetBucketWebsite permission. *

*

* The following operations are related to DeleteBucketWebsite: *

* *
*

* This is a convenience which creates an instance of the {@link GetBucketWebsiteRequest.Builder} avoiding the need * to create one manually via {@link GetBucketWebsiteRequest#builder()} *

* * @param getBucketWebsiteRequest * A {@link Consumer} that will call methods on {@link GetBucketWebsiteRequest.Builder} to create a request. * @return A Java Future containing the result of the GetBucketWebsite operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetBucketWebsite */ default CompletableFuture getBucketWebsite( Consumer getBucketWebsiteRequest) { return getBucketWebsite(GetBucketWebsiteRequest.builder().applyMutation(getBucketWebsiteRequest).build()); } /** *

* Retrieves objects from Amazon S3. To use GET, you must have READ access to the object. * If you grant READ access to the anonymous user, you can return the object without using an * authorization header. *

*

* An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, * however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead * of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg. *

*

* To get an object from such a logical hierarchy, specify the full key name for the object in the GET * operation. For a virtual hosted-style request example, if you have the object * photos/2006/February/sample.jpg, specify the resource as * /photos/2006/February/sample.jpg. For a path-style request example, if you have the object * photos/2006/February/sample.jpg in the bucket named examplebucket, specify the resource * as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host * Header Bucket Specification. *

*

* To distribute large files to many people, you can save bandwidth costs by using BitTorrent. For more information, * see Amazon S3 Torrent. For more * information about returning the ACL of an object, see GetObjectAcl. *

*

* If the object you are retrieving is stored in the S3 Glacier, S3 Glacier Deep Archive, S3 Intelligent-Tiering * Archive, or S3 Intelligent-Tiering Deep Archive storage classes, before you can retrieve the object you must * first restore a copy using RestoreObject. Otherwise, this * operation returns an InvalidObjectStateError error. For information about restoring archived * objects, see Restoring Archived * Objects. *

*

* Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests * if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with * Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 * BadRequest error. *

*

* If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you * store the object in Amazon S3, then when you GET the object, you must use the following headers: *

*
    *
  • *

    * x-amz-server-side-encryption-customer-algorithm *

    *
  • *
  • *

    * x-amz-server-side-encryption-customer-key *

    *
  • *
  • *

    * x-amz-server-side-encryption-customer-key-MD5 *

    *
  • *
*

* For more information about SSE-C, see Server-Side * Encryption (Using Customer-Provided Encryption Keys). *

*

* Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging * action), the response also returns the x-amz-tagging-count header that provides the count of number * of tags associated with the object. You can use GetObjectTagging to retrieve * the tag set associated with an object. *

*

* Permissions *

*

* You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a * Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also * have the s3:ListBucket permission. *

*
    *
  • *

    * If you have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code * 404 ("no such key") error. *

    *
  • *
  • *

    * If you don’t have the s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 * ("access denied") error. *

    *
  • *
*

* Versioning *

*

* By default, the GET operation returns the current version of an object. To return a different version, use the * versionId subresource. *

* *

* If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and * includes x-amz-delete-marker: true in the response. *

*
*

* For more information about versioning, see PutBucketVersioning. *

*

* Overriding Response Header Values *

*

* There are times when you want to override certain response header values in a GET response. For example, you * might override the Content-Disposition response header value in your GET request. *

*

* You can override values for a set of response headers using the following query parameters. These response header * values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers * you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an * object. The response headers that you can override for the GET response are Content-Type, * Content-Language, Expires, Cache-Control, Content-Disposition * , and Content-Encoding. To override these header values in the GET response, you use the following * request parameters. *

* *

* You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. * They cannot be used with an unsigned (anonymous) request. *

*
*
    *
  • *

    * response-content-type *

    *
  • *
  • *

    * response-content-language *

    *
  • *
  • *

    * response-expires *

    *
  • *
  • *

    * response-cache-control *

    *
  • *
  • *

    * response-content-disposition *

    *
  • *
  • *

    * response-content-encoding *

    *
  • *
*

* Additional Considerations about Request Headers *

*

* If both of the If-Match and If-Unmodified-Since headers are present in the request as * follows: If-Match condition evaluates to true, and; If-Unmodified-Since * condition evaluates to false; then, S3 returns 200 OK and the data requested. *

*

* If both of the If-None-Match and If-Modified-Since headers are present in the request * as follows: If-None-Match condition evaluates to false, and; * If-Modified-Since condition evaluates to true; then, S3 returns 304 Not Modified * response code. *

*

* For more information about conditional requests, see RFC 7232. *

*

* The following operations are related to GetObject: *

* * * @param getObjectRequest * @param asyncResponseTransformer * The response transformer for processing the streaming response in a non-blocking manner. See * {@link AsyncResponseTransformer} for details on how this callback should be implemented and for links to * precanned implementations for common scenarios like downloading to a file. The service documentation for * the response content is as follows ' *

* Object data. *

* '. * @return A future to the transformed result of the AsyncResponseTransformer.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • NoSuchKeyException The specified key does not exist.
  • *
  • InvalidObjectStateException Object is archived and inaccessible until restored.
  • *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetObject */ default CompletableFuture getObject(GetObjectRequest getObjectRequest, AsyncResponseTransformer asyncResponseTransformer) { throw new UnsupportedOperationException(); } /** *

* Retrieves objects from Amazon S3. To use GET, you must have READ access to the object. * If you grant READ access to the anonymous user, you can return the object without using an * authorization header. *

*

* An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, * however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead * of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg. *

*

* To get an object from such a logical hierarchy, specify the full key name for the object in the GET * operation. For a virtual hosted-style request example, if you have the object * photos/2006/February/sample.jpg, specify the resource as * /photos/2006/February/sample.jpg. For a path-style request example, if you have the object * photos/2006/February/sample.jpg in the bucket named examplebucket, specify the resource * as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host * Header Bucket Specification. *

*

* To distribute large files to many people, you can save bandwidth costs by using BitTorrent. For more information, * see Amazon S3 Torrent. For more * information about returning the ACL of an object, see GetObjectAcl. *

*

* If the object you are retrieving is stored in the S3 Glacier, S3 Glacier Deep Archive, S3 Intelligent-Tiering * Archive, or S3 Intelligent-Tiering Deep Archive storage classes, before you can retrieve the object you must * first restore a copy using RestoreObject. Otherwise, this * operation returns an InvalidObjectStateError error. For information about restoring archived * objects, see Restoring Archived * Objects. *

*

* Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests * if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with * Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 * BadRequest error. *

*

* If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you * store the object in Amazon S3, then when you GET the object, you must use the following headers: *

*
    *
  • *

    * x-amz-server-side-encryption-customer-algorithm *

    *
  • *
  • *

    * x-amz-server-side-encryption-customer-key *

    *
  • *
  • *

    * x-amz-server-side-encryption-customer-key-MD5 *

    *
  • *
*

* For more information about SSE-C, see Server-Side * Encryption (Using Customer-Provided Encryption Keys). *

*

* Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging * action), the response also returns the x-amz-tagging-count header that provides the count of number * of tags associated with the object. You can use GetObjectTagging to retrieve * the tag set associated with an object. *

*

* Permissions *

*

* You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a * Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also * have the s3:ListBucket permission. *

*
    *
  • *

    * If you have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code * 404 ("no such key") error. *

    *
  • *
  • *

    * If you don’t have the s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 * ("access denied") error. *

    *
  • *
*

* Versioning *

*

* By default, the GET operation returns the current version of an object. To return a different version, use the * versionId subresource. *

* *

* If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and * includes x-amz-delete-marker: true in the response. *

*
*

* For more information about versioning, see PutBucketVersioning. *

*

* Overriding Response Header Values *

*

* There are times when you want to override certain response header values in a GET response. For example, you * might override the Content-Disposition response header value in your GET request. *

*

* You can override values for a set of response headers using the following query parameters. These response header * values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers * you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an * object. The response headers that you can override for the GET response are Content-Type, * Content-Language, Expires, Cache-Control, Content-Disposition * , and Content-Encoding. To override these header values in the GET response, you use the following * request parameters. *

* *

* You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. * They cannot be used with an unsigned (anonymous) request. *

*
*
    *
  • *

    * response-content-type *

    *
  • *
  • *

    * response-content-language *

    *
  • *
  • *

    * response-expires *

    *
  • *
  • *

    * response-cache-control *

    *
  • *
  • *

    * response-content-disposition *

    *
  • *
  • *

    * response-content-encoding *

    *
  • *
*

* Additional Considerations about Request Headers *

*

* If both of the If-Match and If-Unmodified-Since headers are present in the request as * follows: If-Match condition evaluates to true, and; If-Unmodified-Since * condition evaluates to false; then, S3 returns 200 OK and the data requested. *

*

* If both of the If-None-Match and If-Modified-Since headers are present in the request * as follows: If-None-Match condition evaluates to false, and; * If-Modified-Since condition evaluates to true; then, S3 returns 304 Not Modified * response code. *

*

* For more information about conditional requests, see RFC 7232. *

*

* The following operations are related to GetObject: *

* *
*

* This is a convenience which creates an instance of the {@link GetObjectRequest.Builder} avoiding the need to * create one manually via {@link GetObjectRequest#builder()} *

* * @param getObjectRequest * A {@link Consumer} that will call methods on {@link GetObjectRequest.Builder} to create a request. * @param asyncResponseTransformer * The response transformer for processing the streaming response in a non-blocking manner. See * {@link AsyncResponseTransformer} for details on how this callback should be implemented and for links to * precanned implementations for common scenarios like downloading to a file. The service documentation for * the response content is as follows ' *

* Object data. *

* '. * @return A future to the transformed result of the AsyncResponseTransformer.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • NoSuchKeyException The specified key does not exist.
  • *
  • InvalidObjectStateException Object is archived and inaccessible until restored.
  • *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetObject */ default CompletableFuture getObject(Consumer getObjectRequest, AsyncResponseTransformer asyncResponseTransformer) { return getObject(GetObjectRequest.builder().applyMutation(getObjectRequest).build(), asyncResponseTransformer); } /** *

* Retrieves objects from Amazon S3. To use GET, you must have READ access to the object. * If you grant READ access to the anonymous user, you can return the object without using an * authorization header. *

*

* An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, * however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead * of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg. *

*

* To get an object from such a logical hierarchy, specify the full key name for the object in the GET * operation. For a virtual hosted-style request example, if you have the object * photos/2006/February/sample.jpg, specify the resource as * /photos/2006/February/sample.jpg. For a path-style request example, if you have the object * photos/2006/February/sample.jpg in the bucket named examplebucket, specify the resource * as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host * Header Bucket Specification. *

*

* To distribute large files to many people, you can save bandwidth costs by using BitTorrent. For more information, * see Amazon S3 Torrent. For more * information about returning the ACL of an object, see GetObjectAcl. *

*

* If the object you are retrieving is stored in the S3 Glacier, S3 Glacier Deep Archive, S3 Intelligent-Tiering * Archive, or S3 Intelligent-Tiering Deep Archive storage classes, before you can retrieve the object you must * first restore a copy using RestoreObject. Otherwise, this * operation returns an InvalidObjectStateError error. For information about restoring archived * objects, see Restoring Archived * Objects. *

*

* Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests * if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with * Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 * BadRequest error. *

*

* If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you * store the object in Amazon S3, then when you GET the object, you must use the following headers: *

*
    *
  • *

    * x-amz-server-side-encryption-customer-algorithm *

    *
  • *
  • *

    * x-amz-server-side-encryption-customer-key *

    *
  • *
  • *

    * x-amz-server-side-encryption-customer-key-MD5 *

    *
  • *
*

* For more information about SSE-C, see Server-Side * Encryption (Using Customer-Provided Encryption Keys). *

*

* Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging * action), the response also returns the x-amz-tagging-count header that provides the count of number * of tags associated with the object. You can use GetObjectTagging to retrieve * the tag set associated with an object. *

*

* Permissions *

*

* You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a * Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also * have the s3:ListBucket permission. *

*
    *
  • *

    * If you have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code * 404 ("no such key") error. *

    *
  • *
  • *

    * If you don’t have the s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 * ("access denied") error. *

    *
  • *
*

* Versioning *

*

* By default, the GET operation returns the current version of an object. To return a different version, use the * versionId subresource. *

* *

* If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and * includes x-amz-delete-marker: true in the response. *

*
*

* For more information about versioning, see PutBucketVersioning. *

*

* Overriding Response Header Values *

*

* There are times when you want to override certain response header values in a GET response. For example, you * might override the Content-Disposition response header value in your GET request. *

*

* You can override values for a set of response headers using the following query parameters. These response header * values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers * you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an * object. The response headers that you can override for the GET response are Content-Type, * Content-Language, Expires, Cache-Control, Content-Disposition * , and Content-Encoding. To override these header values in the GET response, you use the following * request parameters. *

* *

* You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. * They cannot be used with an unsigned (anonymous) request. *

*
*
    *
  • *

    * response-content-type *

    *
  • *
  • *

    * response-content-language *

    *
  • *
  • *

    * response-expires *

    *
  • *
  • *

    * response-cache-control *

    *
  • *
  • *

    * response-content-disposition *

    *
  • *
  • *

    * response-content-encoding *

    *
  • *
*

* Additional Considerations about Request Headers *

*

* If both of the If-Match and If-Unmodified-Since headers are present in the request as * follows: If-Match condition evaluates to true, and; If-Unmodified-Since * condition evaluates to false; then, S3 returns 200 OK and the data requested. *

*

* If both of the If-None-Match and If-Modified-Since headers are present in the request * as follows: If-None-Match condition evaluates to false, and; * If-Modified-Since condition evaluates to true; then, S3 returns 304 Not Modified * response code. *

*

* For more information about conditional requests, see RFC 7232. *

*

* The following operations are related to GetObject: *

* * * @param getObjectRequest * @param destinationPath * {@link Path} to file that response contents will be written to. The file must not exist or this method * will throw an exception. If the file is not writable by the current user then an exception will be thrown. * The service documentation for the response content is as follows ' *

* Object data. *

* '. * @return A future to the transformed result of the AsyncResponseTransformer.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • NoSuchKeyException The specified key does not exist.
  • *
  • InvalidObjectStateException Object is archived and inaccessible until restored.
  • *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetObject */ default CompletableFuture getObject(GetObjectRequest getObjectRequest, Path destinationPath) { return getObject(getObjectRequest, AsyncResponseTransformer.toFile(destinationPath)); } /** *

* Retrieves objects from Amazon S3. To use GET, you must have READ access to the object. * If you grant READ access to the anonymous user, you can return the object without using an * authorization header. *

*

* An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, * however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead * of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg. *

*

* To get an object from such a logical hierarchy, specify the full key name for the object in the GET * operation. For a virtual hosted-style request example, if you have the object * photos/2006/February/sample.jpg, specify the resource as * /photos/2006/February/sample.jpg. For a path-style request example, if you have the object * photos/2006/February/sample.jpg in the bucket named examplebucket, specify the resource * as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host * Header Bucket Specification. *

*

* To distribute large files to many people, you can save bandwidth costs by using BitTorrent. For more information, * see Amazon S3 Torrent. For more * information about returning the ACL of an object, see GetObjectAcl. *

*

* If the object you are retrieving is stored in the S3 Glacier, S3 Glacier Deep Archive, S3 Intelligent-Tiering * Archive, or S3 Intelligent-Tiering Deep Archive storage classes, before you can retrieve the object you must * first restore a copy using RestoreObject. Otherwise, this * operation returns an InvalidObjectStateError error. For information about restoring archived * objects, see Restoring Archived * Objects. *

*

* Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests * if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with * Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 * BadRequest error. *

*

* If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you * store the object in Amazon S3, then when you GET the object, you must use the following headers: *

*
    *
  • *

    * x-amz-server-side-encryption-customer-algorithm *

    *
  • *
  • *

    * x-amz-server-side-encryption-customer-key *

    *
  • *
  • *

    * x-amz-server-side-encryption-customer-key-MD5 *

    *
  • *
*

* For more information about SSE-C, see Server-Side * Encryption (Using Customer-Provided Encryption Keys). *

*

* Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging * action), the response also returns the x-amz-tagging-count header that provides the count of number * of tags associated with the object. You can use GetObjectTagging to retrieve * the tag set associated with an object. *

*

* Permissions *

*

* You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a * Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also * have the s3:ListBucket permission. *

*
    *
  • *

    * If you have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code * 404 ("no such key") error. *

    *
  • *
  • *

    * If you don’t have the s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 * ("access denied") error. *

    *
  • *
*

* Versioning *

*

* By default, the GET operation returns the current version of an object. To return a different version, use the * versionId subresource. *

* *

* If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and * includes x-amz-delete-marker: true in the response. *

*
*

* For more information about versioning, see PutBucketVersioning. *

*

* Overriding Response Header Values *

*

* There are times when you want to override certain response header values in a GET response. For example, you * might override the Content-Disposition response header value in your GET request. *

*

* You can override values for a set of response headers using the following query parameters. These response header * values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers * you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an * object. The response headers that you can override for the GET response are Content-Type, * Content-Language, Expires, Cache-Control, Content-Disposition * , and Content-Encoding. To override these header values in the GET response, you use the following * request parameters. *

* *

* You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. * They cannot be used with an unsigned (anonymous) request. *

*
*
    *
  • *

    * response-content-type *

    *
  • *
  • *

    * response-content-language *

    *
  • *
  • *

    * response-expires *

    *
  • *
  • *

    * response-cache-control *

    *
  • *
  • *

    * response-content-disposition *

    *
  • *
  • *

    * response-content-encoding *

    *
  • *
*

* Additional Considerations about Request Headers *

*

* If both of the If-Match and If-Unmodified-Since headers are present in the request as * follows: If-Match condition evaluates to true, and; If-Unmodified-Since * condition evaluates to false; then, S3 returns 200 OK and the data requested. *

*

* If both of the If-None-Match and If-Modified-Since headers are present in the request * as follows: If-None-Match condition evaluates to false, and; * If-Modified-Since condition evaluates to true; then, S3 returns 304 Not Modified * response code. *

*

* For more information about conditional requests, see RFC 7232. *

*

* The following operations are related to GetObject: *

* *
*

* This is a convenience which creates an instance of the {@link GetObjectRequest.Builder} avoiding the need to * create one manually via {@link GetObjectRequest#builder()} *

* * @param getObjectRequest * A {@link Consumer} that will call methods on {@link GetObjectRequest.Builder} to create a request. * @param destinationPath * {@link Path} to file that response contents will be written to. The file must not exist or this method * will throw an exception. If the file is not writable by the current user then an exception will be thrown. * The service documentation for the response content is as follows ' *

* Object data. *

* '. * @return A future to the transformed result of the AsyncResponseTransformer.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • NoSuchKeyException The specified key does not exist.
  • *
  • InvalidObjectStateException Object is archived and inaccessible until restored.
  • *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetObject */ default CompletableFuture getObject(Consumer getObjectRequest, Path destinationPath) { return getObject(GetObjectRequest.builder().applyMutation(getObjectRequest).build(), destinationPath); } /** *

* Returns the access control list (ACL) of an object. To use this operation, you must have READ_ACP * access to the object. *

*

* This action is not supported by Amazon S3 on Outposts. *

*

* Versioning *

*

* By default, GET returns ACL information about the current version of an object. To return ACL information about a * different version, use the versionId subresource. *

*

* The following operations are related to GetObjectAcl: *

* * * @param getObjectAclRequest * @return A Java Future containing the result of the GetObjectAcl operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • NoSuchKeyException The specified key does not exist.
  • *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetObjectAcl */ default CompletableFuture getObjectAcl(GetObjectAclRequest getObjectAclRequest) { throw new UnsupportedOperationException(); } /** *

* Returns the access control list (ACL) of an object. To use this operation, you must have READ_ACP * access to the object. *

*

* This action is not supported by Amazon S3 on Outposts. *

*

* Versioning *

*

* By default, GET returns ACL information about the current version of an object. To return ACL information about a * different version, use the versionId subresource. *

*

* The following operations are related to GetObjectAcl: *

* *
*

* This is a convenience which creates an instance of the {@link GetObjectAclRequest.Builder} avoiding the need to * create one manually via {@link GetObjectAclRequest#builder()} *

* * @param getObjectAclRequest * A {@link Consumer} that will call methods on {@link GetObjectAclRequest.Builder} to create a request. * @return A Java Future containing the result of the GetObjectAcl operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • NoSuchKeyException The specified key does not exist.
  • *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetObjectAcl */ default CompletableFuture getObjectAcl(Consumer getObjectAclRequest) { return getObjectAcl(GetObjectAclRequest.builder().applyMutation(getObjectAclRequest).build()); } /** *

* Gets an object's current Legal Hold status. For more information, see Locking Objects. *

*

* This action is not supported by Amazon S3 on Outposts. *

* * @param getObjectLegalHoldRequest * @return A Java Future containing the result of the GetObjectLegalHold operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetObjectLegalHold */ default CompletableFuture getObjectLegalHold(GetObjectLegalHoldRequest getObjectLegalHoldRequest) { throw new UnsupportedOperationException(); } /** *

* Gets an object's current Legal Hold status. For more information, see Locking Objects. *

*

* This action is not supported by Amazon S3 on Outposts. *

*
*

* This is a convenience which creates an instance of the {@link GetObjectLegalHoldRequest.Builder} avoiding the * need to create one manually via {@link GetObjectLegalHoldRequest#builder()} *

* * @param getObjectLegalHoldRequest * A {@link Consumer} that will call methods on {@link GetObjectLegalHoldRequest.Builder} to create a * request. * @return A Java Future containing the result of the GetObjectLegalHold operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetObjectLegalHold */ default CompletableFuture getObjectLegalHold( Consumer getObjectLegalHoldRequest) { return getObjectLegalHold(GetObjectLegalHoldRequest.builder().applyMutation(getObjectLegalHoldRequest).build()); } /** *

* Gets the Object Lock configuration for a bucket. The rule specified in the Object Lock configuration will be * applied by default to every new object placed in the specified bucket. For more information, see Locking Objects. *

* * @param getObjectLockConfigurationRequest * @return A Java Future containing the result of the GetObjectLockConfiguration operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetObjectLockConfiguration */ default CompletableFuture getObjectLockConfiguration( GetObjectLockConfigurationRequest getObjectLockConfigurationRequest) { throw new UnsupportedOperationException(); } /** *

* Gets the Object Lock configuration for a bucket. The rule specified in the Object Lock configuration will be * applied by default to every new object placed in the specified bucket. For more information, see Locking Objects. *

*
*

* This is a convenience which creates an instance of the {@link GetObjectLockConfigurationRequest.Builder} avoiding * the need to create one manually via {@link GetObjectLockConfigurationRequest#builder()} *

* * @param getObjectLockConfigurationRequest * A {@link Consumer} that will call methods on {@link GetObjectLockConfigurationRequest.Builder} to create a * request. * @return A Java Future containing the result of the GetObjectLockConfiguration operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetObjectLockConfiguration */ default CompletableFuture getObjectLockConfiguration( Consumer getObjectLockConfigurationRequest) { return getObjectLockConfiguration(GetObjectLockConfigurationRequest.builder() .applyMutation(getObjectLockConfigurationRequest).build()); } /** *

* Retrieves an object's retention settings. For more information, see Locking Objects. *

*

* This action is not supported by Amazon S3 on Outposts. *

* * @param getObjectRetentionRequest * @return A Java Future containing the result of the GetObjectRetention operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetObjectRetention */ default CompletableFuture getObjectRetention(GetObjectRetentionRequest getObjectRetentionRequest) { throw new UnsupportedOperationException(); } /** *

* Retrieves an object's retention settings. For more information, see Locking Objects. *

*

* This action is not supported by Amazon S3 on Outposts. *

*
*

* This is a convenience which creates an instance of the {@link GetObjectRetentionRequest.Builder} avoiding the * need to create one manually via {@link GetObjectRetentionRequest#builder()} *

* * @param getObjectRetentionRequest * A {@link Consumer} that will call methods on {@link GetObjectRetentionRequest.Builder} to create a * request. * @return A Java Future containing the result of the GetObjectRetention operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetObjectRetention */ default CompletableFuture getObjectRetention( Consumer getObjectRetentionRequest) { return getObjectRetention(GetObjectRetentionRequest.builder().applyMutation(getObjectRetentionRequest).build()); } /** *

* Returns the tag-set of an object. You send the GET request against the tagging subresource associated with the * object. *

*

* To use this operation, you must have permission to perform the s3:GetObjectTagging action. By * default, the GET operation returns information about current version of an object. For a versioned bucket, you * can have multiple versions of an object in your bucket. To retrieve tags of any other version, use the versionId * query parameter. You also need permission for the s3:GetObjectVersionTagging action. *

*

* By default, the bucket owner has this permission and can grant this permission to others. *

*

* For information about the Amazon S3 object tagging feature, see Object Tagging. *

*

* The following operation is related to GetObjectTagging: *

* * * @param getObjectTaggingRequest * @return A Java Future containing the result of the GetObjectTagging operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetObjectTagging */ default CompletableFuture getObjectTagging(GetObjectTaggingRequest getObjectTaggingRequest) { throw new UnsupportedOperationException(); } /** *

* Returns the tag-set of an object. You send the GET request against the tagging subresource associated with the * object. *

*

* To use this operation, you must have permission to perform the s3:GetObjectTagging action. By * default, the GET operation returns information about current version of an object. For a versioned bucket, you * can have multiple versions of an object in your bucket. To retrieve tags of any other version, use the versionId * query parameter. You also need permission for the s3:GetObjectVersionTagging action. *

*

* By default, the bucket owner has this permission and can grant this permission to others. *

*

* For information about the Amazon S3 object tagging feature, see Object Tagging. *

*

* The following operation is related to GetObjectTagging: *

* *
*

* This is a convenience which creates an instance of the {@link GetObjectTaggingRequest.Builder} avoiding the need * to create one manually via {@link GetObjectTaggingRequest#builder()} *

* * @param getObjectTaggingRequest * A {@link Consumer} that will call methods on {@link GetObjectTaggingRequest.Builder} to create a request. * @return A Java Future containing the result of the GetObjectTagging operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetObjectTagging */ default CompletableFuture getObjectTagging( Consumer getObjectTaggingRequest) { return getObjectTagging(GetObjectTaggingRequest.builder().applyMutation(getObjectTaggingRequest).build()); } /** *

* Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're distributing large files. For * more information about BitTorrent, see Using BitTorrent with Amazon S3. *

* *

* You can get torrent only for objects that are less than 5 GB in size, and that are not encrypted using * server-side encryption with a customer-provided encryption key. *

*
*

* To use GET, you must have READ access to the object. *

*

* This action is not supported by Amazon S3 on Outposts. *

*

* The following operation is related to GetObjectTorrent: *

* * * @param getObjectTorrentRequest * @param asyncResponseTransformer * The response transformer for processing the streaming response in a non-blocking manner. See * {@link AsyncResponseTransformer} for details on how this callback should be implemented and for links to * precanned implementations for common scenarios like downloading to a file. The service documentation for * the response content is as follows ' *

* A Bencoded dictionary as defined by the BitTorrent specification *

* '. * @return A future to the transformed result of the AsyncResponseTransformer.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetObjectTorrent */ default CompletableFuture getObjectTorrent(GetObjectTorrentRequest getObjectTorrentRequest, AsyncResponseTransformer asyncResponseTransformer) { throw new UnsupportedOperationException(); } /** *

* Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're distributing large files. For * more information about BitTorrent, see Using BitTorrent with Amazon S3. *

* *

* You can get torrent only for objects that are less than 5 GB in size, and that are not encrypted using * server-side encryption with a customer-provided encryption key. *

*
*

* To use GET, you must have READ access to the object. *

*

* This action is not supported by Amazon S3 on Outposts. *

*

* The following operation is related to GetObjectTorrent: *

* *
*

* This is a convenience which creates an instance of the {@link GetObjectTorrentRequest.Builder} avoiding the need * to create one manually via {@link GetObjectTorrentRequest#builder()} *

* * @param getObjectTorrentRequest * A {@link Consumer} that will call methods on {@link GetObjectTorrentRequest.Builder} to create a request. * @param asyncResponseTransformer * The response transformer for processing the streaming response in a non-blocking manner. See * {@link AsyncResponseTransformer} for details on how this callback should be implemented and for links to * precanned implementations for common scenarios like downloading to a file. The service documentation for * the response content is as follows ' *

* A Bencoded dictionary as defined by the BitTorrent specification *

* '. * @return A future to the transformed result of the AsyncResponseTransformer.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetObjectTorrent */ default CompletableFuture getObjectTorrent( Consumer getObjectTorrentRequest, AsyncResponseTransformer asyncResponseTransformer) { return getObjectTorrent(GetObjectTorrentRequest.builder().applyMutation(getObjectTorrentRequest).build(), asyncResponseTransformer); } /** *

* Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're distributing large files. For * more information about BitTorrent, see Using BitTorrent with Amazon S3. *

* *

* You can get torrent only for objects that are less than 5 GB in size, and that are not encrypted using * server-side encryption with a customer-provided encryption key. *

*
*

* To use GET, you must have READ access to the object. *

*

* This action is not supported by Amazon S3 on Outposts. *

*

* The following operation is related to GetObjectTorrent: *

* * * @param getObjectTorrentRequest * @param destinationPath * {@link Path} to file that response contents will be written to. The file must not exist or this method * will throw an exception. If the file is not writable by the current user then an exception will be thrown. * The service documentation for the response content is as follows ' *

* A Bencoded dictionary as defined by the BitTorrent specification *

* '. * @return A future to the transformed result of the AsyncResponseTransformer.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetObjectTorrent */ default CompletableFuture getObjectTorrent(GetObjectTorrentRequest getObjectTorrentRequest, Path destinationPath) { return getObjectTorrent(getObjectTorrentRequest, AsyncResponseTransformer.toFile(destinationPath)); } /** *

* Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're distributing large files. For * more information about BitTorrent, see Using BitTorrent with Amazon S3. *

* *

* You can get torrent only for objects that are less than 5 GB in size, and that are not encrypted using * server-side encryption with a customer-provided encryption key. *

*
*

* To use GET, you must have READ access to the object. *

*

* This action is not supported by Amazon S3 on Outposts. *

*

* The following operation is related to GetObjectTorrent: *

* *
*

* This is a convenience which creates an instance of the {@link GetObjectTorrentRequest.Builder} avoiding the need * to create one manually via {@link GetObjectTorrentRequest#builder()} *

* * @param getObjectTorrentRequest * A {@link Consumer} that will call methods on {@link GetObjectTorrentRequest.Builder} to create a request. * @param destinationPath * {@link Path} to file that response contents will be written to. The file must not exist or this method * will throw an exception. If the file is not writable by the current user then an exception will be thrown. * The service documentation for the response content is as follows ' *

* A Bencoded dictionary as defined by the BitTorrent specification *

* '. * @return A future to the transformed result of the AsyncResponseTransformer.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetObjectTorrent */ default CompletableFuture getObjectTorrent( Consumer getObjectTorrentRequest, Path destinationPath) { return getObjectTorrent(GetObjectTorrentRequest.builder().applyMutation(getObjectTorrentRequest).build(), destinationPath); } /** *

* Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you * must have the s3:GetBucketPublicAccessBlock permission. For more information about Amazon S3 * permissions, see Specifying * Permissions in a Policy. *

* *

* When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an object, it checks * the PublicAccessBlock configuration for both the bucket (or the bucket that contains the object) and * the bucket owner's account. If the PublicAccessBlock settings are different between the bucket and * the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings. *

*
*

* For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of "Public". *

*

* The following operations are related to GetPublicAccessBlock: *

* * * @param getPublicAccessBlockRequest * @return A Java Future containing the result of the GetPublicAccessBlock operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetPublicAccessBlock */ default CompletableFuture getPublicAccessBlock( GetPublicAccessBlockRequest getPublicAccessBlockRequest) { throw new UnsupportedOperationException(); } /** *

* Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you * must have the s3:GetBucketPublicAccessBlock permission. For more information about Amazon S3 * permissions, see Specifying * Permissions in a Policy. *

* *

* When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an object, it checks * the PublicAccessBlock configuration for both the bucket (or the bucket that contains the object) and * the bucket owner's account. If the PublicAccessBlock settings are different between the bucket and * the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings. *

*
*

* For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of "Public". *

*

* The following operations are related to GetPublicAccessBlock: *

* *
*

* This is a convenience which creates an instance of the {@link GetPublicAccessBlockRequest.Builder} avoiding the * need to create one manually via {@link GetPublicAccessBlockRequest#builder()} *

* * @param getPublicAccessBlockRequest * A {@link Consumer} that will call methods on {@link GetPublicAccessBlockRequest.Builder} to create a * request. * @return A Java Future containing the result of the GetPublicAccessBlock operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.GetPublicAccessBlock */ default CompletableFuture getPublicAccessBlock( Consumer getPublicAccessBlockRequest) { return getPublicAccessBlock(GetPublicAccessBlockRequest.builder().applyMutation(getPublicAccessBlockRequest).build()); } /** *

* This operation is useful to determine if a bucket exists and you have permission to access it. The operation * returns a 200 OK if the bucket exists and you have permission to access it. Otherwise, the operation * might return responses such as 404 Not Found and 403 Forbidden. *

*

* To use this operation, you must have permissions to perform the s3:ListBucket action. The bucket * owner has this permission by default and can grant this permission to others. For more information about * permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

* * @param headBucketRequest * @return A Java Future containing the result of the HeadBucket operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • NoSuchBucketException The specified bucket does not exist.
  • *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.HeadBucket */ default CompletableFuture headBucket(HeadBucketRequest headBucketRequest) { throw new UnsupportedOperationException(); } /** *

* This operation is useful to determine if a bucket exists and you have permission to access it. The operation * returns a 200 OK if the bucket exists and you have permission to access it. Otherwise, the operation * might return responses such as 404 Not Found and 403 Forbidden. *

*

* To use this operation, you must have permissions to perform the s3:ListBucket action. The bucket * owner has this permission by default and can grant this permission to others. For more information about * permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*
*

* This is a convenience which creates an instance of the {@link HeadBucketRequest.Builder} avoiding the need to * create one manually via {@link HeadBucketRequest#builder()} *

* * @param headBucketRequest * A {@link Consumer} that will call methods on {@link HeadBucketRequest.Builder} to create a request. * @return A Java Future containing the result of the HeadBucket operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • NoSuchBucketException The specified bucket does not exist.
  • *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.HeadBucket */ default CompletableFuture headBucket(Consumer headBucketRequest) { return headBucket(HeadBucketRequest.builder().applyMutation(headBucketRequest).build()); } /** *

* The HEAD operation retrieves metadata from an object without returning the object itself. This operation is * useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object. *

*

* A HEAD request has the same options as a GET operation on an object. The response is * identical to the GET response except that there is no response body. *

*

* If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you * store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following * headers: *

*
    *
  • *

    * x-amz-server-side-encryption-customer-algorithm *

    *
  • *
  • *

    * x-amz-server-side-encryption-customer-key *

    *
  • *
  • *

    * x-amz-server-side-encryption-customer-key-MD5 *

    *
  • *
*

* For more information about SSE-C, see Server-Side * Encryption (Using Customer-Provided Encryption Keys). *

* *

* Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests * if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with * Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 * BadRequest error. *

*
*

* Request headers are limited to 8 KB in size. For more information, see Common Request Headers. *

*

* Consider the following when using request headers: *

*
    *
  • *

    * Consideration 1 – If both of the If-Match and If-Unmodified-Since headers are present * in the request as follows: *

    *
      *
    • *

      * If-Match condition evaluates to true, and; *

      *
    • *
    • *

      * If-Unmodified-Since condition evaluates to false; *

      *
    • *
    *

    * Then Amazon S3 returns 200 OK and the data requested. *

    *
  • *
  • *

    * Consideration 2 – If both of the If-None-Match and If-Modified-Since headers are * present in the request as follows: *

    *
      *
    • *

      * If-None-Match condition evaluates to false, and; *

      *
    • *
    • *

      * If-Modified-Since condition evaluates to true; *

      *
    • *
    *

    * Then Amazon S3 returns the 304 Not Modified response code. *

    *
  • *
*

* For more information about conditional requests, see RFC 7232. *

*

* Permissions *

*

* You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a * Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also * have the s3:ListBucket permission. *

*
    *
  • *

    * If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 * ("no such key") error. *

    *
  • *
  • *

    * If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 * ("access denied") error. *

    *
  • *
*

* The following operation is related to HeadObject: *

* * * @param headObjectRequest * @return A Java Future containing the result of the HeadObject operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • NoSuchKeyException The specified key does not exist.
  • *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.HeadObject */ default CompletableFuture headObject(HeadObjectRequest headObjectRequest) { throw new UnsupportedOperationException(); } /** *

* The HEAD operation retrieves metadata from an object without returning the object itself. This operation is * useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object. *

*

* A HEAD request has the same options as a GET operation on an object. The response is * identical to the GET response except that there is no response body. *

*

* If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you * store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following * headers: *

*
    *
  • *

    * x-amz-server-side-encryption-customer-algorithm *

    *
  • *
  • *

    * x-amz-server-side-encryption-customer-key *

    *
  • *
  • *

    * x-amz-server-side-encryption-customer-key-MD5 *

    *
  • *
*

* For more information about SSE-C, see Server-Side * Encryption (Using Customer-Provided Encryption Keys). *

* *

* Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests * if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with * Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 * BadRequest error. *

*
*

* Request headers are limited to 8 KB in size. For more information, see Common Request Headers. *

*

* Consider the following when using request headers: *

*
    *
  • *

    * Consideration 1 – If both of the If-Match and If-Unmodified-Since headers are present * in the request as follows: *

    *
      *
    • *

      * If-Match condition evaluates to true, and; *

      *
    • *
    • *

      * If-Unmodified-Since condition evaluates to false; *

      *
    • *
    *

    * Then Amazon S3 returns 200 OK and the data requested. *

    *
  • *
  • *

    * Consideration 2 – If both of the If-None-Match and If-Modified-Since headers are * present in the request as follows: *

    *
      *
    • *

      * If-None-Match condition evaluates to false, and; *

      *
    • *
    • *

      * If-Modified-Since condition evaluates to true; *

      *
    • *
    *

    * Then Amazon S3 returns the 304 Not Modified response code. *

    *
  • *
*

* For more information about conditional requests, see RFC 7232. *

*

* Permissions *

*

* You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a * Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also * have the s3:ListBucket permission. *

*
    *
  • *

    * If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 * ("no such key") error. *

    *
  • *
  • *

    * If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 * ("access denied") error. *

    *
  • *
*

* The following operation is related to HeadObject: *

* *
*

* This is a convenience which creates an instance of the {@link HeadObjectRequest.Builder} avoiding the need to * create one manually via {@link HeadObjectRequest#builder()} *

* * @param headObjectRequest * A {@link Consumer} that will call methods on {@link HeadObjectRequest.Builder} to create a request. * @return A Java Future containing the result of the HeadObject operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • NoSuchKeyException The specified key does not exist.
  • *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.HeadObject */ default CompletableFuture headObject(Consumer headObjectRequest) { return headObject(HeadObjectRequest.builder().applyMutation(headObjectRequest).build()); } /** *

* Lists the analytics configurations for the bucket. You can have up to 1,000 analytics configurations per bucket. *

*

* This operation supports list pagination and does not return more than 100 configurations at a time. You should * always check the IsTruncated element in the response. If there are no more configurations to list, * IsTruncated is set to false. If there are more configurations to list, IsTruncated is * set to true, and there will be a value in NextContinuationToken. You use the * NextContinuationToken value to continue the pagination of the list by passing the value in * continuation-token in the request to GET the next page. *

*

* To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. * The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more * information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage * Class Analysis. *

*

* The following operations are related to ListBucketAnalyticsConfigurations: *

* * * @param listBucketAnalyticsConfigurationsRequest * @return A Java Future containing the result of the ListBucketAnalyticsConfigurations operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.ListBucketAnalyticsConfigurations */ default CompletableFuture listBucketAnalyticsConfigurations( ListBucketAnalyticsConfigurationsRequest listBucketAnalyticsConfigurationsRequest) { throw new UnsupportedOperationException(); } /** *

* Lists the analytics configurations for the bucket. You can have up to 1,000 analytics configurations per bucket. *

*

* This operation supports list pagination and does not return more than 100 configurations at a time. You should * always check the IsTruncated element in the response. If there are no more configurations to list, * IsTruncated is set to false. If there are more configurations to list, IsTruncated is * set to true, and there will be a value in NextContinuationToken. You use the * NextContinuationToken value to continue the pagination of the list by passing the value in * continuation-token in the request to GET the next page. *

*

* To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. * The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more * information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage * Class Analysis. *

*

* The following operations are related to ListBucketAnalyticsConfigurations: *

* *
*

* This is a convenience which creates an instance of the {@link ListBucketAnalyticsConfigurationsRequest.Builder} * avoiding the need to create one manually via {@link ListBucketAnalyticsConfigurationsRequest#builder()} *

* * @param listBucketAnalyticsConfigurationsRequest * A {@link Consumer} that will call methods on {@link ListBucketAnalyticsConfigurationsRequest.Builder} to * create a request. * @return A Java Future containing the result of the ListBucketAnalyticsConfigurations operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.ListBucketAnalyticsConfigurations */ default CompletableFuture listBucketAnalyticsConfigurations( Consumer listBucketAnalyticsConfigurationsRequest) { return listBucketAnalyticsConfigurations(ListBucketAnalyticsConfigurationsRequest.builder() .applyMutation(listBucketAnalyticsConfigurationsRequest).build()); } /** *

* Lists the S3 Intelligent-Tiering configuration from the specified bucket. *

*

* The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to * the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering * delivers automatic cost savings by moving data between access tiers, when access patterns change. *

*

* The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at * least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects * can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering * storage class. *

*

* If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 * days. For more information, see Storage * class for automatically optimizing frequently and infrequently accessed objects. *

*

* Operations related to ListBucketIntelligentTieringConfigurations include: *

* * * @param listBucketIntelligentTieringConfigurationsRequest * @return A Java Future containing the result of the ListBucketIntelligentTieringConfigurations operation returned * by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.ListBucketIntelligentTieringConfigurations */ default CompletableFuture listBucketIntelligentTieringConfigurations( ListBucketIntelligentTieringConfigurationsRequest listBucketIntelligentTieringConfigurationsRequest) { throw new UnsupportedOperationException(); } /** *

* Lists the S3 Intelligent-Tiering configuration from the specified bucket. *

*

* The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to * the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering * delivers automatic cost savings by moving data between access tiers, when access patterns change. *

*

* The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at * least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects * can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering * storage class. *

*

* If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 * days. For more information, see Storage * class for automatically optimizing frequently and infrequently accessed objects. *

*

* Operations related to ListBucketIntelligentTieringConfigurations include: *

* *
*

* This is a convenience which creates an instance of the * {@link ListBucketIntelligentTieringConfigurationsRequest.Builder} avoiding the need to create one manually via * {@link ListBucketIntelligentTieringConfigurationsRequest#builder()} *

* * @param listBucketIntelligentTieringConfigurationsRequest * A {@link Consumer} that will call methods on * {@link ListBucketIntelligentTieringConfigurationsRequest.Builder} to create a request. * @return A Java Future containing the result of the ListBucketIntelligentTieringConfigurations operation returned * by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.ListBucketIntelligentTieringConfigurations */ default CompletableFuture listBucketIntelligentTieringConfigurations( Consumer listBucketIntelligentTieringConfigurationsRequest) { return listBucketIntelligentTieringConfigurations(ListBucketIntelligentTieringConfigurationsRequest.builder() .applyMutation(listBucketIntelligentTieringConfigurationsRequest).build()); } /** *

* Returns a list of inventory configurations for the bucket. You can have up to 1,000 analytics configurations per * bucket. *

*

* This operation supports list pagination and does not return more than 100 configurations at a time. Always check * the IsTruncated element in the response. If there are no more configurations to list, * IsTruncated is set to false. If there are more configurations to list, IsTruncated is * set to true, and there is a value in NextContinuationToken. You use the * NextContinuationToken value to continue the pagination of the list by passing the value in * continuation-token in the request to GET the next page. *

*

* To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration action. * The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more * information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* For information about the Amazon S3 inventory feature, see Amazon S3 Inventory *

*

* The following operations are related to ListBucketInventoryConfigurations: *

* * * @param listBucketInventoryConfigurationsRequest * @return A Java Future containing the result of the ListBucketInventoryConfigurations operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.ListBucketInventoryConfigurations */ default CompletableFuture listBucketInventoryConfigurations( ListBucketInventoryConfigurationsRequest listBucketInventoryConfigurationsRequest) { throw new UnsupportedOperationException(); } /** *

* Returns a list of inventory configurations for the bucket. You can have up to 1,000
* configurations per bucket.
* <p>
* This operation supports list pagination and does not return more than 100 configurations at a
* time. Always check the {@code IsTruncated} element in the response. If there are no more
* configurations to list, {@code IsTruncated} is set to {@code false}. If there are more
* configurations to list, {@code IsTruncated} is set to {@code true}, and there is a value in
* {@code NextContinuationToken}. Pass that value as {@code continuation-token} in the next request
* to GET the next page.
* <p>
* To use this operation, you must have permission to perform the
* {@code s3:GetInventoryConfiguration} action. The bucket owner has this permission by default and
* can grant it to others.
* <p>
* For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.
* <p>
* This is a convenience which creates an instance of the
* {@link ListBucketInventoryConfigurationsRequest.Builder} avoiding the need to create one
* manually via {@link ListBucketInventoryConfigurationsRequest#builder()}.
*
* @param listBucketInventoryConfigurationsRequest
*        A {@link Consumer} that will call methods on
*        {@link ListBucketInventoryConfigurationsRequest.Builder} to create a request.
* @return A Java Future containing the result of the ListBucketInventoryConfigurations operation
*         returned by the service. The CompletableFuture returned by this method can be completed
*         exceptionally with the following exceptions:
*         <ul>
*         <li>SdkException - Base class for all exceptions that can be thrown by the SDK (both
*         service and client). Can be used for catch-all scenarios.</li>
*         <li>SdkClientException - If any client-side error occurs such as an IO-related failure,
*         failure to get credentials, etc.</li>
*         <li>S3Exception - Base class for all service exceptions. Unknown exceptions are thrown
*         as an instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.ListBucketInventoryConfigurations
*/
default CompletableFuture<ListBucketInventoryConfigurationsResponse> listBucketInventoryConfigurations(
        Consumer<ListBucketInventoryConfigurationsRequest.Builder> listBucketInventoryConfigurationsRequest) {
    // Build the request from the caller-supplied mutation, then delegate to the request overload.
    return listBucketInventoryConfigurations(ListBucketInventoryConfigurationsRequest.builder()
            .applyMutation(listBucketInventoryConfigurationsRequest).build());
}

/**
* Lists the metrics configurations for the bucket. The metrics configurations are only for the
* request metrics of the bucket and do not provide information on daily storage metrics. You can
* have up to 1,000 configurations per bucket.
* <p>
* This operation supports list pagination and does not return more than 100 configurations at a
* time. Always check the {@code IsTruncated} element in the response. If there are more
* configurations to list, {@code IsTruncated} is {@code true} and {@code NextContinuationToken}
* holds the value to pass as {@code continuation-token} to GET the next page.
* <p>
* To use this operation, you must have permission to perform the
* {@code s3:GetMetricsConfiguration} action. The bucket owner has this permission by default and
* can grant it to others.
* <p>
* For more information about metrics configurations and CloudWatch request metrics, see Monitoring
* Metrics with Amazon CloudWatch.
* <p>
* The following operations are related to {@code ListBucketMetricsConfigurations}:
* {@code GetBucketMetricsConfiguration}, {@code PutBucketMetricsConfiguration} and
* {@code DeleteBucketMetricsConfiguration}.
*
* @param listBucketMetricsConfigurationsRequest
*        the request identifying the bucket whose metrics configurations should be listed
* @return A Java Future containing the result of the ListBucketMetricsConfigurations operation
*         returned by the service. The CompletableFuture returned by this method can be completed
*         exceptionally with the following exceptions:
*         <ul>
*         <li>SdkException - Base class for all exceptions that can be thrown by the SDK (both
*         service and client). Can be used for catch-all scenarios.</li>
*         <li>SdkClientException - If any client-side error occurs such as an IO-related failure,
*         failure to get credentials, etc.</li>
*         <li>S3Exception - Base class for all service exceptions. Unknown exceptions are thrown
*         as an instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.ListBucketMetricsConfigurations
*/
default CompletableFuture<ListBucketMetricsConfigurationsResponse> listBucketMetricsConfigurations(
        ListBucketMetricsConfigurationsRequest listBucketMetricsConfigurationsRequest) {
    // Interface default: generated concrete clients override this with a real implementation.
    throw new UnsupportedOperationException();
}

/**
* Lists the metrics configurations for the bucket. The metrics configurations are only for the
* request metrics of the bucket and do not provide information on daily storage metrics. You can
* have up to 1,000 configurations per bucket.
* <p>
* This operation supports list pagination and does not return more than 100 configurations at a
* time. Always check the {@code IsTruncated} element in the response; when it is {@code true},
* pass {@code NextContinuationToken} as {@code continuation-token} to GET the next page.
* <p>
* To use this operation, you must have permission to perform the
* {@code s3:GetMetricsConfiguration} action. The bucket owner has this permission by default and
* can grant it to others.
* <p>
* For more information about metrics configurations and CloudWatch request metrics, see Monitoring
* Metrics with Amazon CloudWatch.
* <p>
* This is a convenience which creates an instance of the
* {@link ListBucketMetricsConfigurationsRequest.Builder} avoiding the need to create one manually
* via {@link ListBucketMetricsConfigurationsRequest#builder()}.
*
* @param listBucketMetricsConfigurationsRequest
*        A {@link Consumer} that will call methods on
*        {@link ListBucketMetricsConfigurationsRequest.Builder} to create a request.
* @return A Java Future containing the result of the ListBucketMetricsConfigurations operation
*         returned by the service. The CompletableFuture returned by this method can be completed
*         exceptionally with the following exceptions:
*         <ul>
*         <li>SdkException - Base class for all exceptions that can be thrown by the SDK (both
*         service and client). Can be used for catch-all scenarios.</li>
*         <li>SdkClientException - If any client-side error occurs such as an IO-related failure,
*         failure to get credentials, etc.</li>
*         <li>S3Exception - Base class for all service exceptions. Unknown exceptions are thrown
*         as an instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.ListBucketMetricsConfigurations
*/
default CompletableFuture<ListBucketMetricsConfigurationsResponse> listBucketMetricsConfigurations(
        Consumer<ListBucketMetricsConfigurationsRequest.Builder> listBucketMetricsConfigurationsRequest) {
    // Build the request from the caller-supplied mutation, then delegate to the request overload.
    return listBucketMetricsConfigurations(ListBucketMetricsConfigurationsRequest.builder()
            .applyMutation(listBucketMetricsConfigurationsRequest).build());
}

/**
* Returns a list of all buckets owned by the authenticated sender of the request.
*
* @param listBucketsRequest
*        the request object for the ListBuckets operation
* @return A Java Future containing the result of the ListBuckets operation returned by the
*         service. The CompletableFuture returned by this method can be completed exceptionally
*         with the following exceptions:
*         <ul>
*         <li>SdkException - Base class for all exceptions that can be thrown by the SDK (both
*         service and client). Can be used for catch-all scenarios.</li>
*         <li>SdkClientException - If any client-side error occurs such as an IO-related failure,
*         failure to get credentials, etc.</li>
*         <li>S3Exception - Base class for all service exceptions. Unknown exceptions are thrown
*         as an instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.ListBuckets
*/
default CompletableFuture<ListBucketsResponse> listBuckets(ListBucketsRequest listBucketsRequest) {
    // Interface default: generated concrete clients override this with a real implementation.
    throw new UnsupportedOperationException();
}

/**
* Returns a list of all buckets owned by the authenticated sender of the request.
* <p>
* This is a convenience which creates an instance of the {@link ListBucketsRequest.Builder}
* avoiding the need to create one manually via {@link ListBucketsRequest#builder()}.
*
* @param listBucketsRequest
*        A {@link Consumer} that will call methods on {@link ListBucketsRequest.Builder} to
*        create a request.
* @return A Java Future containing the result of the ListBuckets operation returned by the
*         service. The CompletableFuture returned by this method can be completed exceptionally
*         with the following exceptions:
*         <ul>
*         <li>SdkException - Base class for all exceptions that can be thrown by the SDK (both
*         service and client). Can be used for catch-all scenarios.</li>
*         <li>SdkClientException - If any client-side error occurs such as an IO-related failure,
*         failure to get credentials, etc.</li>
*         <li>S3Exception - Base class for all service exceptions. Unknown exceptions are thrown
*         as an instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.ListBuckets
*/
default CompletableFuture<ListBucketsResponse> listBuckets(Consumer<ListBucketsRequest.Builder> listBucketsRequest) {
    // Build the request from the caller-supplied mutation, then delegate to the request overload.
    return listBuckets(ListBucketsRequest.builder().applyMutation(listBucketsRequest).build());
}

/**
* Returns a list of all buckets owned by the authenticated sender of the request. *

* * @return A Java Future containing the result of the ListBuckets operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.ListBuckets */ default CompletableFuture listBuckets() { return listBuckets(ListBucketsRequest.builder().build()); } /** *

* This operation lists in-progress multipart uploads. An in-progress multipart upload is a
* multipart upload that has been initiated using the Initiate Multipart Upload request, but has
* not yet been completed or aborted.
* <p>
* This operation returns at most 1,000 multipart uploads in the response, which is also the
* default value. You can further limit the number of uploads in a response with the
* {@code max-uploads} parameter. If additional multipart uploads satisfy the list criteria, the
* response contains an {@code IsTruncated} element with the value {@code true}; to list the
* additional uploads, use the {@code key-marker} and {@code upload-id-marker} request parameters.
* <p>
* In the response, the uploads are sorted by key. Uploads that share an object key are
* additionally sorted in ascending order by upload initiation time.
* <p>
* For more information on multipart uploads, see Uploading Objects Using Multipart Upload. For
* information on required permissions, see Multipart Upload API and Permissions.
* <p>
* The following operations are related to {@code ListMultipartUploads}:
* {@code CreateMultipartUpload}, {@code UploadPart}, {@code CompleteMultipartUpload},
* {@code ListParts} and {@code AbortMultipartUpload}.
*
* @param listMultipartUploadsRequest
*        the request identifying the bucket whose in-progress multipart uploads should be listed
* @return A Java Future containing the result of the ListMultipartUploads operation returned by
*         the service. The CompletableFuture returned by this method can be completed
*         exceptionally with the following exceptions:
*         <ul>
*         <li>SdkException - Base class for all exceptions that can be thrown by the SDK (both
*         service and client). Can be used for catch-all scenarios.</li>
*         <li>SdkClientException - If any client-side error occurs such as an IO-related failure,
*         failure to get credentials, etc.</li>
*         <li>S3Exception - Base class for all service exceptions. Unknown exceptions are thrown
*         as an instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.ListMultipartUploads
*/
default CompletableFuture<ListMultipartUploadsResponse> listMultipartUploads(
        ListMultipartUploadsRequest listMultipartUploadsRequest) {
    // Interface default: generated concrete clients override this with a real implementation.
    throw new UnsupportedOperationException();
}

/**
* This operation lists in-progress multipart uploads. An in-progress multipart upload is a
* multipart upload that has been initiated using the Initiate Multipart Upload request, but has
* not yet been completed or aborted.
* <p>
* This operation returns at most 1,000 multipart uploads in the response, which is also the
* default value. You can further limit the number of uploads in a response with the
* {@code max-uploads} parameter. If additional multipart uploads satisfy the list criteria, the
* response contains {@code IsTruncated} set to {@code true}; to list the additional uploads, use
* the {@code key-marker} and {@code upload-id-marker} request parameters.
* <p>
* In the response, the uploads are sorted by key, then in ascending order by upload initiation
* time within each key.
* <p>
* For more information on multipart uploads, see Uploading Objects Using Multipart Upload. For
* information on required permissions, see Multipart Upload API and Permissions.
* <p>
* This is a convenience which creates an instance of the
* {@link ListMultipartUploadsRequest.Builder} avoiding the need to create one manually via
* {@link ListMultipartUploadsRequest#builder()}.
*
* @param listMultipartUploadsRequest
*        A {@link Consumer} that will call methods on {@link ListMultipartUploadsRequest.Builder}
*        to create a request.
* @return A Java Future containing the result of the ListMultipartUploads operation returned by
*         the service. The CompletableFuture returned by this method can be completed
*         exceptionally with the following exceptions:
*         <ul>
*         <li>SdkException - Base class for all exceptions that can be thrown by the SDK (both
*         service and client). Can be used for catch-all scenarios.</li>
*         <li>SdkClientException - If any client-side error occurs such as an IO-related failure,
*         failure to get credentials, etc.</li>
*         <li>S3Exception - Base class for all service exceptions. Unknown exceptions are thrown
*         as an instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.ListMultipartUploads
*/
default CompletableFuture<ListMultipartUploadsResponse> listMultipartUploads(
        Consumer<ListMultipartUploadsRequest.Builder> listMultipartUploadsRequest) {
    // Build the request from the caller-supplied mutation, then delegate to the request overload.
    return listMultipartUploads(ListMultipartUploadsRequest.builder().applyMutation(listMultipartUploadsRequest).build());
}

/**
* This operation lists in-progress multipart uploads. An in-progress multipart upload is a multipart upload that * has been initiated using the Initiate Multipart Upload request, but has not yet been completed or aborted. *

*

* This operation returns at most 1,000 multipart uploads in the response. 1,000 multipart uploads is the maximum * number of uploads a response can include, which is also the default value. You can further limit the number of * uploads in a response by specifying the max-uploads parameter in the response. If additional * multipart uploads satisfy the list criteria, the response will contain an IsTruncated element with * the value true. To list the additional multipart uploads, use the key-marker and * upload-id-marker request parameters. *

*

* In the response, the uploads are sorted by key. If your application has initiated more than one multipart upload * using the same object key, then uploads in the response are first sorted by key. Additionally, uploads are sorted * in ascending order within each key by the upload initiation time. *

*

* For more information on multipart uploads, see Uploading Objects Using Multipart * Upload. *

*

* For information on permissions required to use the multipart upload API, see Multipart Upload API and * Permissions. *

*

* The following operations are related to ListMultipartUploads: *

* *
*

* This is a variant of * {@link #listMultipartUploads(software.amazon.awssdk.services.s3.model.ListMultipartUploadsRequest)} operation. * The return type is a custom publisher that can be subscribed to request a stream of response pages. SDK will * internally handle making service calls for you. *

*

* When the operation is called, an instance of this class is returned. At this point, no service calls are made yet * and so there is no guarantee that the request is valid. If there are errors in your request, you will see the * failures only after you start streaming the data. The subscribe method should be called as a request to start * streaming data. For more info, see * {@link org.reactivestreams.Publisher#subscribe(org.reactivestreams.Subscriber)}. Each call to the subscribe * method will result in a new {@link org.reactivestreams.Subscription} i.e., a new contract to stream data from the * starting request. *

* *

* The following are few ways to use the response class: *

* 1) Using the subscribe helper method * *
     * {@code
     * software.amazon.awssdk.services.s3.paginators.ListMultipartUploadsPublisher publisher = client.listMultipartUploadsPaginator(request);
     * CompletableFuture future = publisher.subscribe(res -> { // Do something with the response });
     * future.get();
     * }
     * 
* * 2) Using a custom subscriber * *
     * {@code
     * software.amazon.awssdk.services.s3.paginators.ListMultipartUploadsPublisher publisher = client.listMultipartUploadsPaginator(request);
     * publisher.subscribe(new Subscriber() {
     * 
     * public void onSubscribe(org.reactivestreams.Subscriber subscription) { //... };
     * 
     * 
     * public void onNext(software.amazon.awssdk.services.s3.model.ListMultipartUploadsResponse response) { //... };
     * });}
     * 
* * As the response is a publisher, it can work well with third party reactive streams implementations like RxJava2. *

* Please notice that the configuration of MaxUploads won't limit the number of results you get with the * paginator. It only limits the number of results in each page. *

*

* Note: If you prefer to have control on service calls, use the * {@link #listMultipartUploads(software.amazon.awssdk.services.s3.model.ListMultipartUploadsRequest)} * operation. *

* * @param listMultipartUploadsRequest * @return A custom publisher that can be subscribed to request a stream of response pages.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.ListMultipartUploads */ default ListMultipartUploadsPublisher listMultipartUploadsPaginator(ListMultipartUploadsRequest listMultipartUploadsRequest) { throw new UnsupportedOperationException(); } /** *

* This operation lists in-progress multipart uploads. An in-progress multipart upload is a multipart upload that * has been initiated using the Initiate Multipart Upload request, but has not yet been completed or aborted. *

*

* This operation returns at most 1,000 multipart uploads in the response. 1,000 multipart uploads is the maximum * number of uploads a response can include, which is also the default value. You can further limit the number of * uploads in a response by specifying the max-uploads parameter in the response. If additional * multipart uploads satisfy the list criteria, the response will contain an IsTruncated element with * the value true. To list the additional multipart uploads, use the key-marker and * upload-id-marker request parameters. *

*

* In the response, the uploads are sorted by key. If your application has initiated more than one multipart upload * using the same object key, then uploads in the response are first sorted by key. Additionally, uploads are sorted * in ascending order within each key by the upload initiation time. *

*

* For more information on multipart uploads, see Uploading Objects Using Multipart * Upload. *

*

* For information on permissions required to use the multipart upload API, see Multipart Upload API and * Permissions. *

*

* The following operations are related to ListMultipartUploads: *

* *
*

* This is a variant of * {@link #listMultipartUploads(software.amazon.awssdk.services.s3.model.ListMultipartUploadsRequest)} operation. * The return type is a custom publisher that can be subscribed to request a stream of response pages. SDK will * internally handle making service calls for you. *

*

* When the operation is called, an instance of this class is returned. At this point, no service calls are made yet * and so there is no guarantee that the request is valid. If there are errors in your request, you will see the * failures only after you start streaming the data. The subscribe method should be called as a request to start * streaming data. For more info, see * {@link org.reactivestreams.Publisher#subscribe(org.reactivestreams.Subscriber)}. Each call to the subscribe * method will result in a new {@link org.reactivestreams.Subscription} i.e., a new contract to stream data from the * starting request. *

* *

* The following are few ways to use the response class: *

* 1) Using the subscribe helper method * *
     * {@code
     * software.amazon.awssdk.services.s3.paginators.ListMultipartUploadsPublisher publisher = client.listMultipartUploadsPaginator(request);
     * CompletableFuture future = publisher.subscribe(res -> { // Do something with the response });
     * future.get();
     * }
     * 
* * 2) Using a custom subscriber * *
     * {@code
     * software.amazon.awssdk.services.s3.paginators.ListMultipartUploadsPublisher publisher = client.listMultipartUploadsPaginator(request);
     * publisher.subscribe(new Subscriber() {
     * 
     * public void onSubscribe(org.reactivestreams.Subscriber subscription) { //... };
     * 
     * 
     * public void onNext(software.amazon.awssdk.services.s3.model.ListMultipartUploadsResponse response) { //... };
     * });}
     * 
* * As the response is a publisher, it can work well with third party reactive streams implementations like RxJava2. *

* Please notice that the configuration of MaxUploads won't limit the number of results you get with the * paginator. It only limits the number of results in each page. *

*

* Note: If you prefer to have control on service calls, use the * {@link #listMultipartUploads(software.amazon.awssdk.services.s3.model.ListMultipartUploadsRequest)} * operation. *

*

* This is a convenience which creates an instance of the {@link ListMultipartUploadsRequest.Builder} avoiding the * need to create one manually via {@link ListMultipartUploadsRequest#builder()} *

* * @param listMultipartUploadsRequest * A {@link Consumer} that will call methods on {@link ListMultipartUploadsRequest.Builder} to create a * request. * @return A custom publisher that can be subscribed to request a stream of response pages.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.ListMultipartUploads */ default ListMultipartUploadsPublisher listMultipartUploadsPaginator( Consumer listMultipartUploadsRequest) { return listMultipartUploadsPaginator(ListMultipartUploadsRequest.builder().applyMutation(listMultipartUploadsRequest) .build()); } /** *

* Returns metadata about all versions of the objects in a bucket. You can also use request
* parameters as selection criteria to return metadata about a subset of all the object versions.
* <p>
* A {@code 200 OK} response can contain valid or invalid XML. Make sure to design your
* application to parse the contents of the response and handle it appropriately.
* <p>
* To use this operation, you must have READ access to the bucket. This action is not supported by
* Amazon S3 on Outposts.
* <p>
* The following operations are related to {@code ListObjectVersions}: {@code ListObjectsV2},
* {@code GetObject}, {@code PutObject} and {@code DeleteObject}.
*
* @param listObjectVersionsRequest
*        the request identifying the bucket (and optional selection criteria) whose object
*        versions should be listed
* @return A Java Future containing the result of the ListObjectVersions operation returned by the
*         service. The CompletableFuture returned by this method can be completed exceptionally
*         with the following exceptions:
*         <ul>
*         <li>SdkException - Base class for all exceptions that can be thrown by the SDK (both
*         service and client). Can be used for catch-all scenarios.</li>
*         <li>SdkClientException - If any client-side error occurs such as an IO-related failure,
*         failure to get credentials, etc.</li>
*         <li>S3Exception - Base class for all service exceptions. Unknown exceptions are thrown
*         as an instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.ListObjectVersions
*/
default CompletableFuture<ListObjectVersionsResponse> listObjectVersions(ListObjectVersionsRequest listObjectVersionsRequest) {
    // Interface default: generated concrete clients override this with a real implementation.
    throw new UnsupportedOperationException();
}

/**
* Returns metadata about all versions of the objects in a bucket. You can also use request
* parameters as selection criteria to return metadata about a subset of all the object versions.
* <p>
* A {@code 200 OK} response can contain valid or invalid XML. Make sure to design your
* application to parse the contents of the response and handle it appropriately.
* <p>
* To use this operation, you must have READ access to the bucket. This action is not supported by
* Amazon S3 on Outposts.
* <p>
* This is a convenience which creates an instance of the {@link ListObjectVersionsRequest.Builder}
* avoiding the need to create one manually via {@link ListObjectVersionsRequest#builder()}.
*
* @param listObjectVersionsRequest
*        A {@link Consumer} that will call methods on {@link ListObjectVersionsRequest.Builder}
*        to create a request.
* @return A Java Future containing the result of the ListObjectVersions operation returned by the
*         service. The CompletableFuture returned by this method can be completed exceptionally
*         with the following exceptions:
*         <ul>
*         <li>SdkException - Base class for all exceptions that can be thrown by the SDK (both
*         service and client). Can be used for catch-all scenarios.</li>
*         <li>SdkClientException - If any client-side error occurs such as an IO-related failure,
*         failure to get credentials, etc.</li>
*         <li>S3Exception - Base class for all service exceptions. Unknown exceptions are thrown
*         as an instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.ListObjectVersions
*/
default CompletableFuture<ListObjectVersionsResponse> listObjectVersions(
        Consumer<ListObjectVersionsRequest.Builder> listObjectVersionsRequest) {
    // Build the request from the caller-supplied mutation, then delegate to the request overload.
    return listObjectVersions(ListObjectVersionsRequest.builder().applyMutation(listObjectVersionsRequest).build());
}

/**
* Returns metadata about all versions of the objects in a bucket. You can also use request parameters as selection * criteria to return metadata about a subset of all the object versions. *

* *

* A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of * the response and handle it appropriately. *

*
*

* To use this operation, you must have READ access to the bucket. *

*

* This action is not supported by Amazon S3 on Outposts. *

*

* The following operations are related to ListObjectVersions: *

* *
*

* This is a variant of * {@link #listObjectVersions(software.amazon.awssdk.services.s3.model.ListObjectVersionsRequest)} operation. The * return type is a custom publisher that can be subscribed to request a stream of response pages. SDK will * internally handle making service calls for you. *

*

* When the operation is called, an instance of this class is returned. At this point, no service calls are made yet * and so there is no guarantee that the request is valid. If there are errors in your request, you will see the * failures only after you start streaming the data. The subscribe method should be called as a request to start * streaming data. For more info, see * {@link org.reactivestreams.Publisher#subscribe(org.reactivestreams.Subscriber)}. Each call to the subscribe * method will result in a new {@link org.reactivestreams.Subscription} i.e., a new contract to stream data from the * starting request. *

* *

* The following are few ways to use the response class: *

* 1) Using the subscribe helper method * *
     * {@code
     * software.amazon.awssdk.services.s3.paginators.ListObjectVersionsPublisher publisher = client.listObjectVersionsPaginator(request);
     * CompletableFuture future = publisher.subscribe(res -> { // Do something with the response });
     * future.get();
     * }
     * 
* * 2) Using a custom subscriber * *
     * {@code
     * software.amazon.awssdk.services.s3.paginators.ListObjectVersionsPublisher publisher = client.listObjectVersionsPaginator(request);
     * publisher.subscribe(new Subscriber() {
     * 
     * public void onSubscribe(org.reactivestreams.Subscriber subscription) { //... };
     * 
     * 
     * public void onNext(software.amazon.awssdk.services.s3.model.ListObjectVersionsResponse response) { //... };
     * });}
     * 
* * As the response is a publisher, it can work well with third party reactive streams implementations like RxJava2. *

* Please notice that the configuration of MaxKeys won't limit the number of results you get with the paginator. * It only limits the number of results in each page. *

*

* Note: If you prefer to have control on service calls, use the * {@link #listObjectVersions(software.amazon.awssdk.services.s3.model.ListObjectVersionsRequest)} operation. *

* * @param listObjectVersionsRequest * @return A custom publisher that can be subscribed to request a stream of response pages.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.ListObjectVersions */ default ListObjectVersionsPublisher listObjectVersionsPaginator(ListObjectVersionsRequest listObjectVersionsRequest) { throw new UnsupportedOperationException(); } /** *

* Returns metadata about all versions of the objects in a bucket. You can also use request parameters as selection * criteria to return metadata about a subset of all the object versions. *

* *

* A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of * the response and handle it appropriately. *

*
*

* To use this operation, you must have READ access to the bucket. *

*

* This action is not supported by Amazon S3 on Outposts. *

*

* The following operations are related to ListObjectVersions: *

* *
*

* This is a variant of * {@link #listObjectVersions(software.amazon.awssdk.services.s3.model.ListObjectVersionsRequest)} operation. The * return type is a custom publisher that can be subscribed to request a stream of response pages. SDK will * internally handle making service calls for you. *

*

* When the operation is called, an instance of this class is returned. At this point, no service calls are made yet * and so there is no guarantee that the request is valid. If there are errors in your request, you will see the * failures only after you start streaming the data. The subscribe method should be called as a request to start * streaming data. For more info, see * {@link org.reactivestreams.Publisher#subscribe(org.reactivestreams.Subscriber)}. Each call to the subscribe * method will result in a new {@link org.reactivestreams.Subscription} i.e., a new contract to stream data from the * starting request. *

* *

* The following are few ways to use the response class: *

* 1) Using the subscribe helper method * *
     * {@code
     * software.amazon.awssdk.services.s3.paginators.ListObjectVersionsPublisher publisher = client.listObjectVersionsPaginator(request);
     * CompletableFuture future = publisher.subscribe(res -> { // Do something with the response });
     * future.get();
     * }
     * 
* * 2) Using a custom subscriber * *
     * {@code
     * software.amazon.awssdk.services.s3.paginators.ListObjectVersionsPublisher publisher = client.listObjectVersionsPaginator(request);
     * publisher.subscribe(new Subscriber() {
     * 
     * public void onSubscribe(org.reactivestreams.Subscriber subscription) { //... };
     * 
     * 
     * public void onNext(software.amazon.awssdk.services.s3.model.ListObjectVersionsResponse response) { //... };
     * });}
     * 
* * As the response is a publisher, it can work well with third party reactive streams implementations like RxJava2. *

* Please notice that the configuration of MaxKeys won't limit the number of results you get with the paginator. * It only limits the number of results in each page. *

*

* Note: If you prefer to have control on service calls, use the * {@link #listObjectVersions(software.amazon.awssdk.services.s3.model.ListObjectVersionsRequest)} operation. *

*

* This is a convenience which creates an instance of the {@link ListObjectVersionsRequest.Builder} avoiding the * need to create one manually via {@link ListObjectVersionsRequest#builder()} *

* * @param listObjectVersionsRequest * A {@link Consumer} that will call methods on {@link ListObjectVersionsRequest.Builder} to create a * request. * @return A custom publisher that can be subscribed to request a stream of response pages.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.ListObjectVersions */ default ListObjectVersionsPublisher listObjectVersionsPaginator( Consumer listObjectVersionsRequest) { return listObjectVersionsPaginator(ListObjectVersionsRequest.builder().applyMutation(listObjectVersionsRequest).build()); } /** *

* Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as
* selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or
* invalid XML. Be sure to design your application to parse the contents of the response and handle it
* appropriately.
* <p>
* This API has been revised. We recommend that you use the newer version, ListObjectsV2, when developing
* applications. For backward compatibility, Amazon S3 continues to support ListObjects.
*
* @param listObjectsRequest the request object
* @return A Java Future containing the result of the ListObjects operation returned by the service. The
*         CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>NoSuchBucketException The specified bucket does not exist.</li>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to
*         get credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.ListObjects
*/
default CompletableFuture<ListObjectsResponse> listObjects(ListObjectsRequest listObjectsRequest) {
    // Restored the generic type parameter stripped during extraction: per the @return doc, the
    // future completes with the ListObjects result.
    throw new UnsupportedOperationException();
}

/**

* Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as
* selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or
* invalid XML. Be sure to design your application to parse the contents of the response and handle it
* appropriately.
* <p>
* This API has been revised. We recommend that you use the newer version, ListObjectsV2, when developing
* applications. For backward compatibility, Amazon S3 continues to support ListObjects.
* <p>
* This is a convenience which creates an instance of the {@link ListObjectsRequest.Builder} avoiding the
* need to create one manually via {@link ListObjectsRequest#builder()}
*
* @param listObjectsRequest
*        A {@link Consumer} that will call methods on {@link ListObjectsRequest.Builder} to create a
*        request.
* @return A Java Future containing the result of the ListObjects operation returned by the service. The
*         CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>NoSuchBucketException The specified bucket does not exist.</li>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to
*         get credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.ListObjects
*/
default CompletableFuture<ListObjectsResponse> listObjects(Consumer<ListObjectsRequest.Builder> listObjectsRequest) {
    // Restored both generic type parameters stripped during extraction: the consumer mutates the
    // request builder and the future completes with the ListObjects result.
    return listObjects(ListObjectsRequest.builder().applyMutation(listObjectsRequest).build());
}

/**

* Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as
* selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or
* invalid XML. Make sure to design your application to parse the contents of the response and handle it
* appropriately.
* <p>
* To use this operation, you must have READ access to the bucket. To use this operation in an AWS Identity
* and Access Management (IAM) policy, you must have permissions to perform the s3:ListBucket action. The
* bucket owner has this permission by default and can grant this permission to others.
* <p>
* This section describes the latest revision of the API. We recommend that you use this revised API for
* application development. For backward compatibility, Amazon S3 continues to support the prior version of
* this API, ListObjects. To get a list of your buckets, see ListBuckets.
*
* @param listObjectsV2Request the request object
* @return A Java Future containing the result of the ListObjectsV2 operation returned by the service. The
*         CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>NoSuchBucketException The specified bucket does not exist.</li>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to
*         get credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.ListObjectsV2
*/
default CompletableFuture<ListObjectsV2Response> listObjectsV2(ListObjectsV2Request listObjectsV2Request) {
    // Restored the generic type parameter stripped during extraction: per the @return doc, the
    // future completes with the ListObjectsV2 result.
    throw new UnsupportedOperationException();
}

/**

* Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as
* selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or
* invalid XML. Make sure to design your application to parse the contents of the response and handle it
* appropriately.
* <p>
* To use this operation, you must have READ access to the bucket. To use this operation in an AWS Identity
* and Access Management (IAM) policy, you must have permissions to perform the s3:ListBucket action. The
* bucket owner has this permission by default and can grant this permission to others.
* <p>
* This section describes the latest revision of the API. We recommend that you use this revised API for
* application development. For backward compatibility, Amazon S3 continues to support the prior version of
* this API, ListObjects. To get a list of your buckets, see ListBuckets.
* <p>
* This is a convenience which creates an instance of the {@link ListObjectsV2Request.Builder} avoiding the
* need to create one manually via {@link ListObjectsV2Request#builder()}
*
* @param listObjectsV2Request
*        A {@link Consumer} that will call methods on {@link ListObjectsV2Request.Builder} to create a
*        request.
* @return A Java Future containing the result of the ListObjectsV2 operation returned by the service. The
*         CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>NoSuchBucketException The specified bucket does not exist.</li>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to
*         get credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.ListObjectsV2
*/
default CompletableFuture<ListObjectsV2Response> listObjectsV2(
        Consumer<ListObjectsV2Request.Builder> listObjectsV2Request) {
    // Restored both generic type parameters stripped during extraction: the consumer mutates the
    // request builder and the future completes with the ListObjectsV2 result.
    return listObjectsV2(ListObjectsV2Request.builder().applyMutation(listObjectsV2Request).build());
}

/**

* Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as
* selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or
* invalid XML. Make sure to design your application to parse the contents of the response and handle it
* appropriately.
* <p>
* To use this operation, you must have READ access to the bucket. To use this operation in an AWS Identity
* and Access Management (IAM) policy, you must have permissions to perform the s3:ListBucket action. The
* bucket owner has this permission by default and can grant this permission to others.
* <p>
* This section describes the latest revision of the API. We recommend that you use this revised API for
* application development. For backward compatibility, Amazon S3 continues to support the prior version of
* this API, ListObjects. To get a list of your buckets, see ListBuckets.
* <p>
* This is a variant of {@link #listObjectsV2(software.amazon.awssdk.services.s3.model.ListObjectsV2Request)}
* operation. The return type is a custom publisher that can be subscribed to request a stream of response
* pages. SDK will internally handle making service calls for you.
* <p>
* When the operation is called, an instance of this class is returned. At this point, no service calls are
* made yet and so there is no guarantee that the request is valid. If there are errors in your request, you
* will see the failures only after you start streaming the data. The subscribe method should be called as a
* request to start streaming data. For more info, see
* {@link org.reactivestreams.Publisher#subscribe(org.reactivestreams.Subscriber)}. Each call to the
* subscribe method will result in a new {@link org.reactivestreams.Subscription} i.e., a new contract to
* stream data from the starting request.
* <p>
* The following are few ways to use the response class:
* <p>
* 1) Using the subscribe helper method
*
* <pre>
* {@code
* software.amazon.awssdk.services.s3.paginators.ListObjectsV2Publisher publisher = client.listObjectsV2Paginator(request);
* CompletableFuture<Void> future = publisher.subscribe(res -> { // Do something with the response });
* future.get();
* }
* </pre>
*
* 2) Using a custom subscriber
*
* <pre>
* {@code
* software.amazon.awssdk.services.s3.paginators.ListObjectsV2Publisher publisher = client.listObjectsV2Paginator(request);
* publisher.subscribe(new Subscriber<software.amazon.awssdk.services.s3.model.ListObjectsV2Response>() {
*
* public void onSubscribe(org.reactivestreams.Subscription subscription) { //... };
*
* public void onNext(software.amazon.awssdk.services.s3.model.ListObjectsV2Response response) { //... };
* });}
* </pre>
*
* As the response is a publisher, it can work well with third party reactive streams implementations like
* RxJava2.
* <p>
* Please notice that the configuration of MaxKeys won't limit the number of results you get with the
* paginator. It only limits the number of results in each page.
* <p>
* Note: If you prefer to have control on service calls, use the
* {@link #listObjectsV2(software.amazon.awssdk.services.s3.model.ListObjectsV2Request)} operation.
*
* @param listObjectsV2Request the request object
* @return A custom publisher that can be subscribed to request a stream of response pages. The
*         CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>NoSuchBucketException The specified bucket does not exist.</li>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to
*         get credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.ListObjectsV2
*/
default ListObjectsV2Publisher listObjectsV2Paginator(ListObjectsV2Request listObjectsV2Request) {
    throw new UnsupportedOperationException();
}

/**

* Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection * criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or * invalid XML. Make sure to design your application to parse the contents of the response and handle it * appropriately. *

*

* To use this operation, you must have READ access to the bucket. *

*

* To use this operation in an AWS Identity and Access Management (IAM) policy, you must have permissions to perform * the s3:ListBucket action. The bucket owner has this permission by default and can grant this * permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

* *

* This section describes the latest revision of the API. We recommend that you use this revised API for application * development. For backward compatibility, Amazon S3 continues to support the prior version of this API, ListObjects. *

*
*

* To get a list of your buckets, see ListBuckets. *

*

* The following operations are related to ListObjectsV2: *

* *
*

* This is a variant of {@link #listObjectsV2(software.amazon.awssdk.services.s3.model.ListObjectsV2Request)} * operation. The return type is a custom publisher that can be subscribed to request a stream of response pages. * SDK will internally handle making service calls for you. *

*

* When the operation is called, an instance of this class is returned. At this point, no service calls are made yet * and so there is no guarantee that the request is valid. If there are errors in your request, you will see the * failures only after you start streaming the data. The subscribe method should be called as a request to start * streaming data. For more info, see * {@link org.reactivestreams.Publisher#subscribe(org.reactivestreams.Subscriber)}. Each call to the subscribe * method will result in a new {@link org.reactivestreams.Subscription} i.e., a new contract to stream data from the * starting request. *

* *

* The following are few ways to use the response class: *

* 1) Using the subscribe helper method * *
     * {@code
     * software.amazon.awssdk.services.s3.paginators.ListObjectsV2Publisher publisher = client.listObjectsV2Paginator(request);
     * CompletableFuture future = publisher.subscribe(res -> { // Do something with the response });
     * future.get();
     * }
     * 
* * 2) Using a custom subscriber * *
     * {@code
     * software.amazon.awssdk.services.s3.paginators.ListObjectsV2Publisher publisher = client.listObjectsV2Paginator(request);
     * publisher.subscribe(new Subscriber() {
     * 
     * public void onSubscribe(org.reactivestreams.Subscriber subscription) { //... };
     * 
     * 
     * public void onNext(software.amazon.awssdk.services.s3.model.ListObjectsV2Response response) { //... };
     * });}
     * 
* * As the response is a publisher, it can work well with third party reactive streams implementations like RxJava2. *

* Please notice that the configuration of MaxKeys won't limit the number of results you get with the paginator. * It only limits the number of results in each page. *

*

* Note: If you prefer to have control on service calls, use the * {@link #listObjectsV2(software.amazon.awssdk.services.s3.model.ListObjectsV2Request)} operation. *

*

* This is a convenience which creates an instance of the {@link ListObjectsV2Request.Builder} avoiding the need to * create one manually via {@link ListObjectsV2Request#builder()} *

* * @param listObjectsV2Request * A {@link Consumer} that will call methods on {@link ListObjectsV2Request.Builder} to create a request. * @return A custom publisher that can be subscribed to request a stream of response pages.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • NoSuchBucketException The specified bucket does not exist.
  • *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.ListObjectsV2 */ default ListObjectsV2Publisher listObjectsV2Paginator(Consumer listObjectsV2Request) { return listObjectsV2Paginator(ListObjectsV2Request.builder().applyMutation(listObjectsV2Request).build()); } /** *

* Lists the parts that have been uploaded for a specific multipart upload. This operation must include the
* upload ID, which you obtain by sending the initiate multipart upload request (see CreateMultipartUpload).
* This request returns a maximum of 1,000 uploaded parts. The default number of parts returned is 1,000
* parts. You can restrict the number of parts returned by specifying the max-parts request parameter. If
* your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated field with
* the value of true, and a NextPartNumberMarker element. In subsequent ListParts requests you can include
* the part-number-marker query string parameter and set its value to the NextPartNumberMarker field value
* from the previous response.
* <p>
* For more information on multipart uploads, see Uploading Objects Using Multipart Upload. For information
* on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.
*
* @param listPartsRequest the request object
* @return A Java Future containing the result of the ListParts operation returned by the service. The
*         CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to
*         get credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.ListParts
*/
default CompletableFuture<ListPartsResponse> listParts(ListPartsRequest listPartsRequest) {
    // Restored the generic type parameter stripped during extraction: per the @return doc, the
    // future completes with the ListParts result.
    throw new UnsupportedOperationException();
}

/**

* Lists the parts that have been uploaded for a specific multipart upload. This operation must include the
* upload ID, which you obtain by sending the initiate multipart upload request (see CreateMultipartUpload).
* This request returns a maximum of 1,000 uploaded parts. The default number of parts returned is 1,000
* parts. You can restrict the number of parts returned by specifying the max-parts request parameter. If
* your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated field with
* the value of true, and a NextPartNumberMarker element. In subsequent ListParts requests you can include
* the part-number-marker query string parameter and set its value to the NextPartNumberMarker field value
* from the previous response.
* <p>
* For more information on multipart uploads, see Uploading Objects Using Multipart Upload. For information
* on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.
* <p>
* This is a convenience which creates an instance of the {@link ListPartsRequest.Builder} avoiding the need
* to create one manually via {@link ListPartsRequest#builder()}
*
* @param listPartsRequest
*        A {@link Consumer} that will call methods on {@link ListPartsRequest.Builder} to create a request.
* @return A Java Future containing the result of the ListParts operation returned by the service. The
*         CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to
*         get credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.ListParts
*/
default CompletableFuture<ListPartsResponse> listParts(Consumer<ListPartsRequest.Builder> listPartsRequest) {
    // Restored both generic type parameters stripped during extraction: the consumer mutates the
    // request builder and the future completes with the ListParts result.
    return listParts(ListPartsRequest.builder().applyMutation(listPartsRequest).build());
}

/**

* Lists the parts that have been uploaded for a specific multipart upload. This operation must include the
* upload ID, which you obtain by sending the initiate multipart upload request (see CreateMultipartUpload).
* This request returns a maximum of 1,000 uploaded parts. The default number of parts returned is 1,000
* parts. You can restrict the number of parts returned by specifying the max-parts request parameter. If
* your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated field with
* the value of true, and a NextPartNumberMarker element. In subsequent ListParts requests you can include
* the part-number-marker query string parameter and set its value to the NextPartNumberMarker field value
* from the previous response.
* <p>
* For more information on multipart uploads, see Uploading Objects Using Multipart Upload. For information
* on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.
* <p>
* This is a variant of {@link #listParts(software.amazon.awssdk.services.s3.model.ListPartsRequest)}
* operation. The return type is a custom publisher that can be subscribed to request a stream of response
* pages. SDK will internally handle making service calls for you.
* <p>
* When the operation is called, an instance of this class is returned. At this point, no service calls are
* made yet and so there is no guarantee that the request is valid. If there are errors in your request, you
* will see the failures only after you start streaming the data. The subscribe method should be called as a
* request to start streaming data. For more info, see
* {@link org.reactivestreams.Publisher#subscribe(org.reactivestreams.Subscriber)}. Each call to the
* subscribe method will result in a new {@link org.reactivestreams.Subscription} i.e., a new contract to
* stream data from the starting request.
* <p>
* The following are few ways to use the response class:
* <p>
* 1) Using the subscribe helper method
*
* <pre>
* {@code
* software.amazon.awssdk.services.s3.paginators.ListPartsPublisher publisher = client.listPartsPaginator(request);
* CompletableFuture<Void> future = publisher.subscribe(res -> { // Do something with the response });
* future.get();
* }
* </pre>
*
* 2) Using a custom subscriber
*
* <pre>
* {@code
* software.amazon.awssdk.services.s3.paginators.ListPartsPublisher publisher = client.listPartsPaginator(request);
* publisher.subscribe(new Subscriber<software.amazon.awssdk.services.s3.model.ListPartsResponse>() {
*
* public void onSubscribe(org.reactivestreams.Subscription subscription) { //... };
*
* public void onNext(software.amazon.awssdk.services.s3.model.ListPartsResponse response) { //... };
* });}
* </pre>
*
* As the response is a publisher, it can work well with third party reactive streams implementations like
* RxJava2.
* <p>
* Please notice that the configuration of MaxParts won't limit the number of results you get with the
* paginator. It only limits the number of results in each page.
* <p>
* Note: If you prefer to have control on service calls, use the
* {@link #listParts(software.amazon.awssdk.services.s3.model.ListPartsRequest)} operation.
*
* @param listPartsRequest the request object
* @return A custom publisher that can be subscribed to request a stream of response pages. The
*         CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to
*         get credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.ListParts
*/
default ListPartsPublisher listPartsPaginator(ListPartsRequest listPartsRequest) {
    throw new UnsupportedOperationException();
}

/**

* Lists the parts that have been uploaded for a specific multipart upload. This operation must include the upload * ID, which you obtain by sending the initiate multipart upload request (see CreateMultipartUpload). * This request returns a maximum of 1,000 uploaded parts. The default number of parts returned is 1,000 parts. You * can restrict the number of parts returned by specifying the max-parts request parameter. If your * multipart upload consists of more than 1,000 parts, the response returns an IsTruncated field with * the value of true, and a NextPartNumberMarker element. In subsequent ListParts requests * you can include the part-number-marker query string parameter and set its value to the * NextPartNumberMarker field value from the previous response. *

*

* For more information on multipart uploads, see Uploading Objects Using Multipart * Upload. *

*

* For information on permissions required to use the multipart upload API, see Multipart Upload API and * Permissions. *

*

* The following operations are related to ListParts: *

* *
*

* This is a variant of {@link #listParts(software.amazon.awssdk.services.s3.model.ListPartsRequest)} operation. The * return type is a custom publisher that can be subscribed to request a stream of response pages. SDK will * internally handle making service calls for you. *

*

* When the operation is called, an instance of this class is returned. At this point, no service calls are made yet * and so there is no guarantee that the request is valid. If there are errors in your request, you will see the * failures only after you start streaming the data. The subscribe method should be called as a request to start * streaming data. For more info, see * {@link org.reactivestreams.Publisher#subscribe(org.reactivestreams.Subscriber)}. Each call to the subscribe * method will result in a new {@link org.reactivestreams.Subscription} i.e., a new contract to stream data from the * starting request. *

* *

* The following are few ways to use the response class: *

* 1) Using the subscribe helper method
*
* <pre>
* {@code
* software.amazon.awssdk.services.s3.paginators.ListPartsPublisher publisher = client.listPartsPaginator(request);
* CompletableFuture<Void> future = publisher.subscribe(res -> { // Do something with the response });
* future.get();
* }
* </pre>
*
* 2) Using a custom subscriber
*
* <pre>
* {@code
* software.amazon.awssdk.services.s3.paginators.ListPartsPublisher publisher = client.listPartsPaginator(request);
* publisher.subscribe(new Subscriber<software.amazon.awssdk.services.s3.model.ListPartsResponse>() {
*
*     public void onSubscribe(org.reactivestreams.Subscription subscription) { //... }
*
*     public void onNext(software.amazon.awssdk.services.s3.model.ListPartsResponse response) { //... }
* });
* }
* </pre>
*
* As the response is a publisher, it can work well with third party reactive streams implementations like RxJava2.
* <p>
* Please notice that the configuration of <code>MaxParts</code> won't limit the number of results you get with the
* paginator. It only limits the number of results in each page.
* <p>
* <b>Note: If you prefer to have control on service calls, use the
* {@link #listParts(software.amazon.awssdk.services.s3.model.ListPartsRequest)} operation.</b>
* <p>
* This is a convenience which creates an instance of the {@link ListPartsRequest.Builder} avoiding the need to
* create one manually via {@link ListPartsRequest#builder()}
*
* @param listPartsRequest
*        A {@link Consumer} that will call methods on {@link ListPartsRequest.Builder} to create a request.
* @return A custom publisher that can be subscribed to request a stream of response pages.<br/>
*         The CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
*         credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.ListParts
*/
default ListPartsPublisher listPartsPaginator(Consumer<ListPartsRequest.Builder> listPartsRequest) {
    // Build the request from the consumer-configured builder, then delegate to the request-object overload.
    return listPartsPaginator(ListPartsRequest.builder().applyMutation(listPartsRequest).build());
}

/**

* Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer Acceleration is a bucket-level
* feature that enables you to perform faster data transfers to Amazon S3.
* <p>
* To use this operation, you must have permission to perform the <code>s3:PutAccelerateConfiguration</code>
* action. The bucket owner has this permission by default; the bucket owner can grant this permission to others.
* <p>
* The Transfer Acceleration state of a bucket can be set to one of the following two values:
* <ul>
* <li>Enabled – Enables accelerated data transfers to the bucket.</li>
* <li>Suspended – Disables accelerated data transfers to the bucket.</li>
* </ul>
* <p>
* The GetBucketAccelerateConfiguration operation returns the transfer acceleration state of a bucket. After
* setting the Transfer Acceleration state of a bucket to Enabled, it might take up to thirty minutes before the
* data transfer rates to the bucket increase.
* <p>
* The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (".").
*
* @param putBucketAccelerateConfigurationRequest
* @return A Java Future containing the result of the PutBucketAccelerateConfiguration operation returned by the
*         service.<br/>
*         The CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
*         credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.PutBucketAccelerateConfiguration
*/
default CompletableFuture<PutBucketAccelerateConfigurationResponse> putBucketAccelerateConfiguration(
        PutBucketAccelerateConfigurationRequest putBucketAccelerateConfigurationRequest) {
    // Overridden by concrete client implementations; the interface default is unsupported.
    throw new UnsupportedOperationException();
}

/**

* Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer Acceleration is a bucket-level
* feature that enables you to perform faster data transfers to Amazon S3.
* <p>
* To use this operation, you must have permission to perform the <code>s3:PutAccelerateConfiguration</code>
* action. The bucket owner has this permission by default; the bucket owner can grant this permission to others.
* <p>
* The Transfer Acceleration state of a bucket can be set to one of the following two values:
* <ul>
* <li>Enabled – Enables accelerated data transfers to the bucket.</li>
* <li>Suspended – Disables accelerated data transfers to the bucket.</li>
* </ul>
* <p>
* The GetBucketAccelerateConfiguration operation returns the transfer acceleration state of a bucket. After
* setting the Transfer Acceleration state of a bucket to Enabled, it might take up to thirty minutes before the
* data transfer rates to the bucket increase.
* <p>
* The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (".").
* <p>
* This is a convenience which creates an instance of the {@link PutBucketAccelerateConfigurationRequest.Builder}
* avoiding the need to create one manually via {@link PutBucketAccelerateConfigurationRequest#builder()}
*
* @param putBucketAccelerateConfigurationRequest
*        A {@link Consumer} that will call methods on {@link PutBucketAccelerateConfigurationRequest.Builder} to
*        create a request.
* @return A Java Future containing the result of the PutBucketAccelerateConfiguration operation returned by the
*         service.<br/>
*         The CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
*         credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.PutBucketAccelerateConfiguration
*/
default CompletableFuture<PutBucketAccelerateConfigurationResponse> putBucketAccelerateConfiguration(
        Consumer<PutBucketAccelerateConfigurationRequest.Builder> putBucketAccelerateConfigurationRequest) {
    // Build the request from the consumer-configured builder, then delegate to the request-object overload.
    return putBucketAccelerateConfiguration(PutBucketAccelerateConfigurationRequest.builder()
            .applyMutation(putBucketAccelerateConfigurationRequest).build());
}

/**

* Sets the permissions on an existing bucket using access control lists (ACL). For more information, see Using ACLs. To set the ACL of a * bucket, you must have WRITE_ACP permission. *

*

* You can use one of the following two ways to set a bucket's permissions: *

*
    *
  • *

    * Specify the ACL in the request body *

    *
  • *
  • *

    * Specify permissions using request headers *

    *
  • *
* *

* You cannot specify access permission using both the body and the request headers. *

*
*

* Depending on your application needs, you may choose to set the ACL on a bucket using either the request body or * the headers. For example, if you have an existing application that updates a bucket ACL using the request body, * then you can continue to use that approach. *

*

* Access Permissions *

*

* You can set access permissions using one of the following methods: *

*
    *
  • *

    * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, * known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned * ACL name as the value of x-amz-acl. If you use this header, you cannot use other access * control-specific headers in your request. For more information, see Canned ACL. *

    *
  • *
  • *

    * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, * x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, * you specify explicit access permissions and grantees (AWS accounts or Amazon S3 groups) who will receive the * permission. If you use these ACL-specific headers, you cannot use the x-amz-acl header to set a * canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more * information, see Access Control List * (ACL) Overview. *

    *

    * You specify each grantee as a type=value pair, where the type is one of the following: *

    *
      *
    • *

      * id – if the value specified is the canonical user ID of an AWS account *

      *
    • *
    • *

      * uri – if you are granting permissions to a predefined group *

      *
    • *
    • *

      * emailAddress – if the value specified is the email address of an AWS account *

      * *

      * Using email addresses to specify a grantee is only supported in the following AWS Regions: *

      *
        *
      • *

        * US East (N. Virginia) *

        *
      • *
      • *

        * US West (N. California) *

        *
      • *
      • *

        * US West (Oregon) *

        *
      • *
      • *

        * Asia Pacific (Singapore) *

        *
      • *
      • *

        * Asia Pacific (Sydney) *

        *
      • *
      • *

        * Asia Pacific (Tokyo) *

        *
      • *
      • *

        * Europe (Ireland) *

        *
      • *
      • *

        * South America (São Paulo) *

        *
      • *
      *

      * For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS * General Reference. *

      *
    • *
    *

    * For example, the following x-amz-grant-write header grants create, overwrite, and delete objects * permission to LogDelivery group predefined by Amazon S3 and two AWS accounts identified by their email addresses. *

    *

    * x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333", id="555566667777" *

    *
  • *
*

* You can use either a canned ACL or specify access permissions explicitly. You cannot do both. *

*

* Grantee Values *

*

* You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the * following ways: *

*
    *
  • *

    * By the person's ID: *

    *

    * <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee> *

    *

    * DisplayName is optional and ignored in the request *

    *
  • *
  • *

    * By URI: *

    *

    * <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee> *

    *
  • *
  • *

    * By Email address: *

    *

    * <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="AmazonCustomerByEmail"><EmailAddress><>[email protected]<></EmailAddress>lt;/Grantee> *

    *

    * The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the * CanonicalUser. *

    * *

    * Using email addresses to specify a grantee is only supported in the following AWS Regions: *

    *
      *
    • *

      * US East (N. Virginia) *

      *
    • *
    • *

      * US West (N. California) *

      *
    • *
    • *

      * US West (Oregon) *

      *
    • *
    • *

      * Asia Pacific (Singapore) *

      *
    • *
    • *

      * Asia Pacific (Sydney) *

      *
    • *
    • *

      * Asia Pacific (Tokyo) *

      *
    • *
    • *

      * Europe (Ireland) *

      *
    • *
    • *

      * South America (São Paulo) *

      *
    • *
    *

    * For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS * General Reference. *

    *
  • *
*

* Related Resources *

* * * @param putBucketAclRequest * @return A Java Future containing the result of the PutBucketAcl operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketAcl */ default CompletableFuture putBucketAcl(PutBucketAclRequest putBucketAclRequest) { throw new UnsupportedOperationException(); } /** *

* Sets the permissions on an existing bucket using access control lists (ACL). For more information, see Using ACLs. To set the ACL of a * bucket, you must have WRITE_ACP permission. *

*

* You can use one of the following two ways to set a bucket's permissions: *

*
    *
  • *

    * Specify the ACL in the request body *

    *
  • *
  • *

    * Specify permissions using request headers *

    *
  • *
* *

* You cannot specify access permission using both the body and the request headers. *

*
*

* Depending on your application needs, you may choose to set the ACL on a bucket using either the request body or * the headers. For example, if you have an existing application that updates a bucket ACL using the request body, * then you can continue to use that approach. *

*

* Access Permissions *

*

* You can set access permissions using one of the following methods: *

*
    *
  • *

    * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, * known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned * ACL name as the value of x-amz-acl. If you use this header, you cannot use other access * control-specific headers in your request. For more information, see Canned ACL. *

    *
  • *
  • *

    * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, * x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, * you specify explicit access permissions and grantees (AWS accounts or Amazon S3 groups) who will receive the * permission. If you use these ACL-specific headers, you cannot use the x-amz-acl header to set a * canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more * information, see Access Control List * (ACL) Overview. *

    *

    * You specify each grantee as a type=value pair, where the type is one of the following: *

    *
      *
    • *

      * id – if the value specified is the canonical user ID of an AWS account *

      *
    • *
    • *

      * uri – if you are granting permissions to a predefined group *

      *
    • *
    • *

      * emailAddress – if the value specified is the email address of an AWS account *

      * *

      * Using email addresses to specify a grantee is only supported in the following AWS Regions: *

      *
        *
      • *

        * US East (N. Virginia) *

        *
      • *
      • *

        * US West (N. California) *

        *
      • *
      • *

        * US West (Oregon) *

        *
      • *
      • *

        * Asia Pacific (Singapore) *

        *
      • *
      • *

        * Asia Pacific (Sydney) *

        *
      • *
      • *

        * Asia Pacific (Tokyo) *

        *
      • *
      • *

        * Europe (Ireland) *

        *
      • *
      • *

        * South America (São Paulo) *

        *
      • *
      *

      * For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS * General Reference. *

      *
    • *
    *

    * For example, the following x-amz-grant-write header grants create, overwrite, and delete objects * permission to LogDelivery group predefined by Amazon S3 and two AWS accounts identified by their email addresses. *

    *

    * x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333", id="555566667777" *

    *
  • *
*

* You can use either a canned ACL or specify access permissions explicitly. You cannot do both. *

*

* Grantee Values *

*

* You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the * following ways: *

*
    *
  • *

    * By the person's ID: *

    *

    * <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee> *

    *

    * DisplayName is optional and ignored in the request *

    *
  • *
  • *

    * By URI: *

    *

    * <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee> *

    *
  • *
  • *

    * By Email address: *

    *

    * <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="AmazonCustomerByEmail"><EmailAddress><>[email protected]<></EmailAddress>lt;/Grantee> *

    *

    * The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the * CanonicalUser. *

    * *

    * Using email addresses to specify a grantee is only supported in the following AWS Regions: *

    *
      *
    • *

      * US East (N. Virginia) *

      *
    • *
    • *

      * US West (N. California) *

      *
    • *
    • *

      * US West (Oregon) *

      *
    • *
    • *

      * Asia Pacific (Singapore) *

      *
    • *
    • *

      * Asia Pacific (Sydney) *

      *
    • *
    • *

      * Asia Pacific (Tokyo) *

      *
    • *
    • *

      * Europe (Ireland) *

      *
    • *
    • *

      * South America (São Paulo) *

      *
    • *
    *

    * For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS * General Reference. *

    *
  • *
*

* Related Resources *

* *
*

* This is a convenience which creates an instance of the {@link PutBucketAclRequest.Builder} avoiding the need to * create one manually via {@link PutBucketAclRequest#builder()} *

* * @param putBucketAclRequest * A {@link Consumer} that will call methods on {@link PutBucketAclRequest.Builder} to create a request. * @return A Java Future containing the result of the PutBucketAcl operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketAcl */ default CompletableFuture putBucketAcl(Consumer putBucketAclRequest) { return putBucketAcl(PutBucketAclRequest.builder().applyMutation(putBucketAclRequest).build()); } /** *

* Sets an analytics configuration for the bucket (specified by the analytics configuration ID). You can have up
* to 1,000 analytics configurations per bucket.
* <p>
* You can choose to have storage class analysis export analysis reports sent to a comma-separated values (CSV)
* flat file. See the <code>DataExport</code> request element. Reports are updated daily and are based on the
* object filters that you configure. When selecting data export, you specify a destination bucket and an
* optional destination prefix where the file is written. You can export the data to a destination bucket in a
* different account. However, the destination bucket must be in the same Region as the bucket that you are
* making the PUT analytics configuration to.
* <p>
* You must create a bucket policy on the destination bucket where the exported file is written to grant
* permissions to Amazon S3 to write objects to the bucket.
* <p>
* To use this operation, you must have permissions to perform the <code>s3:PutAnalyticsConfiguration</code>
* action. The bucket owner has this permission by default; the bucket owner can grant this permission to others.
* <p>
* <b>Special Errors</b>
* <ul>
* <li>HTTP 400 Bad Request, Code: InvalidArgument – Invalid argument.</li>
* <li>HTTP 400 Bad Request, Code: TooManyConfigurations – You are attempting to create a new configuration but
* have already reached the 1,000-configuration limit.</li>
* <li>HTTP 403 Forbidden, Code: AccessDenied – You are not the owner of the specified bucket, or you do not have
* the s3:PutAnalyticsConfiguration bucket permission to set the configuration on the bucket.</li>
* </ul>
*
* @param putBucketAnalyticsConfigurationRequest
* @return A Java Future containing the result of the PutBucketAnalyticsConfiguration operation returned by the
*         service.<br/>
*         The CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
*         credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.PutBucketAnalyticsConfiguration
*/
default CompletableFuture<PutBucketAnalyticsConfigurationResponse> putBucketAnalyticsConfiguration(
        PutBucketAnalyticsConfigurationRequest putBucketAnalyticsConfigurationRequest) {
    // Overridden by concrete client implementations; the interface default is unsupported.
    throw new UnsupportedOperationException();
}

/**

* Sets an analytics configuration for the bucket (specified by the analytics configuration ID). You can have up to * 1,000 analytics configurations per bucket. *

*

* You can choose to have storage class analysis export analysis reports sent to a comma-separated values (CSV) flat * file. See the DataExport request element. Reports are updated daily and are based on the object * filters that you configure. When selecting data export, you specify a destination bucket and an optional * destination prefix where the file is written. You can export the data to a destination bucket in a different * account. However, the destination bucket must be in the same Region as the bucket that you are making the PUT * analytics configuration to. For more information, see Amazon S3 Analytics – Storage * Class Analysis. *

* *

* You must create a bucket policy on the destination bucket where the exported file is written to grant permissions * to Amazon S3 to write objects to the bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis. *

*
*

* To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. * The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more * information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* Special Errors *

*
    *
  • *
      *
    • *

      * HTTP Error: HTTP 400 Bad Request *

      *
    • *
    • *

      * Code: InvalidArgument *

      *
    • *
    • *

      * Cause: Invalid argument. *

      *
    • *
    *
  • *
  • *
      *
    • *

      * HTTP Error: HTTP 400 Bad Request *

      *
    • *
    • *

      * Code: TooManyConfigurations *

      *
    • *
    • *

      * Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration * limit. *

      *
    • *
    *
  • *
  • *
      *
    • *

      * HTTP Error: HTTP 403 Forbidden *

      *
    • *
    • *

      * Code: AccessDenied *

      *
    • *
    • *

      * Cause: You are not the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration * bucket permission to set the configuration on the bucket. *

      *
    • *
    *
  • *
*

* Related Resources *

* *
*

* This is a convenience which creates an instance of the {@link PutBucketAnalyticsConfigurationRequest.Builder} * avoiding the need to create one manually via {@link PutBucketAnalyticsConfigurationRequest#builder()} *

* * @param putBucketAnalyticsConfigurationRequest * A {@link Consumer} that will call methods on {@link PutBucketAnalyticsConfigurationRequest.Builder} to * create a request. * @return A Java Future containing the result of the PutBucketAnalyticsConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketAnalyticsConfiguration */ default CompletableFuture putBucketAnalyticsConfiguration( Consumer putBucketAnalyticsConfigurationRequest) { return putBucketAnalyticsConfiguration(PutBucketAnalyticsConfigurationRequest.builder() .applyMutation(putBucketAnalyticsConfigurationRequest).build()); } /** *

* Sets the <code>cors</code> configuration for your bucket. If the configuration exists, Amazon S3 replaces it.
* <p>
* To use this operation, you must be allowed to perform the <code>s3:PutBucketCORS</code> action. By default,
* the bucket owner has this permission and can grant it to others.
* <p>
* You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you
* might want to enable a request whose origin is <code>http://www.example.com</code> to access your Amazon S3
* bucket at <code>my.example.bucket.com</code> by using the browser's <code>XMLHttpRequest</code> capability.
* <p>
* To enable cross-origin resource sharing (CORS) on a bucket, you add the <code>cors</code> subresource to the
* bucket. The <code>cors</code> subresource is an XML document in which you configure rules that identify
* origins and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.
* <p>
* When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it
* evaluates the <code>cors</code> configuration on the bucket and uses the first <code>CORSRule</code> rule that
* matches the incoming browser request to enable a cross-origin request. For a rule to match, the following
* conditions must be met:
* <ul>
* <li>The request's <code>Origin</code> header must match <code>AllowedOrigin</code> elements.</li>
* <li>The request method (for example, GET, PUT, HEAD, and so on) or the
* <code>Access-Control-Request-Method</code> header in case of a pre-flight OPTIONS request must be one of the
* <code>AllowedMethod</code> elements.</li>
* <li>Every header specified in the <code>Access-Control-Request-Headers</code> request header of a pre-flight
* request must match an <code>AllowedHeader</code> element.</li>
* </ul>
*
* @param putBucketCorsRequest
* @return A Java Future containing the result of the PutBucketCors operation returned by the service.<br/>
*         The CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
*         credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.PutBucketCors
*/
default CompletableFuture<PutBucketCorsResponse> putBucketCors(PutBucketCorsRequest putBucketCorsRequest) {
    // Overridden by concrete client implementations; the interface default is unsupported.
    throw new UnsupportedOperationException();
}

/**

* Sets the <code>cors</code> configuration for your bucket. If the configuration exists, Amazon S3 replaces it.
* <p>
* To use this operation, you must be allowed to perform the <code>s3:PutBucketCORS</code> action. By default,
* the bucket owner has this permission and can grant it to others.
* <p>
* You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you
* might want to enable a request whose origin is <code>http://www.example.com</code> to access your Amazon S3
* bucket at <code>my.example.bucket.com</code> by using the browser's <code>XMLHttpRequest</code> capability.
* <p>
* To enable cross-origin resource sharing (CORS) on a bucket, you add the <code>cors</code> subresource to the
* bucket. The <code>cors</code> subresource is an XML document in which you configure rules that identify
* origins and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.
* <p>
* When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it
* evaluates the <code>cors</code> configuration on the bucket and uses the first <code>CORSRule</code> rule that
* matches the incoming browser request to enable a cross-origin request. For a rule to match, the following
* conditions must be met:
* <ul>
* <li>The request's <code>Origin</code> header must match <code>AllowedOrigin</code> elements.</li>
* <li>The request method (for example, GET, PUT, HEAD, and so on) or the
* <code>Access-Control-Request-Method</code> header in case of a pre-flight OPTIONS request must be one of the
* <code>AllowedMethod</code> elements.</li>
* <li>Every header specified in the <code>Access-Control-Request-Headers</code> request header of a pre-flight
* request must match an <code>AllowedHeader</code> element.</li>
* </ul>
* <p>
* This is a convenience which creates an instance of the {@link PutBucketCorsRequest.Builder} avoiding the need
* to create one manually via {@link PutBucketCorsRequest#builder()}
*
* @param putBucketCorsRequest
*        A {@link Consumer} that will call methods on {@link PutBucketCorsRequest.Builder} to create a request.
* @return A Java Future containing the result of the PutBucketCors operation returned by the service.<br/>
*         The CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to get
*         credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.PutBucketCors
*/
default CompletableFuture<PutBucketCorsResponse> putBucketCors(
        Consumer<PutBucketCorsRequest.Builder> putBucketCorsRequest) {
    // Build the request from the consumer-configured builder, then delegate to the request-object overload.
    return putBucketCors(PutBucketCorsRequest.builder().applyMutation(putBucketCorsRequest).build());
}

/**

* This implementation of the PUT operation uses the encryption subresource to set the * default encryption state of an existing bucket. *

*

* This implementation of the PUT operation sets default encryption for a bucket using server-side * encryption with Amazon S3-managed keys SSE-S3 or AWS KMS customer master keys (CMKs) (SSE-KMS). For information * about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket * Encryption. *

* *

* This operation requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature Version 4). *

*
*

* To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration * action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. * For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide. *

*

* Related Resources *

* * * @param putBucketEncryptionRequest * @return A Java Future containing the result of the PutBucketEncryption operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketEncryption
*/
default CompletableFuture<PutBucketEncryptionResponse> putBucketEncryption(
        PutBucketEncryptionRequest putBucketEncryptionRequest) {
    // Base interface method; concrete async clients generated by the SDK override this.
    throw new UnsupportedOperationException();
}

/**
*

* This implementation of the PUT operation uses the encryption subresource to set the * default encryption state of an existing bucket. *

*

* This implementation of the PUT operation sets default encryption for a bucket using server-side * encryption with Amazon S3-managed keys SSE-S3 or AWS KMS customer master keys (CMKs) (SSE-KMS). For information * about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket * Encryption. *

* *

* This operation requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature Version 4). *

*
*

* To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration * action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. * For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide. *

*

* Related Resources *

* *
*

* This is a convenience which creates an instance of the {@link PutBucketEncryptionRequest.Builder} avoiding the * need to create one manually via {@link PutBucketEncryptionRequest#builder()} *

* * @param putBucketEncryptionRequest * A {@link Consumer} that will call methods on {@link PutBucketEncryptionRequest.Builder} to create a * request. * @return A Java Future containing the result of the PutBucketEncryption operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketEncryption
*/
default CompletableFuture<PutBucketEncryptionResponse> putBucketEncryption(
        Consumer<PutBucketEncryptionRequest.Builder> putBucketEncryptionRequest) {
    // Convenience overload: build the request from the consumer-mutated builder
    // and delegate to putBucketEncryption(PutBucketEncryptionRequest).
    return putBucketEncryption(PutBucketEncryptionRequest.builder().applyMutation(putBucketEncryptionRequest).build());
}

/**
*

* Puts a S3 Intelligent-Tiering configuration to the specified bucket. *

*

* The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to * the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering * delivers automatic cost savings by moving data between access tiers, when access patterns change. *

*

* The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at * least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects * can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering * storage class. *

*

* If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 * days. For more information, see Storage * class for automatically optimizing frequently and infrequently accessed objects. *

*

* Operations related to PutBucketIntelligentTieringConfiguration include: *

* * * @param putBucketIntelligentTieringConfigurationRequest * @return A Java Future containing the result of the PutBucketIntelligentTieringConfiguration operation returned by * the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketIntelligentTieringConfiguration
*/
default CompletableFuture<PutBucketIntelligentTieringConfigurationResponse> putBucketIntelligentTieringConfiguration(
        PutBucketIntelligentTieringConfigurationRequest putBucketIntelligentTieringConfigurationRequest) {
    // Base interface method; concrete async clients generated by the SDK override this.
    throw new UnsupportedOperationException();
}

/**
*

* Puts a S3 Intelligent-Tiering configuration to the specified bucket. *

*

* The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to * the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering * delivers automatic cost savings by moving data between access tiers, when access patterns change. *

*

* The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at * least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects * can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering * storage class. *

*

* If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 * days. For more information, see Storage * class for automatically optimizing frequently and infrequently accessed objects. *

*

* Operations related to PutBucketIntelligentTieringConfiguration include: *

* *
*

* This is a convenience which creates an instance of the * {@link PutBucketIntelligentTieringConfigurationRequest.Builder} avoiding the need to create one manually via * {@link PutBucketIntelligentTieringConfigurationRequest#builder()} *

* * @param putBucketIntelligentTieringConfigurationRequest * A {@link Consumer} that will call methods on * {@link PutBucketIntelligentTieringConfigurationRequest.Builder} to create a request. * @return A Java Future containing the result of the PutBucketIntelligentTieringConfiguration operation returned by * the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketIntelligentTieringConfiguration
*/
default CompletableFuture<PutBucketIntelligentTieringConfigurationResponse> putBucketIntelligentTieringConfiguration(
        Consumer<PutBucketIntelligentTieringConfigurationRequest.Builder> putBucketIntelligentTieringConfigurationRequest) {
    // Convenience overload: build the request from the consumer-mutated builder
    // and delegate to the request-object overload.
    return putBucketIntelligentTieringConfiguration(PutBucketIntelligentTieringConfigurationRequest.builder()
            .applyMutation(putBucketIntelligentTieringConfigurationRequest).build());
}

/**
*

* This implementation of the PUT operation adds an inventory configuration (identified by the * inventory ID) to the bucket. You can have up to 1,000 inventory configurations per bucket. *

*

* Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly basis, and the * results are published to a flat file. The bucket that is inventoried is called the source bucket, and the * bucket where the inventory flat file is stored is called the destination bucket. The destination * bucket must be in the same AWS Region as the source bucket. *

*

* When you configure an inventory for a source bucket, you specify the destination bucket where you * want the inventory to be stored, and whether to generate the inventory daily or weekly. You can also configure * what object metadata to include and whether to inventory all object versions or only current versions. For more * information, see Amazon S3 * Inventory in the Amazon Simple Storage Service Developer Guide. *

* *

* You must create a bucket policy on the destination bucket to grant permissions to Amazon S3 to write * objects to the bucket in the defined location. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis. *

*
*

* To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. * The bucket owner has this permission by default and can grant this permission to others. For more information * about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide. *

*

* Special Errors *

*
    *
  • *

    * HTTP 400 Bad Request Error *

    *
      *
    • *

      * Code: InvalidArgument *

      *
    • *
    • *

      * Cause: Invalid Argument *

      *
    • *
    *
  • *
  • *

    * HTTP 400 Bad Request Error *

    *
      *
    • *

      * Code: TooManyConfigurations *

      *
    • *
    • *

      * Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration * limit. *

      *
    • *
    *
  • *
  • *

    * HTTP 403 Forbidden Error *

    *
      *
    • *

      * Code: AccessDenied *

      *
    • *
    • *

      * Cause: You are not the owner of the specified bucket, or you do not have the * s3:PutInventoryConfiguration bucket permission to set the configuration on the bucket. *

      *
    • *
    *
  • *
*

* Related Resources *

* * * @param putBucketInventoryConfigurationRequest * @return A Java Future containing the result of the PutBucketInventoryConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketInventoryConfiguration
*/
default CompletableFuture<PutBucketInventoryConfigurationResponse> putBucketInventoryConfiguration(
        PutBucketInventoryConfigurationRequest putBucketInventoryConfigurationRequest) {
    // Base interface method; concrete async clients generated by the SDK override this.
    throw new UnsupportedOperationException();
}

/**
*

* This implementation of the PUT operation adds an inventory configuration (identified by the * inventory ID) to the bucket. You can have up to 1,000 inventory configurations per bucket. *

*

* Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly basis, and the * results are published to a flat file. The bucket that is inventoried is called the source bucket, and the * bucket where the inventory flat file is stored is called the destination bucket. The destination * bucket must be in the same AWS Region as the source bucket. *

*

* When you configure an inventory for a source bucket, you specify the destination bucket where you * want the inventory to be stored, and whether to generate the inventory daily or weekly. You can also configure * what object metadata to include and whether to inventory all object versions or only current versions. For more * information, see Amazon S3 * Inventory in the Amazon Simple Storage Service Developer Guide. *

* *

* You must create a bucket policy on the destination bucket to grant permissions to Amazon S3 to write * objects to the bucket in the defined location. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis. *

*
*

* To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. * The bucket owner has this permission by default and can grant this permission to others. For more information * about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide. *

*

* Special Errors *

*
    *
  • *

    * HTTP 400 Bad Request Error *

    *
      *
    • *

      * Code: InvalidArgument *

      *
    • *
    • *

      * Cause: Invalid Argument *

      *
    • *
    *
  • *
  • *

    * HTTP 400 Bad Request Error *

    *
      *
    • *

      * Code: TooManyConfigurations *

      *
    • *
    • *

      * Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration * limit. *

      *
    • *
    *
  • *
  • *

    * HTTP 403 Forbidden Error *

    *
      *
    • *

      * Code: AccessDenied *

      *
    • *
    • *

      * Cause: You are not the owner of the specified bucket, or you do not have the * s3:PutInventoryConfiguration bucket permission to set the configuration on the bucket. *

      *
    • *
    *
  • *
*

* Related Resources *

* *
*

* This is a convenience which creates an instance of the {@link PutBucketInventoryConfigurationRequest.Builder} * avoiding the need to create one manually via {@link PutBucketInventoryConfigurationRequest#builder()} *

* * @param putBucketInventoryConfigurationRequest * A {@link Consumer} that will call methods on {@link PutBucketInventoryConfigurationRequest.Builder} to * create a request. * @return A Java Future containing the result of the PutBucketInventoryConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketInventoryConfiguration
*/
default CompletableFuture<PutBucketInventoryConfigurationResponse> putBucketInventoryConfiguration(
        Consumer<PutBucketInventoryConfigurationRequest.Builder> putBucketInventoryConfigurationRequest) {
    // Convenience overload: build the request from the consumer-mutated builder
    // and delegate to the request-object overload.
    return putBucketInventoryConfiguration(PutBucketInventoryConfigurationRequest.builder()
            .applyMutation(putBucketInventoryConfigurationRequest).build());
}

/**
*

* Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For * information about lifecycle configuration, see Managing Access Permissions to Your * Amazon S3 Resources. *

* *

* Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or * more object tags, or a combination of both. Accordingly, this section describes the latest API. The previous * version of the API supported filtering based only on an object key name prefix, which is supported for backward * compatibility. For the related API description, see PutBucketLifecycle. *

*
*

* Rules *

*

* You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML * consisting of one or more rules. Each rule consists of the following: *

*
    *
  • *

    * Filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, * object tags, or a combination of both. *

    *
  • *
  • *

    * Status whether the rule is in effect. *

    *
  • *
  • *

    * One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects * identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have * many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides * predefined actions that you can specify for current and noncurrent object versions. *

    *
  • *
*

* For more information, see Object Lifecycle Management * and Lifecycle Configuration * Elements. *

*

* Permissions *

*

* By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for * example, lifecycle configuration and website configuration). Only the resource owner (that is, the AWS account * that created it) can access the resource. The resource owner can optionally grant access permissions to others by * writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration permission. *

*

* You can also explicitly deny permissions. Explicit deny also supersedes any other permissions. If you want to * block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for * the following actions: *

*
    *
  • *

    * s3:DeleteObject *

    *
  • *
  • *

    * s3:DeleteObjectVersion *

    *
  • *
  • *

    * s3:PutLifecycleConfiguration *

    *
  • *
*

* For more information about permissions, see Managing Access Permissions to Your * Amazon S3 Resources. *

*

* The following are related to PutBucketLifecycleConfiguration: *

* * * @param putBucketLifecycleConfigurationRequest * @return A Java Future containing the result of the PutBucketLifecycleConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketLifecycleConfiguration
*/
default CompletableFuture<PutBucketLifecycleConfigurationResponse> putBucketLifecycleConfiguration(
        PutBucketLifecycleConfigurationRequest putBucketLifecycleConfigurationRequest) {
    // Base interface method; concrete async clients generated by the SDK override this.
    throw new UnsupportedOperationException();
}

/**
*

* Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For * information about lifecycle configuration, see Managing Access Permissions to Your * Amazon S3 Resources. *

* *

* Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or * more object tags, or a combination of both. Accordingly, this section describes the latest API. The previous * version of the API supported filtering based only on an object key name prefix, which is supported for backward * compatibility. For the related API description, see PutBucketLifecycle. *

*
*

* Rules *

*

* You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML * consisting of one or more rules. Each rule consists of the following: *

*
    *
  • *

    * Filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, * object tags, or a combination of both. *

    *
  • *
  • *

    * Status whether the rule is in effect. *

    *
  • *
  • *

    * One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects * identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have * many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides * predefined actions that you can specify for current and noncurrent object versions. *

    *
  • *
*

* For more information, see Object Lifecycle Management * and Lifecycle Configuration * Elements. *

*

* Permissions *

*

* By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for * example, lifecycle configuration and website configuration). Only the resource owner (that is, the AWS account * that created it) can access the resource. The resource owner can optionally grant access permissions to others by * writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration permission. *

*

* You can also explicitly deny permissions. Explicit deny also supersedes any other permissions. If you want to * block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for * the following actions: *

*
    *
  • *

    * s3:DeleteObject *

    *
  • *
  • *

    * s3:DeleteObjectVersion *

    *
  • *
  • *

    * s3:PutLifecycleConfiguration *

    *
  • *
*

* For more information about permissions, see Managing Access Permissions to Your * Amazon S3 Resources. *

*

* The following are related to PutBucketLifecycleConfiguration: *

* *
*

* This is a convenience which creates an instance of the {@link PutBucketLifecycleConfigurationRequest.Builder} * avoiding the need to create one manually via {@link PutBucketLifecycleConfigurationRequest#builder()} *

* * @param putBucketLifecycleConfigurationRequest * A {@link Consumer} that will call methods on {@link PutBucketLifecycleConfigurationRequest.Builder} to * create a request. * @return A Java Future containing the result of the PutBucketLifecycleConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketLifecycleConfiguration
*/
default CompletableFuture<PutBucketLifecycleConfigurationResponse> putBucketLifecycleConfiguration(
        Consumer<PutBucketLifecycleConfigurationRequest.Builder> putBucketLifecycleConfigurationRequest) {
    // Convenience overload: build the request from the consumer-mutated builder
    // and delegate to the request-object overload.
    return putBucketLifecycleConfiguration(PutBucketLifecycleConfigurationRequest.builder()
            .applyMutation(putBucketLifecycleConfigurationRequest).build());
}

/**
*

* Set the logging parameters for a bucket and to specify permissions for who can view and modify the logging * parameters. All logs are saved to buckets in the same AWS Region as the source bucket. To set the logging status * of a bucket, you must be the bucket owner. *

*

* The bucket owner is automatically granted FULL_CONTROL to all logs. You use the Grantee request * element to grant access to other people. The Permissions request element specifies the kind of * access the grantee has to the logs. *

*

* Grantee Values *

*

* You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the * following ways: *

*
    *
  • *

    * By the person's ID: *

    *

    * <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee> *

    *

    * DisplayName is optional and ignored in the request. *

    *
  • *
  • *

    * By Email address: *

    *

    * <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="AmazonCustomerByEmail"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee> *

    *

    * The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the * CanonicalUser. *

    *
  • *
  • *

    * By URI: *

    *

    * <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee> *

    *
  • *
*

* To enable logging, you use LoggingEnabled and its children request elements. To disable logging, you use an empty * BucketLoggingStatus request element: *

*

* <BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" /> *

*

* For more information about server access logging, see Server Access Logging. *

*

* For more information about creating a bucket, see CreateBucket. For more * information about returning the logging status of a bucket, see GetBucketLogging. *

*

* The following operations are related to PutBucketLogging: *

* * * @param putBucketLoggingRequest * @return A Java Future containing the result of the PutBucketLogging operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketLogging
*/
default CompletableFuture<PutBucketLoggingResponse> putBucketLogging(PutBucketLoggingRequest putBucketLoggingRequest) {
    // Base interface method; concrete async clients generated by the SDK override this.
    throw new UnsupportedOperationException();
}

/**
*

* Set the logging parameters for a bucket and to specify permissions for who can view and modify the logging * parameters. All logs are saved to buckets in the same AWS Region as the source bucket. To set the logging status * of a bucket, you must be the bucket owner. *

*

* The bucket owner is automatically granted FULL_CONTROL to all logs. You use the Grantee request * element to grant access to other people. The Permissions request element specifies the kind of * access the grantee has to the logs. *

*

* Grantee Values *

*

* You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the * following ways: *

*
    *
  • *

    * By the person's ID: *

    *

    * <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee> *

    *

    * DisplayName is optional and ignored in the request. *

    *
  • *
  • *

    * By Email address: *

    *

    * <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="AmazonCustomerByEmail"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee> *

    *

    * The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the * CanonicalUser. *

    *
  • *
  • *

    * By URI: *

    *

    * <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee> *

    *
  • *
*

* To enable logging, you use LoggingEnabled and its children request elements. To disable logging, you use an empty * BucketLoggingStatus request element: *

*

* <BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" /> *

*

* For more information about server access logging, see Server Access Logging. *

*

* For more information about creating a bucket, see CreateBucket. For more * information about returning the logging status of a bucket, see GetBucketLogging. *

*

* The following operations are related to PutBucketLogging: *

* *
*

* This is a convenience which creates an instance of the {@link PutBucketLoggingRequest.Builder} avoiding the need * to create one manually via {@link PutBucketLoggingRequest#builder()} *

* * @param putBucketLoggingRequest * A {@link Consumer} that will call methods on {@link PutBucketLoggingRequest.Builder} to create a request. * @return A Java Future containing the result of the PutBucketLogging operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketLogging
*/
default CompletableFuture<PutBucketLoggingResponse> putBucketLogging(
        Consumer<PutBucketLoggingRequest.Builder> putBucketLoggingRequest) {
    // Convenience overload: build the request from the consumer-mutated builder
    // and delegate to putBucketLogging(PutBucketLoggingRequest).
    return putBucketLogging(PutBucketLoggingRequest.builder().applyMutation(putBucketLoggingRequest).build());
}

/**
*

* Sets a metrics configuration (specified by the metrics configuration ID) for the bucket. You can have up to 1,000 * metrics configurations per bucket. If you're updating an existing metrics configuration, note that this is a full * replacement of the existing metrics configuration. If you don't include the elements you want to keep, they are * erased. *

*

* To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. * The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more * information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon * CloudWatch. *

*

* The following operations are related to PutBucketMetricsConfiguration: *

* *

* GetBucketLifecycle has the following special error: *

*
    *
  • *

    * Error code: TooManyConfigurations *

    *
      *
    • *

      * Description: You are attempting to create a new configuration but have already reached the 1,000-configuration * limit. *

      *
    • *
    • *

      * HTTP Status Code: HTTP 400 Bad Request *

      *
    • *
    *
  • *
* * @param putBucketMetricsConfigurationRequest * @return A Java Future containing the result of the PutBucketMetricsConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketMetricsConfiguration
*/
default CompletableFuture<PutBucketMetricsConfigurationResponse> putBucketMetricsConfiguration(
        PutBucketMetricsConfigurationRequest putBucketMetricsConfigurationRequest) {
    // Base interface method; concrete async clients generated by the SDK override this.
    throw new UnsupportedOperationException();
}

/**
*

* Sets a metrics configuration (specified by the metrics configuration ID) for the bucket. You can have up to 1,000 * metrics configurations per bucket. If you're updating an existing metrics configuration, note that this is a full * replacement of the existing metrics configuration. If you don't include the elements you want to keep, they are * erased. *

*

* To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. * The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more * information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon * CloudWatch. *

*

* The following operations are related to PutBucketMetricsConfiguration: *

* *

* PutBucketMetricsConfiguration has the following special error: *

*
    *
  • *

    * Error code: TooManyConfigurations *

    *
      *
    • *

      * Description: You are attempting to create a new configuration but have already reached the 1,000-configuration * limit. *

      *
    • *
    • *

      * HTTP Status Code: HTTP 400 Bad Request *

      *
    • *
    *
  • *
*
*

* This is a convenience which creates an instance of the {@link PutBucketMetricsConfigurationRequest.Builder} * avoiding the need to create one manually via {@link PutBucketMetricsConfigurationRequest#builder()} *

* * @param putBucketMetricsConfigurationRequest * A {@link Consumer} that will call methods on {@link PutBucketMetricsConfigurationRequest.Builder} to * create a request. * @return A Java Future containing the result of the PutBucketMetricsConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketMetricsConfiguration
*/
// NOTE(review): generic type arguments were stripped during HTML extraction; restored below.
default CompletableFuture<PutBucketMetricsConfigurationResponse> putBucketMetricsConfiguration(
        Consumer<PutBucketMetricsConfigurationRequest.Builder> putBucketMetricsConfigurationRequest) {
    return putBucketMetricsConfiguration(PutBucketMetricsConfigurationRequest.builder()
            .applyMutation(putBucketMetricsConfigurationRequest).build());
}

/**
 *

* Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event * Notifications. *

*

* Using this API, you can replace an existing notification configuration. The configuration is an XML file that * defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to * publish an event notification when it detects an event of the specified type. *

*

* By default, your bucket has no event notifications configured. That is, the notification configuration will be an * empty NotificationConfiguration. *

*

* <NotificationConfiguration> *

*

* </NotificationConfiguration> *

*

* This operation replaces the existing notification configuration with the configuration you include in the request * body. *

*

* After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS) * or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to * publish to it by sending a test notification. In the case of AWS Lambda destinations, Amazon S3 verifies that the * Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more * information, see Configuring * Notifications for Amazon S3 Events. *

*

* You can disable notifications by adding the empty NotificationConfiguration element. *

*

* By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a * bucket policy to grant permission to other users to set this configuration with * s3:PutBucketNotification permission. *

* *

* The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS * topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon * S3 sends test messages to your SNS topic. If the message fails, the entire PUT operation will fail, and Amazon S3 * will not add the configuration to your bucket. *

*
*

* Responses *

*

* If the configuration in the request body includes only one TopicConfiguration specifying only the * s3:ReducedRedundancyLostObject event type, the response will also include the * x-amz-sns-test-message-id header containing the message ID of the test notification sent to the * topic. *

*

* The following operation is related to PutBucketNotificationConfiguration: *

* * * @param putBucketNotificationConfigurationRequest * @return A Java Future containing the result of the PutBucketNotificationConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketNotificationConfiguration
*/
// NOTE(review): generic type arguments were stripped during HTML extraction; restored below.
default CompletableFuture<PutBucketNotificationConfigurationResponse> putBucketNotificationConfiguration(
        PutBucketNotificationConfigurationRequest putBucketNotificationConfigurationRequest) {
    throw new UnsupportedOperationException();
}

/**
 *

* Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event * Notifications. *

*

* Using this API, you can replace an existing notification configuration. The configuration is an XML file that * defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to * publish an event notification when it detects an event of the specified type. *

*

* By default, your bucket has no event notifications configured. That is, the notification configuration will be an * empty NotificationConfiguration. *

*

* <NotificationConfiguration> *

*

* </NotificationConfiguration> *

*

* This operation replaces the existing notification configuration with the configuration you include in the request * body. *

*

* After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS) * or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to * publish to it by sending a test notification. In the case of AWS Lambda destinations, Amazon S3 verifies that the * Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more * information, see Configuring * Notifications for Amazon S3 Events. *

*

* You can disable notifications by adding the empty NotificationConfiguration element. *

*

* By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a * bucket policy to grant permission to other users to set this configuration with * s3:PutBucketNotification permission. *

* *

* The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS * topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon * S3 sends test messages to your SNS topic. If the message fails, the entire PUT operation will fail, and Amazon S3 * will not add the configuration to your bucket. *

*
*

* Responses *

*

* If the configuration in the request body includes only one TopicConfiguration specifying only the * s3:ReducedRedundancyLostObject event type, the response will also include the * x-amz-sns-test-message-id header containing the message ID of the test notification sent to the * topic. *

*

* The following operation is related to PutBucketNotificationConfiguration: *

* *
*

* This is a convenience which creates an instance of the {@link PutBucketNotificationConfigurationRequest.Builder} * avoiding the need to create one manually via {@link PutBucketNotificationConfigurationRequest#builder()} *

* * @param putBucketNotificationConfigurationRequest * A {@link Consumer} that will call methods on {@link PutBucketNotificationConfigurationRequest.Builder} to * create a request. * @return A Java Future containing the result of the PutBucketNotificationConfiguration operation returned by the * service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketNotificationConfiguration
*/
// NOTE(review): generic type arguments were stripped during HTML extraction; restored below.
default CompletableFuture<PutBucketNotificationConfigurationResponse> putBucketNotificationConfiguration(
        Consumer<PutBucketNotificationConfigurationRequest.Builder> putBucketNotificationConfigurationRequest) {
    return putBucketNotificationConfiguration(PutBucketNotificationConfigurationRequest.builder()
            .applyMutation(putBucketNotificationConfigurationRequest).build());
}

/**
 *

* Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this operation, you must have * the s3:PutBucketOwnershipControls permission. For more information about Amazon S3 permissions, see * Specifying Permissions in a * Policy. *

*

* For information about Amazon S3 Object Ownership, see Using Object Ownership. *

*

* The following operations are related to PutBucketOwnershipControls: *

* * * @param putBucketOwnershipControlsRequest * @return A Java Future containing the result of the PutBucketOwnershipControls operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketOwnershipControls
*/
// NOTE(review): generic type arguments were stripped during HTML extraction; restored below.
default CompletableFuture<PutBucketOwnershipControlsResponse> putBucketOwnershipControls(
        PutBucketOwnershipControlsRequest putBucketOwnershipControlsRequest) {
    throw new UnsupportedOperationException();
}

/**
 *

* Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this operation, you must have * the s3:PutBucketOwnershipControls permission. For more information about Amazon S3 permissions, see * Specifying Permissions in a * Policy. *

*

* For information about Amazon S3 Object Ownership, see Using Object Ownership. *

*

* The following operations are related to PutBucketOwnershipControls: *

* *
*

* This is a convenience which creates an instance of the {@link PutBucketOwnershipControlsRequest.Builder} avoiding * the need to create one manually via {@link PutBucketOwnershipControlsRequest#builder()} *

* * @param putBucketOwnershipControlsRequest * A {@link Consumer} that will call methods on {@link PutBucketOwnershipControlsRequest.Builder} to create a * request. * @return A Java Future containing the result of the PutBucketOwnershipControls operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketOwnershipControls
*/
// NOTE(review): generic type arguments were stripped during HTML extraction; restored below.
default CompletableFuture<PutBucketOwnershipControlsResponse> putBucketOwnershipControls(
        Consumer<PutBucketOwnershipControlsRequest.Builder> putBucketOwnershipControlsRequest) {
    return putBucketOwnershipControls(PutBucketOwnershipControlsRequest.builder()
            .applyMutation(putBucketOwnershipControlsRequest).build());
}

/**
 *

* Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an identity other than the root user * of the AWS account that owns the bucket, the calling identity must have the PutBucketPolicy * permissions on the specified bucket and belong to the bucket owner's account in order to use this operation. *

*

* If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied * error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's * account, Amazon S3 returns a 405 Method Not Allowed error. *

* *

* As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even * if the policy explicitly denies the root user the ability to perform this action. *

*
*

* For more information about bucket policies, see Using Bucket Policies and User * Policies. *

*

* The following operations are related to PutBucketPolicy: *

* * * @param putBucketPolicyRequest * @return A Java Future containing the result of the PutBucketPolicy operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketPolicy
*/
// NOTE(review): generic type arguments were stripped during HTML extraction; restored below.
default CompletableFuture<PutBucketPolicyResponse> putBucketPolicy(PutBucketPolicyRequest putBucketPolicyRequest) {
    throw new UnsupportedOperationException();
}

/**
 *

* Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an identity other than the root user * of the AWS account that owns the bucket, the calling identity must have the PutBucketPolicy * permissions on the specified bucket and belong to the bucket owner's account in order to use this operation. *

*

* If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied * error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's * account, Amazon S3 returns a 405 Method Not Allowed error. *

* *

* As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even * if the policy explicitly denies the root user the ability to perform this action. *

*
*

* For more information about bucket policies, see Using Bucket Policies and User * Policies. *

*

* The following operations are related to PutBucketPolicy: *

* *
*

* This is a convenience which creates an instance of the {@link PutBucketPolicyRequest.Builder} avoiding the need * to create one manually via {@link PutBucketPolicyRequest#builder()} *

* * @param putBucketPolicyRequest * A {@link Consumer} that will call methods on {@link PutBucketPolicyRequest.Builder} to create a request. * @return A Java Future containing the result of the PutBucketPolicy operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketPolicy
*/
// NOTE(review): generic type arguments were stripped during HTML extraction; restored below.
default CompletableFuture<PutBucketPolicyResponse> putBucketPolicy(
        Consumer<PutBucketPolicyRequest.Builder> putBucketPolicyRequest) {
    return putBucketPolicy(PutBucketPolicyRequest.builder().applyMutation(putBucketPolicyRequest).build());
}

/**
 *

* Creates a replication configuration or replaces an existing one. For more information, see Replication in the Amazon S3 * Developer Guide. *

* *

* To perform this operation, the user or role performing the operation must have the iam:PassRole permission. *

*
*

* Specify the replication configuration in the request body. In the replication configuration, you provide the name * of the destination bucket where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume * to replicate objects on your behalf, and other relevant information. *

*

* A replication configuration must include at least one rule, and can contain a maximum of 1,000. Each rule * identifies a subset of objects to replicate by filtering the objects in the source bucket. To choose additional * subsets of objects to replicate, add a rule for each subset. All rules must specify the same destination bucket. *

*

* To specify a subset of the objects in the source bucket to apply a replication rule to, add the Filter element as * a child of the Rule element. You can filter objects based on an object key prefix, one or more object tags, or * both. When you add the Filter element in the configuration, you must also add the following elements: * DeleteMarkerReplication, Status, and Priority. *

* *

* The latest version of the replication configuration XML is V2. XML V2 replication configurations are those that * contain the Filter element for rules, and rules that specify S3 Replication Time Control (S3 RTC). * In XML V2 replication configurations, Amazon S3 doesn't replicate delete markers. Therefore, you must set the * DeleteMarkerReplication element to Disabled. For backward compatibility, Amazon S3 * continues to support the XML V1 replication configuration. *

*
*

* For information about enabling versioning on a bucket, see Using Versioning. *

*

* By default, a resource owner, in this case the AWS account that created the bucket, can perform this operation. * The resource owner can also grant others permissions to perform the operation. For more information about * permissions, see Specifying * Permissions in a Policy and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* Handling Replication of Encrypted Objects *

*

* By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side encryption with CMKs * stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following: * SourceSelectionCriteria, SseKmsEncryptedObjects, Status, * EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication * configuration, see Replicating * Objects Created with SSE Using CMKs stored in AWS KMS. *

*

* For information on PutBucketReplication errors, see List of * replication-related error codes *

*

* The following operations are related to PutBucketReplication: *

* * * @param putBucketReplicationRequest * @return A Java Future containing the result of the PutBucketReplication operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketReplication
*/
// NOTE(review): generic type arguments were stripped during HTML extraction; restored below.
default CompletableFuture<PutBucketReplicationResponse> putBucketReplication(
        PutBucketReplicationRequest putBucketReplicationRequest) {
    throw new UnsupportedOperationException();
}

/**
 *

* Creates a replication configuration or replaces an existing one. For more information, see Replication in the Amazon S3 * Developer Guide. *

* *

* To perform this operation, the user or role performing the operation must have the iam:PassRole permission. *

*
*

* Specify the replication configuration in the request body. In the replication configuration, you provide the name * of the destination bucket where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume * to replicate objects on your behalf, and other relevant information. *

*

* A replication configuration must include at least one rule, and can contain a maximum of 1,000. Each rule * identifies a subset of objects to replicate by filtering the objects in the source bucket. To choose additional * subsets of objects to replicate, add a rule for each subset. All rules must specify the same destination bucket. *

*

* To specify a subset of the objects in the source bucket to apply a replication rule to, add the Filter element as * a child of the Rule element. You can filter objects based on an object key prefix, one or more object tags, or * both. When you add the Filter element in the configuration, you must also add the following elements: * DeleteMarkerReplication, Status, and Priority. *

* *

* The latest version of the replication configuration XML is V2. XML V2 replication configurations are those that * contain the Filter element for rules, and rules that specify S3 Replication Time Control (S3 RTC). * In XML V2 replication configurations, Amazon S3 doesn't replicate delete markers. Therefore, you must set the * DeleteMarkerReplication element to Disabled. For backward compatibility, Amazon S3 * continues to support the XML V1 replication configuration. *

*
*

* For information about enabling versioning on a bucket, see Using Versioning. *

*

* By default, a resource owner, in this case the AWS account that created the bucket, can perform this operation. * The resource owner can also grant others permissions to perform the operation. For more information about * permissions, see Specifying * Permissions in a Policy and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* Handling Replication of Encrypted Objects *

*

* By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side encryption with CMKs * stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following: * SourceSelectionCriteria, SseKmsEncryptedObjects, Status, * EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication * configuration, see Replicating * Objects Created with SSE Using CMKs stored in AWS KMS. *

*

* For information on PutBucketReplication errors, see List of * replication-related error codes *

*

* The following operations are related to PutBucketReplication: *

* *
*

* This is a convenience which creates an instance of the {@link PutBucketReplicationRequest.Builder} avoiding the * need to create one manually via {@link PutBucketReplicationRequest#builder()} *

* * @param putBucketReplicationRequest * A {@link Consumer} that will call methods on {@link PutBucketReplicationRequest.Builder} to create a * request. * @return A Java Future containing the result of the PutBucketReplication operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketReplication
*/
// NOTE(review): generic type arguments were stripped during HTML extraction; restored below.
default CompletableFuture<PutBucketReplicationResponse> putBucketReplication(
        Consumer<PutBucketReplicationRequest.Builder> putBucketReplicationRequest) {
    return putBucketReplication(PutBucketReplicationRequest.builder().applyMutation(putBucketReplicationRequest).build());
}

/**
 *

* Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the * bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the * download will be charged for the download. For more information, see Requester Pays Buckets. *

*

* The following operations are related to PutBucketRequestPayment: *

* * * @param putBucketRequestPaymentRequest * @return A Java Future containing the result of the PutBucketRequestPayment operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketRequestPayment
*/
// NOTE(review): generic type arguments were stripped during HTML extraction; restored below.
default CompletableFuture<PutBucketRequestPaymentResponse> putBucketRequestPayment(
        PutBucketRequestPaymentRequest putBucketRequestPaymentRequest) {
    throw new UnsupportedOperationException();
}

/**
 *

* Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the * bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the * download will be charged for the download. For more information, see Requester Pays Buckets. *

*

* The following operations are related to PutBucketRequestPayment: *

* *
*

* This is a convenience which creates an instance of the {@link PutBucketRequestPaymentRequest.Builder} avoiding * the need to create one manually via {@link PutBucketRequestPaymentRequest#builder()} *

* * @param putBucketRequestPaymentRequest * A {@link Consumer} that will call methods on {@link PutBucketRequestPaymentRequest.Builder} to create a * request. * @return A Java Future containing the result of the PutBucketRequestPayment operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketRequestPayment
*/
// NOTE(review): generic type arguments were stripped during HTML extraction; restored below.
default CompletableFuture<PutBucketRequestPaymentResponse> putBucketRequestPayment(
        Consumer<PutBucketRequestPaymentRequest.Builder> putBucketRequestPaymentRequest) {
    return putBucketRequestPayment(PutBucketRequestPaymentRequest.builder().applyMutation(putBucketRequestPaymentRequest)
            .build());
}

/**
 *

* Sets the tags for a bucket. *

*

* Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS * account bill with tag key values included. Then, to see the cost of combined resources, organize your billing * information according to resources with the same tag key values. For example, you can tag several resources with * a specific application name, and then organize your billing information to see the total cost of that application * across several services. For more information, see Cost Allocation and * Tagging. *

* *

* Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old * value. For more information, see Using Cost Allocation in Amazon S3 * Bucket Tags. *

*
*

* To use this operation, you must have permissions to perform the s3:PutBucketTagging action. The * bucket owner has this permission by default and can grant this permission to others. For more information about * permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* PutBucketTagging has the following special errors: *

*
    *
  • *

    * Error code: InvalidTagError *

    * *
  • *
  • *

    * Error code: MalformedXMLError *

    *
      *
    • *

      * Description: The XML provided does not match the schema. *

      *
    • *
    *
  • *
  • *

    * Error code: OperationAbortedError *

    *
      *
    • *

      * Description: A conflicting conditional operation is currently in progress against this resource. Please try * again. *

      *
    • *
    *
  • *
  • *

    * Error code: InternalError *

    *
      *
    • *

      * Description: The service was unable to apply the provided tag to the bucket. *

      *
    • *
    *
  • *
*

* The following operations are related to PutBucketTagging: *

* * * @param putBucketTaggingRequest * @return A Java Future containing the result of the PutBucketTagging operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketTagging
*/
// NOTE(review): generic type arguments were stripped during HTML extraction; restored below.
default CompletableFuture<PutBucketTaggingResponse> putBucketTagging(PutBucketTaggingRequest putBucketTaggingRequest) {
    throw new UnsupportedOperationException();
}

/**
 *

* Sets the tags for a bucket. *

*

* Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS * account bill with tag key values included. Then, to see the cost of combined resources, organize your billing * information according to resources with the same tag key values. For example, you can tag several resources with * a specific application name, and then organize your billing information to see the total cost of that application * across several services. For more information, see Cost Allocation and * Tagging. *

* *

* Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old * value. For more information, see Using Cost Allocation in Amazon S3 * Bucket Tags. *

*
*

* To use this operation, you must have permissions to perform the s3:PutBucketTagging action. The * bucket owner has this permission by default and can grant this permission to others. For more information about * permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources. *

*

* PutBucketTagging has the following special errors: *

*
    *
  • *

    * Error code: InvalidTagError *

    * *
  • *
  • *

    * Error code: MalformedXMLError *

    *
      *
    • *

      * Description: The XML provided does not match the schema. *

      *
    • *
    *
  • *
  • *

    * Error code: OperationAbortedError *

    *
      *
    • *

      * Description: A conflicting conditional operation is currently in progress against this resource. Please try * again. *

      *
    • *
    *
  • *
  • *

    * Error code: InternalError *

    *
      *
    • *

      * Description: The service was unable to apply the provided tag to the bucket. *

      *
    • *
    *
  • *
*

* The following operations are related to PutBucketTagging: *

* *
*

* This is a convenience which creates an instance of the {@link PutBucketTaggingRequest.Builder} avoiding the need * to create one manually via {@link PutBucketTaggingRequest#builder()} *

* * @param putBucketTaggingRequest * A {@link Consumer} that will call methods on {@link PutBucketTaggingRequest.Builder} to create a request. * @return A Java Future containing the result of the PutBucketTagging operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketTagging
*/
default CompletableFuture<PutBucketTaggingResponse> putBucketTagging(
        Consumer<PutBucketTaggingRequest.Builder> putBucketTaggingRequest) {
    // Convenience overload: materialize the request from the consumer-configured
    // builder, then delegate to the request-object overload.
    return putBucketTagging(PutBucketTaggingRequest.builder().applyMutation(putBucketTaggingRequest).build());
}

/**
*

* Sets the versioning state of an existing bucket. To set the versioning state, you must be the bucket owner. *

*

* You can set the versioning state with one of the following values: *

*

* Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique * version ID. *

*

* Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the * version ID null. *

*

* If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning * request does not return a versioning state value. *

*

* If the bucket owner enables MFA Delete in the bucket versioning configuration, the bucket owner must include the * x-amz-mfa request header and the Status and the MfaDelete request elements * in a request to set the versioning state of the bucket. *

* *

* If you have an object expiration lifecycle policy in your non-versioned bucket and you want to maintain the same * permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent * expiration lifecycle policy will manage the deletes of the noncurrent object versions in the version-enabled * bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more * information, see Lifecycle and Versioning. *

*
*

* Related Resources *

* * * @param putBucketVersioningRequest * @return A Java Future containing the result of the PutBucketVersioning operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketVersioning
*/
default CompletableFuture<PutBucketVersioningResponse> putBucketVersioning(
        PutBucketVersioningRequest putBucketVersioningRequest) {
    // Default stub: the concrete client generated by the SDK overrides this.
    throw new UnsupportedOperationException();
}

/**
*

* Sets the versioning state of an existing bucket. To set the versioning state, you must be the bucket owner. *

*

* You can set the versioning state with one of the following values: *

*

* Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique * version ID. *

*

* Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the * version ID null. *

*

* If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning * request does not return a versioning state value. *

*

* If the bucket owner enables MFA Delete in the bucket versioning configuration, the bucket owner must include the * x-amz-mfa request header and the Status and the MfaDelete request elements * in a request to set the versioning state of the bucket. *

* *

* If you have an object expiration lifecycle policy in your non-versioned bucket and you want to maintain the same * permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent * expiration lifecycle policy will manage the deletes of the noncurrent object versions in the version-enabled * bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more * information, see Lifecycle and Versioning. *

*
*

* Related Resources *

* *
*

* This is a convenience which creates an instance of the {@link PutBucketVersioningRequest.Builder} avoiding the * need to create one manually via {@link PutBucketVersioningRequest#builder()} *

* * @param putBucketVersioningRequest * A {@link Consumer} that will call methods on {@link PutBucketVersioningRequest.Builder} to create a * request. * @return A Java Future containing the result of the PutBucketVersioning operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketVersioning
*/
default CompletableFuture<PutBucketVersioningResponse> putBucketVersioning(
        Consumer<PutBucketVersioningRequest.Builder> putBucketVersioningRequest) {
    // Convenience overload: materialize the request from the consumer-configured
    // builder, then delegate to the request-object overload.
    return putBucketVersioning(PutBucketVersioningRequest.builder().applyMutation(putBucketVersioningRequest).build());
}

/**
*

* Sets the configuration of the website that is specified in the website subresource. To configure a * bucket as a website, you can add this subresource on the bucket with website configuration information such as * the file name of the index document and any redirect rules. For more information, see Hosting Websites on Amazon S3. *

*

* This PUT operation requires the S3:PutBucketWebsite permission. By default, only the bucket owner * can configure the website attached to a bucket; however, bucket owners can allow other users to set the website * configuration by writing a bucket policy that grants them the S3:PutBucketWebsite permission. *

*

* To redirect all website requests sent to the bucket's website endpoint, you add a website configuration with the * following elements. Because all requests are sent to another website, you don't need to provide index document * name for the bucket. *

*
    *
  • *

    * WebsiteConfiguration *

    *
  • *
  • *

    * RedirectAllRequestsTo *

    *
  • *
  • *

    * HostName *

    *
  • *
  • *

    * Protocol *

    *
  • *
*

* If you want granular control over redirects, you can use the following elements to add routing rules that * describe conditions for redirecting requests and information about the redirect destination. In this case, the * website configuration must provide an index document for the bucket, because some requests might not be * redirected. *

*
    *
  • *

    * WebsiteConfiguration *

    *
  • *
  • *

    * IndexDocument *

    *
  • *
  • *

    * Suffix *

    *
  • *
  • *

    * ErrorDocument *

    *
  • *
  • *

    * Key *

    *
  • *
  • *

    * RoutingRules *

    *
  • *
  • *

    * RoutingRule *

    *
  • *
  • *

    * Condition *

    *
  • *
  • *

    * HttpErrorCodeReturnedEquals *

    *
  • *
  • *

    * KeyPrefixEquals *

    *
  • *
  • *

    * Redirect *

    *
  • *
  • *

    * Protocol *

    *
  • *
  • *

    * HostName *

    *
  • *
  • *

    * ReplaceKeyPrefixWith *

    *
  • *
  • *

    * ReplaceKeyWith *

    *
  • *
  • *

    * HttpRedirectCode *

    *
  • *
*

* Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more than 50 routing * rules, you can use object redirect. For more information, see Configuring an Object * Redirect in the Amazon Simple Storage Service Developer Guide. *

* * @param putBucketWebsiteRequest * @return A Java Future containing the result of the PutBucketWebsite operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketWebsite
*/
default CompletableFuture<PutBucketWebsiteResponse> putBucketWebsite(
        PutBucketWebsiteRequest putBucketWebsiteRequest) {
    // Default stub: the concrete client generated by the SDK overrides this.
    throw new UnsupportedOperationException();
}

/**
*

* Sets the configuration of the website that is specified in the website subresource. To configure a * bucket as a website, you can add this subresource on the bucket with website configuration information such as * the file name of the index document and any redirect rules. For more information, see Hosting Websites on Amazon S3. *

*

* This PUT operation requires the S3:PutBucketWebsite permission. By default, only the bucket owner * can configure the website attached to a bucket; however, bucket owners can allow other users to set the website * configuration by writing a bucket policy that grants them the S3:PutBucketWebsite permission. *

*

* To redirect all website requests sent to the bucket's website endpoint, you add a website configuration with the * following elements. Because all requests are sent to another website, you don't need to provide index document * name for the bucket. *

*
    *
  • *

    * WebsiteConfiguration *

    *
  • *
  • *

    * RedirectAllRequestsTo *

    *
  • *
  • *

    * HostName *

    *
  • *
  • *

    * Protocol *

    *
  • *
*

* If you want granular control over redirects, you can use the following elements to add routing rules that * describe conditions for redirecting requests and information about the redirect destination. In this case, the * website configuration must provide an index document for the bucket, because some requests might not be * redirected. *

*
    *
  • *

    * WebsiteConfiguration *

    *
  • *
  • *

    * IndexDocument *

    *
  • *
  • *

    * Suffix *

    *
  • *
  • *

    * ErrorDocument *

    *
  • *
  • *

    * Key *

    *
  • *
  • *

    * RoutingRules *

    *
  • *
  • *

    * RoutingRule *

    *
  • *
  • *

    * Condition *

    *
  • *
  • *

    * HttpErrorCodeReturnedEquals *

    *
  • *
  • *

    * KeyPrefixEquals *

    *
  • *
  • *

    * Redirect *

    *
  • *
  • *

    * Protocol *

    *
  • *
  • *

    * HostName *

    *
  • *
  • *

    * ReplaceKeyPrefixWith *

    *
  • *
  • *

    * ReplaceKeyWith *

    *
  • *
  • *

    * HttpRedirectCode *

    *
  • *
*

* Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more than 50 routing * rules, you can use object redirect. For more information, see Configuring an Object * Redirect in the Amazon Simple Storage Service Developer Guide. *

*
*

* This is a convenience which creates an instance of the {@link PutBucketWebsiteRequest.Builder} avoiding the need * to create one manually via {@link PutBucketWebsiteRequest#builder()} *

* * @param putBucketWebsiteRequest * A {@link Consumer} that will call methods on {@link PutBucketWebsiteRequest.Builder} to create a request. * @return A Java Future containing the result of the PutBucketWebsite operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutBucketWebsite
*/
default CompletableFuture<PutBucketWebsiteResponse> putBucketWebsite(
        Consumer<PutBucketWebsiteRequest.Builder> putBucketWebsiteRequest) {
    // Convenience overload: materialize the request from the consumer-configured
    // builder, then delegate to the request-object overload.
    return putBucketWebsite(PutBucketWebsiteRequest.builder().applyMutation(putBucketWebsiteRequest).build());
}

/**
*

* Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it. *

*

* Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the * bucket. *

*

* Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it * overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make * sure to build it into your application layer or use versioning instead. *

*

* To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you * use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an * error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag * to the calculated MD5 value. *

* *

* The Content-MD5 header is required for any request to upload an object with a retention period * configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock * Overview in the Amazon Simple Storage Service Developer Guide. *

*
*

* Server-side Encryption *

*

* You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as * it writes it to disks in its data centers and decrypts the data when you access it. You have the option to * provide your own encryption key or use AWS managed encryption keys. For more information, see Using Server-Side * Encryption. *

*

* Access Control List (ACL)-Specific Request Headers *

*

* You can use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full * access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined * groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see * Access Control List (ACL) * Overview and Managing ACLs * Using the REST API. *

*

* Storage Class Options *

*

* By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class * provides high durability and high availability. Depending on performance needs, you can specify a different * Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the * Amazon S3 Service Developer Guide. *

*

* Versioning *

*

* If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being * stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives * multiple write requests for the same object simultaneously, it stores all of the objects. *

*

* For more information about versioning, see Adding * Objects to Versioning Enabled Buckets. For information about returning the versioning state of a bucket, see * GetBucketVersioning. *

*

* Related Resources *

* * * @param putObjectRequest * @param requestBody * Functional interface that can be implemented to produce the request content in a non-blocking manner. The * size of the content is expected to be known up front. See {@link AsyncRequestBody} for specific details on * implementing this interface as well as links to precanned implementations for common scenarios like * uploading from a file. The service documentation for the request content is as follows ' *

* Object data. *

* ' * @return A Java Future containing the result of the PutObject operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutObject
*/
default CompletableFuture<PutObjectResponse> putObject(PutObjectRequest putObjectRequest,
        AsyncRequestBody requestBody) {
    // Default stub: the concrete client generated by the SDK overrides this.
    throw new UnsupportedOperationException();
}

/**
*

* Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it. *

*

* Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the * bucket. *

*

* Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it * overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make * sure to build it into your application layer or use versioning instead. *

*

* To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you * use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an * error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag * to the calculated MD5 value. *

* *

* The Content-MD5 header is required for any request to upload an object with a retention period * configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock * Overview in the Amazon Simple Storage Service Developer Guide. *

*
*

* Server-side Encryption *

*

* You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as * it writes it to disks in its data centers and decrypts the data when you access it. You have the option to * provide your own encryption key or use AWS managed encryption keys. For more information, see Using Server-Side * Encryption. *

*

* Access Control List (ACL)-Specific Request Headers *

*

* You can use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full * access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined * groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see * Access Control List (ACL) * Overview and Managing ACLs * Using the REST API. *

*

* Storage Class Options *

*

* By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class * provides high durability and high availability. Depending on performance needs, you can specify a different * Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the * Amazon S3 Service Developer Guide. *

*

* Versioning *

*

* If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being * stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives * multiple write requests for the same object simultaneously, it stores all of the objects. *

*

* For more information about versioning, see Adding * Objects to Versioning Enabled Buckets. For information about returning the versioning state of a bucket, see * GetBucketVersioning. *

*

* Related Resources *

* *
*

* This is a convenience which creates an instance of the {@link PutObjectRequest.Builder} avoiding the need to * create one manually via {@link PutObjectRequest#builder()} *

* * @param putObjectRequest * A {@link Consumer} that will call methods on {@link PutObjectRequest.Builder} to create a request. * @param requestBody * Functional interface that can be implemented to produce the request content in a non-blocking manner. The * size of the content is expected to be known up front. See {@link AsyncRequestBody} for specific details on * implementing this interface as well as links to precanned implementations for common scenarios like * uploading from a file. The service documentation for the request content is as follows ' *

* Object data. *

* ' * @return A Java Future containing the result of the PutObject operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutObject
*/
default CompletableFuture<PutObjectResponse> putObject(Consumer<PutObjectRequest.Builder> putObjectRequest,
        AsyncRequestBody requestBody) {
    // Convenience overload: materialize the request from the consumer-configured
    // builder, then delegate to the request-object overload.
    return putObject(PutObjectRequest.builder().applyMutation(putObjectRequest).build(), requestBody);
}

/**
*

* Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it. *

*

* Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the * bucket. *

*

* Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it * overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make * sure to build it into your application layer or use versioning instead. *

*

* To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you * use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an * error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag * to the calculated MD5 value. *

* *

* The Content-MD5 header is required for any request to upload an object with a retention period * configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock * Overview in the Amazon Simple Storage Service Developer Guide. *

*
*

* Server-side Encryption *

*

* You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as * it writes it to disks in its data centers and decrypts the data when you access it. You have the option to * provide your own encryption key or use AWS managed encryption keys. For more information, see Using Server-Side * Encryption. *

*

* Access Control List (ACL)-Specific Request Headers *

*

* You can use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full * access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined * groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see * Access Control List (ACL) * Overview and Managing ACLs * Using the REST API. *

*

* Storage Class Options *

*

* By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class * provides high durability and high availability. Depending on performance needs, you can specify a different * Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the * Amazon S3 Service Developer Guide. *

*

* Versioning *

*

* If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being * stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives * multiple write requests for the same object simultaneously, it stores all of the objects. *

*

* For more information about versioning, see Adding * Objects to Versioning Enabled Buckets. For information about returning the versioning state of a bucket, see * GetBucketVersioning. *

*

* Related Resources *

* * * @param putObjectRequest * @param sourcePath * {@link Path} to file containing data to send to the service. File will be read entirely and may be read * multiple times in the event of a retry. If the file does not exist or the current user does not have * access to read it then an exception will be thrown. The service documentation for the request content is * as follows ' *

* Object data. *

* ' * @return A Java Future containing the result of the PutObject operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutObject
*/
default CompletableFuture<PutObjectResponse> putObject(PutObjectRequest putObjectRequest, Path sourcePath) {
    // Convenience overload: stream the request content from a file. The file may be
    // re-read on retry, per the AsyncRequestBody.fromFile contract.
    return putObject(putObjectRequest, AsyncRequestBody.fromFile(sourcePath));
}

/**
*

* Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it. *

*

* Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the * bucket. *

*

* Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it * overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make * sure to build it into your application layer or use versioning instead. *

*

* To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you * use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an * error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag * to the calculated MD5 value. *

* *

* The Content-MD5 header is required for any request to upload an object with a retention period * configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock * Overview in the Amazon Simple Storage Service Developer Guide. *

*
*

* Server-side Encryption *

*

* You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as * it writes it to disks in its data centers and decrypts the data when you access it. You have the option to * provide your own encryption key or use AWS managed encryption keys. For more information, see Using Server-Side * Encryption. *

*

* Access Control List (ACL)-Specific Request Headers *

*

* You can use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full * access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined * groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see * Access Control List (ACL) * Overview and Managing ACLs * Using the REST API. *

*

* Storage Class Options *

*

* By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class * provides high durability and high availability. Depending on performance needs, you can specify a different * Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the * Amazon S3 Service Developer Guide. *

*

* Versioning *

*

* If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being * stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives * multiple write requests for the same object simultaneously, it stores all of the objects. *

*

* For more information about versioning, see Adding * Objects to Versioning Enabled Buckets. For information about returning the versioning state of a bucket, see * GetBucketVersioning. *

*

* Related Resources *

* *
*

* This is a convenience which creates an instance of the {@link PutObjectRequest.Builder} avoiding the need to * create one manually via {@link PutObjectRequest#builder()} *

* * @param putObjectRequest * A {@link Consumer} that will call methods on {@link PutObjectRequest.Builder} to create a request. * @param sourcePath * {@link Path} to file containing data to send to the service. File will be read entirely and may be read * multiple times in the event of a retry. If the file does not exist or the current user does not have * access to read it then an exception will be thrown. The service documentation for the request content is * as follows ' *

* Object data. *

* ' * @return A Java Future containing the result of the PutObject operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutObject
*/
default CompletableFuture<PutObjectResponse> putObject(Consumer<PutObjectRequest.Builder> putObjectRequest,
        Path sourcePath) {
    // Convenience overload: materialize the request from the consumer-configured
    // builder, then delegate to the file-based overload.
    return putObject(PutObjectRequest.builder().applyMutation(putObjectRequest).build(), sourcePath);
}

/**
*

* Uses the {@code acl} subresource to set the access control list (ACL) permissions for a new or existing
* object in an S3 bucket. You must have {@code WRITE_ACP} permission to set the ACL of an object.
* <p>
* This action is not supported by Amazon S3 on Outposts.
* <p>
* You can set the ACL using either the request body or the request headers. With headers you may grant
* permissions in one of two mutually exclusive ways: a canned ACL via the {@code x-amz-acl} header, or
* explicit grants via the {@code x-amz-grant-read}, {@code x-amz-grant-read-acp}, {@code x-amz-grant-write-acp}
* and {@code x-amz-grant-full-control} headers. You cannot combine a canned ACL with explicit grants.
* <p>
* Grantees may be identified by canonical user ID ({@code id}), predefined group URI ({@code uri}), or email
* address ({@code emailAddress}). Using email addresses to specify a grantee is only supported in a limited
* set of AWS Regions (US East (N. Virginia), US West (N. California), US West (Oregon), Asia Pacific
* (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Europe (Ireland), South America (São Paulo)).
* <p>
* <b>Versioning</b>: the ACL of an object is set at the object version level. By default, PUT sets the ACL of
* the current version of an object. To set the ACL of a different version, use the {@code versionId}
* subresource.
*
* @param putObjectAclRequest
* @return A Java Future containing the result of the PutObjectAcl operation returned by the service.<br/>
*         The CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>NoSuchKeyException The specified key does not exist.</li>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to
*         get credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.PutObjectAcl
*/
default CompletableFuture<PutObjectAclResponse> putObjectAcl(PutObjectAclRequest putObjectAclRequest) {
    // Implemented by the generated client; the interface default is a stub.
    throw new UnsupportedOperationException();
}

/**

* Uses the acl subresource to set the access control list (ACL) permissions for a new or existing * object in an S3 bucket. You must have WRITE_ACP permission to set the ACL of an object. For more * information, see What * permissions can I grant? in the Amazon Simple Storage Service Developer Guide. *

*

* This action is not supported by Amazon S3 on Outposts. *

*

* Depending on your application needs, you can choose to set the ACL on an object using either the request body or * the headers. For example, if you have an existing application that updates a bucket ACL using the request body, * you can continue to use that approach. For more information, see Access Control List (ACL) Overview * in the Amazon S3 Developer Guide. *

*

* Access Permissions *

*

* You can set access permissions using one of the following methods: *

*
    *
  • *

    * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, * known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL * name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific * headers in your request. For more information, see Canned ACL. *

    *
  • *
  • *

    * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, * x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, * you specify explicit access permissions and grantees (AWS accounts or Amazon S3 groups) who will receive the * permission. If you use these ACL-specific headers, you cannot use x-amz-acl header to set a canned * ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see * Access Control List (ACL) * Overview. *

    *

    * You specify each grantee as a type=value pair, where the type is one of the following: *

    *
      *
    • *

      * id – if the value specified is the canonical user ID of an AWS account *

      *
    • *
    • *

      * uri – if you are granting permissions to a predefined group *

      *
    • *
    • *

      * emailAddress – if the value specified is the email address of an AWS account *

      * *

      * Using email addresses to specify a grantee is only supported in the following AWS Regions: *

      *
        *
      • *

        * US East (N. Virginia) *

        *
      • *
      • *

        * US West (N. California) *

        *
      • *
      • *

        * US West (Oregon) *

        *
      • *
      • *

        * Asia Pacific (Singapore) *

        *
      • *
      • *

        * Asia Pacific (Sydney) *

        *
      • *
      • *

        * Asia Pacific (Tokyo) *

        *
      • *
      • *

        * Europe (Ireland) *

        *
      • *
      • *

        * South America (São Paulo) *

        *
      • *
      *

      * For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS * General Reference. *

      *
    • *
    *

    * For example, the following x-amz-grant-read header grants list objects permission to the two AWS * accounts identified by their email addresses. *

    *

    * x-amz-grant-read: emailAddress="[email protected]", emailAddress="[email protected]" *

    *
  • *
*

* You can use either a canned ACL or specify access permissions explicitly. You cannot do both. *

*

* Grantee Values *

*

* You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the * following ways: *

*
    *
  • *

    * By the person's ID: *

    *

    * <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee> *

    *

    * DisplayName is optional and ignored in the request. *

    *
  • *
  • *

    * By URI: *

    *

    * <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee> *

    *
  • *
  • *

    * By Email address: *

    *

    * <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="AmazonCustomerByEmail"><EmailAddress><>[email protected]<></EmailAddress>lt;/Grantee> *

    *

    * The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the * CanonicalUser. *

    * *

    * Using email addresses to specify a grantee is only supported in the following AWS Regions: *

    *
      *
    • *

      * US East (N. Virginia) *

      *
    • *
    • *

      * US West (N. California) *

      *
    • *
    • *

      * US West (Oregon) *

      *
    • *
    • *

      * Asia Pacific (Singapore) *

      *
    • *
    • *

      * Asia Pacific (Sydney) *

      *
    • *
    • *

      * Asia Pacific (Tokyo) *

      *
    • *
    • *

      * Europe (Ireland) *

      *
    • *
    • *

      * South America (São Paulo) *

      *
    • *
    *

    * For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS * General Reference. *

    *
  • *
*

* Versioning *

*

* The ACL of an object is set at the object version level. By default, PUT sets the ACL of the current version of * an object. To set the ACL of a different version, use the versionId subresource. *

*

* Related Resources *

* *
*

* This is a convenience which creates an instance of the {@link PutObjectAclRequest.Builder} avoiding the need to * create one manually via {@link PutObjectAclRequest#builder()} *

* * @param putObjectAclRequest * A {@link Consumer} that will call methods on {@link PutObjectAclRequest.Builder} to create a request. * @return A Java Future containing the result of the PutObjectAcl operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • NoSuchKeyException The specified key does not exist.
  • *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutObjectAcl */ default CompletableFuture putObjectAcl(Consumer putObjectAclRequest) { return putObjectAcl(PutObjectAclRequest.builder().applyMutation(putObjectAclRequest).build()); } /** *

* Applies a Legal Hold configuration to the specified object.
* <p>
* This action is not supported by Amazon S3 on Outposts.
*
* @param putObjectLegalHoldRequest
* @return A Java Future containing the result of the PutObjectLegalHold operation returned by the service.<br/>
*         The CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to
*         get credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.PutObjectLegalHold
*/
default CompletableFuture<PutObjectLegalHoldResponse> putObjectLegalHold(
        PutObjectLegalHoldRequest putObjectLegalHoldRequest) {
    // Implemented by the generated client; the interface default is a stub.
    throw new UnsupportedOperationException();
}

/**

* Applies a Legal Hold configuration to the specified object.
* <p>
* This action is not supported by Amazon S3 on Outposts.
* <p>
* This is a convenience which creates an instance of the {@link PutObjectLegalHoldRequest.Builder} avoiding
* the need to create one manually via {@link PutObjectLegalHoldRequest#builder()}
*
* @param putObjectLegalHoldRequest
*        A {@link Consumer} that will call methods on {@link PutObjectLegalHoldRequest.Builder} to create a
*        request.
* @return A Java Future containing the result of the PutObjectLegalHold operation returned by the service.<br/>
*         The CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to
*         get credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.PutObjectLegalHold
*/
default CompletableFuture<PutObjectLegalHoldResponse> putObjectLegalHold(
        Consumer<PutObjectLegalHoldRequest.Builder> putObjectLegalHoldRequest) {
    // Delegates to the request-object overload after applying the caller's mutations.
    return putObjectLegalHold(PutObjectLegalHoldRequest.builder().applyMutation(putObjectLegalHoldRequest).build());
}

/**

* Places an Object Lock configuration on the specified bucket. The rule specified in the Object Lock
* configuration will be applied by default to every new object placed in the specified bucket.
* <p>
* <b>Note:</b> {@code DefaultRetention} requires either {@code Days} or {@code Years}. You can't specify both
* at the same time.
*
* @param putObjectLockConfigurationRequest
* @return A Java Future containing the result of the PutObjectLockConfiguration operation returned by the
*         service.<br/>
*         The CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to
*         get credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.PutObjectLockConfiguration
*/
default CompletableFuture<PutObjectLockConfigurationResponse> putObjectLockConfiguration(
        PutObjectLockConfigurationRequest putObjectLockConfigurationRequest) {
    // Implemented by the generated client; the interface default is a stub.
    throw new UnsupportedOperationException();
}

/**

* Places an Object Lock configuration on the specified bucket. The rule specified in the Object Lock
* configuration will be applied by default to every new object placed in the specified bucket.
* <p>
* <b>Note:</b> {@code DefaultRetention} requires either {@code Days} or {@code Years}. You can't specify both
* at the same time.
* <p>
* This is a convenience which creates an instance of the {@link PutObjectLockConfigurationRequest.Builder}
* avoiding the need to create one manually via {@link PutObjectLockConfigurationRequest#builder()}
*
* @param putObjectLockConfigurationRequest
*        A {@link Consumer} that will call methods on {@link PutObjectLockConfigurationRequest.Builder} to
*        create a request.
* @return A Java Future containing the result of the PutObjectLockConfiguration operation returned by the
*         service.<br/>
*         The CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to
*         get credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.PutObjectLockConfiguration
*/
default CompletableFuture<PutObjectLockConfigurationResponse> putObjectLockConfiguration(
        Consumer<PutObjectLockConfigurationRequest.Builder> putObjectLockConfigurationRequest) {
    // Delegates to the request-object overload after applying the caller's mutations.
    return putObjectLockConfiguration(PutObjectLockConfigurationRequest.builder()
            .applyMutation(putObjectLockConfigurationRequest).build());
}

/**

* Places an Object Retention configuration on an object.
* <p>
* This action is not supported by Amazon S3 on Outposts.
*
* @param putObjectRetentionRequest
* @return A Java Future containing the result of the PutObjectRetention operation returned by the service.<br/>
*         The CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to
*         get credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.PutObjectRetention
*/
default CompletableFuture<PutObjectRetentionResponse> putObjectRetention(
        PutObjectRetentionRequest putObjectRetentionRequest) {
    // Implemented by the generated client; the interface default is a stub.
    throw new UnsupportedOperationException();
}

/**

* Places an Object Retention configuration on an object.
* <p>
* This action is not supported by Amazon S3 on Outposts.
* <p>
* This is a convenience which creates an instance of the {@link PutObjectRetentionRequest.Builder} avoiding
* the need to create one manually via {@link PutObjectRetentionRequest#builder()}
*
* @param putObjectRetentionRequest
*        A {@link Consumer} that will call methods on {@link PutObjectRetentionRequest.Builder} to create a
*        request.
* @return A Java Future containing the result of the PutObjectRetention operation returned by the service.<br/>
*         The CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to
*         get credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.PutObjectRetention
*/
default CompletableFuture<PutObjectRetentionResponse> putObjectRetention(
        Consumer<PutObjectRetentionRequest.Builder> putObjectRetentionRequest) {
    // Delegates to the request-object overload after applying the caller's mutations.
    return putObjectRetention(PutObjectRetentionRequest.builder().applyMutation(putObjectRetentionRequest).build());
}

/**

* Sets the supplied tag-set to an object that already exists in a bucket. *

*

* A tag is a key-value pair. You can associate tags with an object by sending a PUT request against the tagging * subresource that is associated with the object. You can retrieve tags by sending a GET request. For more * information, see GetObjectTagging. *

*

* For tagging-related restrictions related to characters and encodings, see Tag * Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per object. *

*

* To use this operation, you must have permission to perform the s3:PutObjectTagging action. By * default, the bucket owner has this permission and can grant this permission to others. *

*

* To put tags of any other version, use the versionId query parameter. You also need permission for * the s3:PutObjectVersionTagging action. *

*

* For information about the Amazon S3 object tagging feature, see Object Tagging. *

*

* Special Errors *

*
    *
  • *
      *
    • *

      * Code: InvalidTagError *

      *
    • *
    • *

      * Cause: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. * For more information, see Object * Tagging. *

      *
    • *
    *
  • *
  • *
      *
    • *

      * Code: MalformedXMLError *

      *
    • *
    • *

      * Cause: The XML provided does not match the schema. *

      *
    • *
    *
  • *
  • *
      *
    • *

      * Code: OperationAbortedError *

      *
    • *
    • *

      * Cause: A conflicting conditional operation is currently in progress against this resource. Please try * again. *

      *
    • *
    *
  • *
  • *
      *
    • *

      * Code: InternalError *

      *
    • *
    • *

      * Cause: The service was unable to apply the provided tag to the object. *

      *
    • *
    *
  • *
*

* Related Resources *

* * * @param putObjectTaggingRequest * @return A Java Future containing the result of the PutObjectTagging operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutObjectTagging */ default CompletableFuture putObjectTagging(PutObjectTaggingRequest putObjectTaggingRequest) { throw new UnsupportedOperationException(); } /** *

* Sets the supplied tag-set to an object that already exists in a bucket. *

*

* A tag is a key-value pair. You can associate tags with an object by sending a PUT request against the tagging * subresource that is associated with the object. You can retrieve tags by sending a GET request. For more * information, see GetObjectTagging. *

*

* For tagging-related restrictions related to characters and encodings, see Tag * Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per object. *

*

* To use this operation, you must have permission to perform the s3:PutObjectTagging action. By * default, the bucket owner has this permission and can grant this permission to others. *

*

* To put tags of any other version, use the versionId query parameter. You also need permission for * the s3:PutObjectVersionTagging action. *

*

* For information about the Amazon S3 object tagging feature, see Object Tagging. *

*

* Special Errors *

*
    *
  • *
      *
    • *

      * Code: InvalidTagError *

      *
    • *
    • *

      * Cause: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. * For more information, see Object * Tagging. *

      *
    • *
    *
  • *
  • *
      *
    • *

      * Code: MalformedXMLError *

      *
    • *
    • *

      * Cause: The XML provided does not match the schema. *

      *
    • *
    *
  • *
  • *
      *
    • *

      * Code: OperationAbortedError *

      *
    • *
    • *

      * Cause: A conflicting conditional operation is currently in progress against this resource. Please try * again. *

      *
    • *
    *
  • *
  • *
      *
    • *

      * Code: InternalError *

      *
    • *
    • *

      * Cause: The service was unable to apply the provided tag to the object. *

      *
    • *
    *
  • *
*

* Related Resources *

* *
*

* This is a convenience which creates an instance of the {@link PutObjectTaggingRequest.Builder} avoiding the need * to create one manually via {@link PutObjectTaggingRequest#builder()} *

* * @param putObjectTaggingRequest * A {@link Consumer} that will call methods on {@link PutObjectTaggingRequest.Builder} to create a request. * @return A Java Future containing the result of the PutObjectTagging operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.PutObjectTagging */ default CompletableFuture putObjectTagging( Consumer putObjectTaggingRequest) { return putObjectTagging(PutObjectTaggingRequest.builder().applyMutation(putObjectTaggingRequest).build()); } /** *

* Creates or modifies the {@code PublicAccessBlock} configuration for an Amazon S3 bucket. To use this
* operation, you must have the {@code s3:PutBucketPublicAccessBlock} permission.
* <p>
* <b>Important:</b> when Amazon S3 evaluates the {@code PublicAccessBlock} configuration for a bucket or an
* object, it checks the {@code PublicAccessBlock} configuration for both the bucket (or the bucket that
* contains the object) and the bucket owner's account. If the {@code PublicAccessBlock} configurations are
* different between the bucket and the account, Amazon S3 uses the most restrictive combination of the
* bucket-level and account-level settings.
*
* @param putPublicAccessBlockRequest
* @return A Java Future containing the result of the PutPublicAccessBlock operation returned by the service.<br/>
*         The CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to
*         get credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.PutPublicAccessBlock
*/
default CompletableFuture<PutPublicAccessBlockResponse> putPublicAccessBlock(
        PutPublicAccessBlockRequest putPublicAccessBlockRequest) {
    // Implemented by the generated client; the interface default is a stub.
    throw new UnsupportedOperationException();
}

/**

* Creates or modifies the {@code PublicAccessBlock} configuration for an Amazon S3 bucket. To use this
* operation, you must have the {@code s3:PutBucketPublicAccessBlock} permission.
* <p>
* <b>Important:</b> if the {@code PublicAccessBlock} configurations differ between the bucket and the bucket
* owner's account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level
* settings.
* <p>
* This is a convenience which creates an instance of the {@link PutPublicAccessBlockRequest.Builder} avoiding
* the need to create one manually via {@link PutPublicAccessBlockRequest#builder()}
*
* @param putPublicAccessBlockRequest
*        A {@link Consumer} that will call methods on {@link PutPublicAccessBlockRequest.Builder} to create a
*        request.
* @return A Java Future containing the result of the PutPublicAccessBlock operation returned by the service.<br/>
*         The CompletableFuture returned by this method can be completed exceptionally with the following
*         exceptions.
*         <ul>
*         <li>SdkException Base class for all exceptions that can be thrown by the SDK (both service and
*         client). Can be used for catch all scenarios.</li>
*         <li>SdkClientException If any client side error occurs such as an IO related failure, failure to
*         get credentials, etc.</li>
*         <li>S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an
*         instance of this type.</li>
*         </ul>
* @sample S3AsyncClient.PutPublicAccessBlock
*/
default CompletableFuture<PutPublicAccessBlockResponse> putPublicAccessBlock(
        Consumer<PutPublicAccessBlockRequest.Builder> putPublicAccessBlockRequest) {
    // Delegates to the request-object overload after applying the caller's mutations.
    return putPublicAccessBlock(PutPublicAccessBlockRequest.builder().applyMutation(putPublicAccessBlockRequest).build());
}

/**

* Restores an archived copy of an object back into Amazon S3 *

*

* This action is not supported by Amazon S3 on Outposts. *

*

* This action performs the following types of requests: *

*
    *
  • *

    * select - Perform a select query on an archived object *

    *
  • *
  • *

    * restore an archive - Restore an archived object *

    *
  • *
*

* To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket * owner has this permission by default and can grant this permission to others. For more information about * permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide. *

*

* Querying Archives with Select Requests *

*

* You use a select type of request to perform SQL queries on archived objects. The archived objects that are being * queried by the select request must be formatted as uncompressed comma-separated values (CSV) files. You can run * queries and custom analytics on your archived data without having to restore your data to a hotter Amazon S3 * tier. For an overview about select requests, see Querying Archived * Objects in the Amazon Simple Storage Service Developer Guide. *

*

* When making a select request, do the following: *

*
    *
  • *

    * Define an output location for the select query's output. This must be an Amazon S3 bucket in the same AWS Region * as the bucket that contains the archive object that is being queried. The AWS account that initiates the job must * have permissions to write to the S3 bucket. You can specify the storage class and encryption for the output * objects stored in the bucket. For more information about output, see Querying Archived * Objects in the Amazon Simple Storage Service Developer Guide. *

    *

    * For more information about the S3 structure in the request body, see the following: *

    * *
  • *
  • *

    * Define the SQL expression for the SELECT type of restoration for your query in the request body's * SelectParameters structure. You can use expressions like the following examples. *

    *
      *
    • *

      * The following expression returns all records from the specified object. *

      *

      * SELECT * FROM Object *

      *
    • *
    • *

      * Assuming that you are not using any headers for data stored in the object, you can specify columns with * positional headers. *

      *

      * SELECT s._1, s._2 FROM Object s WHERE s._3 > 100 *

      *
    • *
    • *

      * If you have headers and you set the fileHeaderInfo in the CSV structure in the request * body to USE, you can specify headers in the query. (If you set the fileHeaderInfo field * to IGNORE, the first row is skipped for the query.) You cannot mix ordinal positions with header * column names. *

      *

      * SELECT s.Id, s.FirstName, s.SSN FROM S3Object s *

      *
    • *
    *
  • *
*

* For more information about using SQL with S3 Glacier Select restore, see SQL Reference for * Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide. *

*

* When making a select request, you can also do the following: *

*
    *
  • *

    * To expedite your queries, specify the Expedited tier. For more information about tiers, see * "Restoring Archives," later in this topic. *

    *
  • *
  • *

    * Specify details about the data serialization format of both the input object that is being queried and the * serialization of the CSV-encoded query results. *

    *
  • *
*

* The following are additional important facts about the select feature: *

*
    *
  • *

    * The output results are new Amazon S3 objects. Unlike archive retrievals, they are stored until explicitly * deleted — manually or through a lifecycle policy. *

    *
  • *
  • *

    * You can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't deduplicate requests, * so avoid issuing duplicate requests. *

    *
  • *
  • *

    * Amazon S3 accepts a select request even if the object has already been restored. A select request doesn’t return * error response 409. *

    *
  • *
*

* Restoring Archives *

*

* Objects that you archive to the S3 Glacier, S3 Glacier Deep Archive, S3 Intelligent-Tiering Archive, or S3 * Intelligent-Tiering Deep Archive storage classes are not accessible in real time. For objects in Archive Access * tier or Deep Archive Access tier you must first initiate a restore request, and then wait until the object is * moved into the Frequent Access tier. For objects in S3 Glacier or S3 Glacier Deep Archive you must first initiate * a restore request, and then wait until a temporary copy of the object is available. To access an archived object, * you must restore the object for the duration (number of days) that you specify. *

*

* To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 * restores the current version. *

*

* When restoring an archived object (or using a select request), you can specify one of the following data access * tier options in the Tier element of the request body: *

*
    *
  • *

    * Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 * Glacier or S3 Intelligent-Tiering Archive storage class when occasional urgent requests for a subset of archives * are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is * typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited * retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for * objects stored in the S3 Glacier Deep Archive or S3 Intelligent-Tiering Deep Archive storage class. *

    *
  • *
  • *

    * Standard - Standard retrievals allow you to access any of your archived objects within * several hours. This is the default option for retrieval requests that do not specify the retrieval option. * Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier or S3 * Intelligent-Tiering Archive storage class. They typically finish within 12 hours for objects stored in the S3 * Glacier Deep Archive or S3 Intelligent-Tiering Deep Archive storage class. Standard retrievals are free for * objects stored in S3 Intelligent-Tiering. *

    *
  • *
  • *

    * Bulk - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, enabling you to * retrieve large amounts, even petabytes, of data inexpensively. Bulk retrievals typically finish within 5–12 hours * for objects stored in the S3 Glacier or S3 Intelligent-Tiering Archive storage class. They typically finish * within 48 hours for objects stored in the S3 Glacier Deep Archive or S3 Intelligent-Tiering Deep Archive storage * class. Bulk retrievals are free for objects stored in S3 Intelligent-Tiering. *

    *
  • *
*

* For more information about archive retrieval options and provisioned capacity for Expedited data * access, see Restoring Archived * Objects in the Amazon Simple Storage Service Developer Guide. *

*

* You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in * progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon Simple Storage Service Developer Guide. *

*

* To get the status of object restoration, you can send a HEAD request. Operations return the * x-amz-restore header, which provides information about the restoration status, in the response. You * can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more * information, see Configuring * Amazon S3 Event Notifications in the Amazon Simple Storage Service Developer Guide. *

*

* After restoring an archived object, you can update the restoration period by reissuing the request with a new * period. Amazon S3 updates the restoration period relative to the current time and charges only for the * request — there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively * processing your current restore request for the object. *

*

* If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object * expiration overrides the life span that you specify in a restore request. For example, if you restore an object * copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For * more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management * in Amazon Simple Storage Service Developer Guide. *

*

* Responses *

*

* A successful operation returns either the 200 OK or 202 Accepted status code. *

*
    *
  • *

    * If the object is not previously restored, then Amazon S3 returns 202 Accepted in the response. *

    *
  • *
  • *

    * If the object is previously restored, Amazon S3 returns 200 OK in the response. *

    *
  • *
*

* Special Errors *

*
    *
  • *
      *
    • *

      * Code: RestoreAlreadyInProgress *

      *
    • *
    • *

      * Cause: Object restore is already in progress. (This error does not apply to SELECT type requests.) *

      *
    • *
    • *

      * HTTP Status Code: 409 Conflict *

      *
    • *
    • *

      * SOAP Fault Code Prefix: Client *

      *
    • *
    *
  • *
  • *
      *
    • *

      * Code: GlacierExpeditedRetrievalNotAvailable *

      *
    • *
    • *

      * Cause: expedited retrievals are currently not available. Try again later. (Returned if there is insufficient * capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard * or Bulk retrievals.) *

      *
    • *
    • *

      * HTTP Status Code: 503 *

      *
    • *
    • *

      * SOAP Fault Code Prefix: N/A *

      *
    • *
    *
  • *
*

* Related Resources *

* * * @param restoreObjectRequest * @return A Java Future containing the result of the RestoreObject operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • ObjectAlreadyInActiveTierErrorException This operation is not allowed against this storage tier.
  • *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.RestoreObject
*/
// Restores (or selects from) an archived S3 object. The generic type parameter on the
// return type was lost during HTML extraction; restored here to match the documented
// contract above ("A Java Future containing the result of the RestoreObject operation").
default CompletableFuture<RestoreObjectResponse> restoreObject(RestoreObjectRequest restoreObjectRequest) {
    // Interface default: generated concrete clients override this with a real implementation.
    throw new UnsupportedOperationException();
}

/**
 *

* Restores an archived copy of an object back into Amazon S3. *

*

* This action is not supported by Amazon S3 on Outposts. *

*

* This action performs the following types of requests: *

*
    *
  • *

    * select - Perform a select query on an archived object *

    *
  • *
  • *

    * restore an archive - Restore an archived object *

    *
  • *
*

* To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket * owner has this permission by default and can grant this permission to others. For more information about * permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your * Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide. *

*

* Querying Archives with Select Requests *

*

* You use a select type of request to perform SQL queries on archived objects. The archived objects that are being * queried by the select request must be formatted as uncompressed comma-separated values (CSV) files. You can run * queries and custom analytics on your archived data without having to restore your data to a hotter Amazon S3 * tier. For an overview about select requests, see Querying Archived * Objects in the Amazon Simple Storage Service Developer Guide. *

*

* When making a select request, do the following: *

*
    *
  • *

    * Define an output location for the select query's output. This must be an Amazon S3 bucket in the same AWS Region * as the bucket that contains the archive object that is being queried. The AWS account that initiates the job must * have permissions to write to the S3 bucket. You can specify the storage class and encryption for the output * objects stored in the bucket. For more information about output, see Querying Archived * Objects in the Amazon Simple Storage Service Developer Guide. *

    *

    * For more information about the S3 structure in the request body, see the following: *

    * *
  • *
  • *

    * Define the SQL expression for the SELECT type of restoration for your query in the request body's * SelectParameters structure. You can use expressions like the following examples. *

    *
      *
    • *

      * The following expression returns all records from the specified object. *

      *

      * SELECT * FROM Object *

      *
    • *
    • *

      * Assuming that you are not using any headers for data stored in the object, you can specify columns with * positional headers. *

      *

      * SELECT s._1, s._2 FROM Object s WHERE s._3 > 100 *

      *
    • *
    • *

      * If you have headers and you set the fileHeaderInfo in the CSV structure in the request * body to USE, you can specify headers in the query. (If you set the fileHeaderInfo field * to IGNORE, the first row is skipped for the query.) You cannot mix ordinal positions with header * column names. *

      *

      * SELECT s.Id, s.FirstName, s.SSN FROM S3Object s *

      *
    • *
    *
  • *
*

* For more information about using SQL with S3 Glacier Select restore, see SQL Reference for * Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide. *

*

* When making a select request, you can also do the following: *

*
    *
  • *

    * To expedite your queries, specify the Expedited tier. For more information about tiers, see * "Restoring Archives," later in this topic. *

    *
  • *
  • *

    * Specify details about the data serialization format of both the input object that is being queried and the * serialization of the CSV-encoded query results. *

    *
  • *
*

* The following are additional important facts about the select feature: *

*
    *
  • *

    * The output results are new Amazon S3 objects. Unlike archive retrievals, they are stored until explicitly * deleted — manually or through a lifecycle policy. *

    *
  • *
  • *

    * You can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't deduplicate requests, * so avoid issuing duplicate requests. *

    *
  • *
  • *

    * Amazon S3 accepts a select request even if the object has already been restored. A select request doesn’t return * error response 409. *

    *
  • *
*

* Restoring Archives *

*

* Objects that you archive to the S3 Glacier, S3 Glacier Deep Archive, S3 Intelligent-Tiering Archive, or S3 * Intelligent-Tiering Deep Archive storage classes are not accessible in real time. For objects in Archive Access * tier or Deep Archive Access tier you must first initiate a restore request, and then wait until the object is * moved into the Frequent Access tier. For objects in S3 Glacier or S3 Glacier Deep Archive you must first initiate * a restore request, and then wait until a temporary copy of the object is available. To access an archived object, * you must restore the object for the duration (number of days) that you specify. *

*

* To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 * restores the current version. *

*

* When restoring an archived object (or using a select request), you can specify one of the following data access * tier options in the Tier element of the request body: *

*
    *
  • *

    * Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 * Glacier or S3 Intelligent-Tiering Archive storage class when occasional urgent requests for a subset of archives * are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is * typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited * retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for * objects stored in the S3 Glacier Deep Archive or S3 Intelligent-Tiering Deep Archive storage class. *

    *
  • *
  • *

    * Standard - Standard retrievals allow you to access any of your archived objects within * several hours. This is the default option for retrieval requests that do not specify the retrieval option. * Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier or S3 * Intelligent-Tiering Archive storage class. They typically finish within 12 hours for objects stored in the S3 * Glacier Deep Archive or S3 Intelligent-Tiering Deep Archive storage class. Standard retrievals are free for * objects stored in S3 Intelligent-Tiering. *

    *
  • *
  • *

    * Bulk - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, enabling you to * retrieve large amounts, even petabytes, of data inexpensively. Bulk retrievals typically finish within 5–12 hours * for objects stored in the S3 Glacier or S3 Intelligent-Tiering Archive storage class. They typically finish * within 48 hours for objects stored in the S3 Glacier Deep Archive or S3 Intelligent-Tiering Deep Archive storage * class. Bulk retrievals are free for objects stored in S3 Intelligent-Tiering. *

    *
  • *
*

* For more information about archive retrieval options and provisioned capacity for Expedited data * access, see Restoring Archived * Objects in the Amazon Simple Storage Service Developer Guide. *

*

* You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in * progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon Simple Storage Service Developer Guide. *

*

* To get the status of object restoration, you can send a HEAD request. Operations return the * x-amz-restore header, which provides information about the restoration status, in the response. You * can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more * information, see Configuring * Amazon S3 Event Notifications in the Amazon Simple Storage Service Developer Guide. *

*

* After restoring an archived object, you can update the restoration period by reissuing the request with a new * period. Amazon S3 updates the restoration period relative to the current time and charges only for the * request — there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively * processing your current restore request for the object. *

*

* If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object * expiration overrides the life span that you specify in a restore request. For example, if you restore an object * copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For * more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management * in Amazon Simple Storage Service Developer Guide. *

*

* Responses *

*

* A successful operation returns either the 200 OK or 202 Accepted status code. *

*
    *
  • *

    * If the object is not previously restored, then Amazon S3 returns 202 Accepted in the response. *

    *
  • *
  • *

    * If the object is previously restored, Amazon S3 returns 200 OK in the response. *

    *
  • *
*

* Special Errors *

*
    *
  • *
      *
    • *

      * Code: RestoreAlreadyInProgress *

      *
    • *
    • *

      * Cause: Object restore is already in progress. (This error does not apply to SELECT type requests.) *

      *
    • *
    • *

      * HTTP Status Code: 409 Conflict *

      *
    • *
    • *

      * SOAP Fault Code Prefix: Client *

      *
    • *
    *
  • *
  • *
      *
    • *

      * Code: GlacierExpeditedRetrievalNotAvailable *

      *
    • *
    • *

      * Cause: expedited retrievals are currently not available. Try again later. (Returned if there is insufficient * capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard * or Bulk retrievals.) *

      *
    • *
    • *

      * HTTP Status Code: 503 *

      *
    • *
    • *

      * SOAP Fault Code Prefix: N/A *

      *
    • *
    *
  • *
*

* Related Resources *

* *
*

* This is a convenience which creates an instance of the {@link RestoreObjectRequest.Builder} avoiding the need to * create one manually via {@link RestoreObjectRequest#builder()} *

* * @param restoreObjectRequest * A {@link Consumer} that will call methods on {@link RestoreObjectRequest.Builder} to create a request. * @return A Java Future containing the result of the RestoreObject operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • ObjectAlreadyInActiveTierErrorException This operation is not allowed against this storage tier.
  • *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.RestoreObject
*/
// Consumer-builder convenience overload. Extraction stripped the generic parameters;
// restored per the Javadoc above ("{@link Consumer} that will call methods on
// {@link RestoreObjectRequest.Builder}") and the documented RestoreObject result type.
default CompletableFuture<RestoreObjectResponse> restoreObject(Consumer<RestoreObjectRequest.Builder> restoreObjectRequest) {
    // Materialize the request from the caller-mutated builder and delegate to the request overload.
    return restoreObject(RestoreObjectRequest.builder().applyMutation(restoreObjectRequest).build());
}

/**
 *

* Uploads a part in a multipart upload. *

* *

* In this operation, you provide part data in your request. However, you have an option to specify your existing * Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you * use the UploadPartCopy * operation. *

*
*

* You must initiate a multipart upload (see CreateMultipartUpload) * before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique * identifier, that you must include in your upload part request. *

*

* Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also * defines its position within the object being created. If you upload a new part using the same part number that * was used with a previous part, the previously uploaded part is overwritten. Each part must be at least 5 MB in * size, except the last part. There is no size limit on the last part of your multipart upload. *

*

* To ensure that data is not corrupted when traversing the network, specify the Content-MD5 header in * the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, * Amazon S3 returns an error. *

*

* If the upload request is signed with Signature Version 4, then AWS S3 uses the x-amz-content-sha256 * header as a checksum instead of Content-MD5. For more information see Authenticating * Requests: Using the Authorization Header (AWS Signature Version 4). *

*

* Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort * multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either * complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts * storage. *

*

* For more information on multipart uploads, go to Multipart Upload Overview in the * Amazon Simple Storage Service Developer Guide . *

*

* For information on the permissions required to use the multipart upload API, go to Multipart Upload API and * Permissions in the Amazon Simple Storage Service Developer Guide. *

*

* You can optionally request server-side encryption where Amazon S3 encrypts your data as it writes it to disks in * its data centers and decrypts it for you when you access it. You have the option of providing your own encryption * key, or you can use the AWS managed encryption keys. If you choose to provide your own encryption key, the * request headers you provide in the request must match the headers you used in the request to initiate the upload * by using CreateMultipartUpload. * For more information, go to Using Server-Side * Encryption in the Amazon Simple Storage Service Developer Guide. *

*

* Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are using a customer-provided * encryption key, you don't need to specify the encryption parameters in each UploadPart request. Instead, you only * need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more * information, see CreateMultipartUpload. *

*

* If you requested server-side encryption using a customer-provided encryption key in your initiate multipart * upload request, you must provide identical encryption information in each part upload using the following * headers. *

*
    *
  • *

    * x-amz-server-side-encryption-customer-algorithm *

    *
  • *
  • *

    * x-amz-server-side-encryption-customer-key *

    *
  • *
  • *

    * x-amz-server-side-encryption-customer-key-MD5 *

    *
  • *
*

* Special Errors *

*
    *
  • *
      *
    • *

      * Code: NoSuchUpload *

      *
    • *
    • *

      * Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload * might have been aborted or completed. *

      *
    • *
    • *

      * HTTP Status Code: 404 Not Found *

      *
    • *
    • *

      * SOAP Fault Code Prefix: Client *

      *
    • *
    *
  • *
*

* Related Resources *

* * * @param uploadPartRequest * @param requestBody * Functional interface that can be implemented to produce the request content in a non-blocking manner. The * size of the content is expected to be known up front. See {@link AsyncRequestBody} for specific details on * implementing this interface as well as links to precanned implementations for common scenarios like * uploading from a file. The service documentation for the request content is as follows ' *

* Object data. *

* ' * @return A Java Future containing the result of the UploadPart operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.UploadPart
*/
// Uploads one part of a multipart upload, streaming content via the non-blocking
// AsyncRequestBody. The return type's generic parameter was lost during HTML extraction;
// restored to match the documented contract ("result of the UploadPart operation").
default CompletableFuture<UploadPartResponse> uploadPart(UploadPartRequest uploadPartRequest,
        AsyncRequestBody requestBody) {
    // Interface default: generated concrete clients override this with a real implementation.
    throw new UnsupportedOperationException();
}

/**
 *

* Uploads a part in a multipart upload. *

* *

* In this operation, you provide part data in your request. However, you have an option to specify your existing * Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you * use the UploadPartCopy * operation. *

*
*

* You must initiate a multipart upload (see CreateMultipartUpload) * before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique * identifier, that you must include in your upload part request. *

*

* Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also * defines its position within the object being created. If you upload a new part using the same part number that * was used with a previous part, the previously uploaded part is overwritten. Each part must be at least 5 MB in * size, except the last part. There is no size limit on the last part of your multipart upload. *

*

* To ensure that data is not corrupted when traversing the network, specify the Content-MD5 header in * the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, * Amazon S3 returns an error. *

*

* If the upload request is signed with Signature Version 4, then AWS S3 uses the x-amz-content-sha256 * header as a checksum instead of Content-MD5. For more information see Authenticating * Requests: Using the Authorization Header (AWS Signature Version 4). *

*

* Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort * multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either * complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts * storage. *

*

* For more information on multipart uploads, go to Multipart Upload Overview in the * Amazon Simple Storage Service Developer Guide . *

*

* For information on the permissions required to use the multipart upload API, go to Multipart Upload API and * Permissions in the Amazon Simple Storage Service Developer Guide. *

*

* You can optionally request server-side encryption where Amazon S3 encrypts your data as it writes it to disks in * its data centers and decrypts it for you when you access it. You have the option of providing your own encryption * key, or you can use the AWS managed encryption keys. If you choose to provide your own encryption key, the * request headers you provide in the request must match the headers you used in the request to initiate the upload * by using CreateMultipartUpload. * For more information, go to Using Server-Side * Encryption in the Amazon Simple Storage Service Developer Guide. *

*

* Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are using a customer-provided * encryption key, you don't need to specify the encryption parameters in each UploadPart request. Instead, you only * need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more * information, see CreateMultipartUpload. *

*

* If you requested server-side encryption using a customer-provided encryption key in your initiate multipart * upload request, you must provide identical encryption information in each part upload using the following * headers. *

*
    *
  • *

    * x-amz-server-side-encryption-customer-algorithm *

    *
  • *
  • *

    * x-amz-server-side-encryption-customer-key *

    *
  • *
  • *

    * x-amz-server-side-encryption-customer-key-MD5 *

    *
  • *
*

* Special Errors *

*
    *
  • *
      *
    • *

      * Code: NoSuchUpload *

      *
    • *
    • *

      * Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload * might have been aborted or completed. *

      *
    • *
    • *

      * HTTP Status Code: 404 Not Found *

      *
    • *
    • *

      * SOAP Fault Code Prefix: Client *

      *
    • *
    *
  • *
*

* Related Resources *

* *
*

* This is a convenience which creates an instance of the {@link UploadPartRequest.Builder} avoiding the need to * create one manually via {@link UploadPartRequest#builder()} *

* * @param uploadPartRequest * A {@link Consumer} that will call methods on {@link UploadPartRequest.Builder} to create a request. * @param requestBody * Functional interface that can be implemented to produce the request content in a non-blocking manner. The * size of the content is expected to be known up front. See {@link AsyncRequestBody} for specific details on * implementing this interface as well as links to precanned implementations for common scenarios like * uploading from a file. The service documentation for the request content is as follows ' *

* Object data. *

* ' * @return A Java Future containing the result of the UploadPart operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.UploadPart
*/
// Consumer-builder convenience overload of uploadPart. Extraction stripped the generic
// parameters; restored per the Javadoc above ("{@link Consumer} that will call methods on
// {@link UploadPartRequest.Builder}") and the documented UploadPart result type.
default CompletableFuture<UploadPartResponse> uploadPart(Consumer<UploadPartRequest.Builder> uploadPartRequest,
        AsyncRequestBody requestBody) {
    // Materialize the request from the caller-mutated builder and delegate to the request overload.
    return uploadPart(UploadPartRequest.builder().applyMutation(uploadPartRequest).build(), requestBody);
}

/**
 *

* Uploads a part in a multipart upload. *

* *

* In this operation, you provide part data in your request. However, you have an option to specify your existing * Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you * use the UploadPartCopy * operation. *

*
*

* You must initiate a multipart upload (see CreateMultipartUpload) * before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique * identifier, that you must include in your upload part request. *

*

* Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also * defines its position within the object being created. If you upload a new part using the same part number that * was used with a previous part, the previously uploaded part is overwritten. Each part must be at least 5 MB in * size, except the last part. There is no size limit on the last part of your multipart upload. *

*

* To ensure that data is not corrupted when traversing the network, specify the Content-MD5 header in * the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, * Amazon S3 returns an error. *

*

* If the upload request is signed with Signature Version 4, then AWS S3 uses the x-amz-content-sha256 * header as a checksum instead of Content-MD5. For more information see Authenticating * Requests: Using the Authorization Header (AWS Signature Version 4). *

*

* Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort * multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either * complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts * storage. *

*

* For more information on multipart uploads, go to Multipart Upload Overview in the * Amazon Simple Storage Service Developer Guide . *

*

* For information on the permissions required to use the multipart upload API, go to Multipart Upload API and * Permissions in the Amazon Simple Storage Service Developer Guide. *

*

* You can optionally request server-side encryption where Amazon S3 encrypts your data as it writes it to disks in * its data centers and decrypts it for you when you access it. You have the option of providing your own encryption * key, or you can use the AWS managed encryption keys. If you choose to provide your own encryption key, the * request headers you provide in the request must match the headers you used in the request to initiate the upload * by using CreateMultipartUpload. * For more information, go to Using Server-Side * Encryption in the Amazon Simple Storage Service Developer Guide. *

*

* Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are using a customer-provided * encryption key, you don't need to specify the encryption parameters in each UploadPart request. Instead, you only * need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more * information, see CreateMultipartUpload. *

*

* If you requested server-side encryption using a customer-provided encryption key in your initiate multipart * upload request, you must provide identical encryption information in each part upload using the following * headers. *

*
    *
  • *

    * x-amz-server-side-encryption-customer-algorithm *

    *
  • *
  • *

    * x-amz-server-side-encryption-customer-key *

    *
  • *
  • *

    * x-amz-server-side-encryption-customer-key-MD5 *

    *
  • *
*

* Special Errors *

*
    *
  • *
      *
    • *

      * Code: NoSuchUpload *

      *
    • *
    • *

      * Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload * might have been aborted or completed. *

      *
    • *
    • *

      * HTTP Status Code: 404 Not Found *

      *
    • *
    • *

      * SOAP Fault Code Prefix: Client *

      *
    • *
    *
  • *
*

* Related Resources *

* * * @param uploadPartRequest * @param sourcePath * {@link Path} to file containing data to send to the service. File will be read entirely and may be read * multiple times in the event of a retry. If the file does not exist or the current user does not have * access to read it then an exception will be thrown. The service documentation for the request content is * as follows ' *

* Object data. *

* ' * @return A Java Future containing the result of the UploadPart operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.UploadPart */ default CompletableFuture uploadPart(UploadPartRequest uploadPartRequest, Path sourcePath) { return uploadPart(uploadPartRequest, AsyncRequestBody.fromFile(sourcePath)); } /** *

* Uploads a part in a multipart upload. *

* *

* In this operation, you provide part data in your request. However, you have an option to specify your existing * Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you * use the UploadPartCopy * operation. *

*
*

* You must initiate a multipart upload (see CreateMultipartUpload) * before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique * identifier, that you must include in your upload part request. *

*

* Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also * defines its position within the object being created. If you upload a new part using the same part number that * was used with a previous part, the previously uploaded part is overwritten. Each part must be at least 5 MB in * size, except the last part. There is no size limit on the last part of your multipart upload. *

*

* To ensure that data is not corrupted when traversing the network, specify the Content-MD5 header in * the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, * Amazon S3 returns an error. *

*

* If the upload request is signed with Signature Version 4, then AWS S3 uses the x-amz-content-sha256 * header as a checksum instead of Content-MD5. For more information, see Authenticating * Requests: Using the Authorization Header (AWS Signature Version 4). *

*

* Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort * multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either * complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts * storage. *

*

* For more information on multipart uploads, go to Multipart Upload Overview in the * Amazon Simple Storage Service Developer Guide . *

*

* For information on the permissions required to use the multipart upload API, go to Multipart Upload API and * Permissions in the Amazon Simple Storage Service Developer Guide. *

*

* You can optionally request server-side encryption where Amazon S3 encrypts your data as it writes it to disks in * its data centers and decrypts it for you when you access it. You have the option of providing your own encryption * key, or you can use the AWS managed encryption keys. If you choose to provide your own encryption key, the * request headers you provide in the request must match the headers you used in the request to initiate the upload * by using CreateMultipartUpload. * For more information, go to Using Server-Side * Encryption in the Amazon Simple Storage Service Developer Guide. *

*

* Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are using a customer-provided * encryption key, you don't need to specify the encryption parameters in each UploadPart request. Instead, you only * need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more * information, see CreateMultipartUpload. *

*

* If you requested server-side encryption using a customer-provided encryption key in your initiate multipart * upload request, you must provide identical encryption information in each part upload using the following * headers. *

*
    *
  • *

    * x-amz-server-side-encryption-customer-algorithm *

    *
  • *
  • *

    * x-amz-server-side-encryption-customer-key *

    *
  • *
  • *

    * x-amz-server-side-encryption-customer-key-MD5 *

    *
  • *
*

* Special Errors *

*
    *
  • *
      *
    • *

      * Code: NoSuchUpload *

      *
    • *
    • *

      * Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload * might have been aborted or completed. *

      *
    • *
    • *

      * HTTP Status Code: 404 Not Found *

      *
    • *
    • *

      * SOAP Fault Code Prefix: Client *

      *
    • *
    *
  • *
*

* Related Resources *

* *
*

* This is a convenience which creates an instance of the {@link UploadPartRequest.Builder} avoiding the need to * create one manually via {@link UploadPartRequest#builder()} *

* * @param uploadPartRequest * A {@link Consumer} that will call methods on {@link UploadPartRequest.Builder} to create a request. * @param sourcePath * {@link Path} to file containing data to send to the service. File will be read entirely and may be read * multiple times in the event of a retry. If the file does not exist or the current user does not have * access to read it then an exception will be thrown. The service documentation for the request content is * as follows ' *

* Object data. *

* ' * @return A Java Future containing the result of the UploadPart operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.UploadPart */ default CompletableFuture uploadPart(Consumer uploadPartRequest, Path sourcePath) { return uploadPart(UploadPartRequest.builder().applyMutation(uploadPartRequest).build(), sourcePath); } /** *

* Uploads a part by copying data from an existing object as data source. You specify the data source by adding the * request header x-amz-copy-source in your request and a byte range by adding the request header * x-amz-copy-source-range in your request. *

*

* The minimum allowable part size for a multipart upload is 5 MB. For more information about multipart upload * limits, go to Quick Facts in the * Amazon Simple Storage Service Developer Guide. *

* *

* Instead of using an existing object as part data, you might use the UploadPart operation and provide * data in your request. *

*
*

* You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon * S3 returns a unique identifier, the upload ID, that you must include in your upload part request. *

*

* For more information about using the UploadPartCopy operation, see the following: *

*
    *
  • *

    * For conceptual information about multipart uploads, see Uploading Objects Using Multipart * Upload in the Amazon Simple Storage Service Developer Guide. *

    *
  • *
  • *

    * For information about permissions required to use the multipart upload API, see Multipart Upload API and * Permissions in the Amazon Simple Storage Service Developer Guide. *

    *
  • *
  • *

    * For information about copying objects using a single atomic operation vs. the multipart upload, see Operations on Objects in the * Amazon Simple Storage Service Developer Guide. *

    *
  • *
  • *

    * For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy * operation, see CopyObject and * UploadPart. *

    *
  • *
*

* Note the following additional considerations about the request headers x-amz-copy-source-if-match, * x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and * x-amz-copy-source-if-modified-since: *

*

*

*
    *
  • *

    * Consideration 1 - If both of the x-amz-copy-source-if-match and * x-amz-copy-source-if-unmodified-since headers are present in the request as follows: *

    *

    * x-amz-copy-source-if-match condition evaluates to true, and; *

    *

    * x-amz-copy-source-if-unmodified-since condition evaluates to false; *

    *

    * Amazon S3 returns 200 OK and copies the data. *

    *
  • *
  • *

    * Consideration 2 - If both of the x-amz-copy-source-if-none-match and * x-amz-copy-source-if-modified-since headers are present in the request as follows: *

    *

    * x-amz-copy-source-if-none-match condition evaluates to false, and; *

    *

    * x-amz-copy-source-if-modified-since condition evaluates to true; *

    *

    * Amazon S3 returns 412 Precondition Failed response code. *

    *
  • *
*

* Versioning *

*

* If your bucket has versioning enabled, you could have multiple versions of the same object. By default, * x-amz-copy-source identifies the current version of the object to copy. If the current version is a * delete marker and you don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 * error, because the object does not exist. If you specify versionId in the x-amz-copy-source and the * versionId is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not allowed to specify a * delete marker as a version for the x-amz-copy-source. *

*

* You can optionally specify a specific version of the source object to copy by adding the versionId * subresource as shown in the following example: *

*

* x-amz-copy-source: /bucket/object?versionId=version id *

*

* Special Errors *

*
    *
  • *
      *
    • *

      * Code: NoSuchUpload *

      *
    • *
    • *

      * Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload * might have been aborted or completed. *

      *
    • *
    • *

      * HTTP Status Code: 404 Not Found *

      *
    • *
    *
  • *
  • *
      *
    • *

      * Code: InvalidRequest *

      *
    • *
    • *

      * Cause: The specified copy source is not supported as a byte-range copy source. *

      *
    • *
    • *

      * HTTP Status Code: 400 Bad Request *

      *
    • *
    *
  • *
*

* Related Resources *

* * * @param uploadPartCopyRequest * @return A Java Future containing the result of the UploadPartCopy operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.UploadPartCopy */ default CompletableFuture uploadPartCopy(UploadPartCopyRequest uploadPartCopyRequest) { throw new UnsupportedOperationException(); } /** *

* Uploads a part by copying data from an existing object as data source. You specify the data source by adding the * request header x-amz-copy-source in your request and a byte range by adding the request header * x-amz-copy-source-range in your request. *

*

* The minimum allowable part size for a multipart upload is 5 MB. For more information about multipart upload * limits, go to Quick Facts in the * Amazon Simple Storage Service Developer Guide. *

* *

* Instead of using an existing object as part data, you might use the UploadPart operation and provide * data in your request. *

*
*

* You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon * S3 returns a unique identifier, the upload ID, that you must include in your upload part request. *

*

* For more information about using the UploadPartCopy operation, see the following: *

*
    *
  • *

    * For conceptual information about multipart uploads, see Uploading Objects Using Multipart * Upload in the Amazon Simple Storage Service Developer Guide. *

    *
  • *
  • *

    * For information about permissions required to use the multipart upload API, see Multipart Upload API and * Permissions in the Amazon Simple Storage Service Developer Guide. *

    *
  • *
  • *

    * For information about copying objects using a single atomic operation vs. the multipart upload, see Operations on Objects in the * Amazon Simple Storage Service Developer Guide. *

    *
  • *
  • *

    * For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy * operation, see CopyObject and * UploadPart. *

    *
  • *
*

* Note the following additional considerations about the request headers x-amz-copy-source-if-match, * x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and * x-amz-copy-source-if-modified-since: *

*

*

*
    *
  • *

    * Consideration 1 - If both of the x-amz-copy-source-if-match and * x-amz-copy-source-if-unmodified-since headers are present in the request as follows: *

    *

    * x-amz-copy-source-if-match condition evaluates to true, and; *

    *

    * x-amz-copy-source-if-unmodified-since condition evaluates to false; *

    *

    * Amazon S3 returns 200 OK and copies the data. *

    *
  • *
  • *

    * Consideration 2 - If both of the x-amz-copy-source-if-none-match and * x-amz-copy-source-if-modified-since headers are present in the request as follows: *

    *

    * x-amz-copy-source-if-none-match condition evaluates to false, and; *

    *

    * x-amz-copy-source-if-modified-since condition evaluates to true; *

    *

    * Amazon S3 returns 412 Precondition Failed response code. *

    *
  • *
*

* Versioning *

*

* If your bucket has versioning enabled, you could have multiple versions of the same object. By default, * x-amz-copy-source identifies the current version of the object to copy. If the current version is a * delete marker and you don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 * error, because the object does not exist. If you specify versionId in the x-amz-copy-source and the * versionId is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not allowed to specify a * delete marker as a version for the x-amz-copy-source. *

*

* You can optionally specify a specific version of the source object to copy by adding the versionId * subresource as shown in the following example: *

*

* x-amz-copy-source: /bucket/object?versionId=version id *

*

* Special Errors *

*
    *
  • *
      *
    • *

      * Code: NoSuchUpload *

      *
    • *
    • *

      * Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload * might have been aborted or completed. *

      *
    • *
    • *

      * HTTP Status Code: 404 Not Found *

      *
    • *
    *
  • *
  • *
      *
    • *

      * Code: InvalidRequest *

      *
    • *
    • *

      * Cause: The specified copy source is not supported as a byte-range copy source. *

      *
    • *
    • *

      * HTTP Status Code: 400 Bad Request *

      *
    • *
    *
  • *
*

* Related Resources *

* *
*

* This is a convenience which creates an instance of the {@link UploadPartCopyRequest.Builder} avoiding the need to * create one manually via {@link UploadPartCopyRequest#builder()} *

* * @param uploadPartCopyRequest * A {@link Consumer} that will call methods on {@link UploadPartCopyRequest.Builder} to create a request. * @return A Java Future containing the result of the UploadPartCopy operation returned by the service.
* The CompletableFuture returned by this method can be completed exceptionally with the following * exceptions. *
    *
  • SdkException Base class for all exceptions that can be thrown by the SDK (both service and client). * Can be used for catch all scenarios.
  • *
  • SdkClientException If any client side error occurs such as an IO related failure, failure to get * credentials, etc.
  • *
  • S3Exception Base class for all service exceptions. Unknown exceptions will be thrown as an instance * of this type.
  • *
* @sample S3AsyncClient.UploadPartCopy */ default CompletableFuture uploadPartCopy(Consumer uploadPartCopyRequest) { return uploadPartCopy(UploadPartCopyRequest.builder().applyMutation(uploadPartCopyRequest).build()); } /** * Creates an instance of {@link S3Utilities} object with the configuration set on this client. */ default S3Utilities utilities() { throw new UnsupportedOperationException(); } /** * Create an instance of {@link S3AsyncWaiter} using this client. *

* Waiters created via this method are managed by the SDK and resources will be released when the service client is * closed. * * @return an instance of {@link S3AsyncWaiter} */ default S3AsyncWaiter waiter() { throw new UnsupportedOperationException(); } }





© 2015 - 2025 Weber Informatics LLC | Privacy Policy