/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.ibm.cloud.objectstorage.services.s3;
import static com.ibm.cloud.objectstorage.event.SDKProgressPublisher.publishProgress;
import static com.ibm.cloud.objectstorage.internal.ResettableInputStream.newResettableInputStream;
import static com.ibm.cloud.objectstorage.services.s3.model.S3DataSource.Utils.cleanupDataSource;
import static com.ibm.cloud.objectstorage.util.LengthCheckInputStream.EXCLUDE_SKIPPED_BYTES;
import static com.ibm.cloud.objectstorage.util.LengthCheckInputStream.INCLUDE_SKIPPED_BYTES;
import static com.ibm.cloud.objectstorage.util.Throwables.failure;
import static com.ibm.cloud.objectstorage.util.ValidationUtils.assertNotNull;
import static com.ibm.cloud.objectstorage.util.ValidationUtils.assertStringNotEmpty;
import com.ibm.cloud.objectstorage.AmazonClientException;
import com.ibm.cloud.objectstorage.AmazonServiceException;
import com.ibm.cloud.objectstorage.AmazonServiceException.ErrorType;
import com.ibm.cloud.objectstorage.AmazonWebServiceClient;
import com.ibm.cloud.objectstorage.AmazonWebServiceRequest;
import com.ibm.cloud.objectstorage.AmazonWebServiceResponse;
import com.ibm.cloud.objectstorage.ClientConfiguration;
import com.ibm.cloud.objectstorage.DefaultRequest;
import com.ibm.cloud.objectstorage.HttpMethod;
import com.ibm.cloud.objectstorage.Protocol;
import com.ibm.cloud.objectstorage.Request;
import com.ibm.cloud.objectstorage.ResetException;
import com.ibm.cloud.objectstorage.Response;
import com.ibm.cloud.objectstorage.SDKGlobalConfiguration;
import com.ibm.cloud.objectstorage.SdkClientException;
import com.ibm.cloud.objectstorage.annotation.SdkInternalApi;
import com.ibm.cloud.objectstorage.annotation.SdkTestInternalApi;
import com.ibm.cloud.objectstorage.annotation.ThreadSafe;
import com.ibm.cloud.objectstorage.auth.AWSCredentials;
import com.ibm.cloud.objectstorage.auth.AWSCredentialsProvider;
import com.ibm.cloud.objectstorage.auth.Presigner;
import com.ibm.cloud.objectstorage.auth.Signer;
import com.ibm.cloud.objectstorage.auth.SignerFactory;
import com.ibm.cloud.objectstorage.client.builder.AwsClientBuilder;
import com.ibm.cloud.objectstorage.event.ProgressEventType;
import com.ibm.cloud.objectstorage.event.ProgressInputStream;
import com.ibm.cloud.objectstorage.event.ProgressListener;
import com.ibm.cloud.objectstorage.handlers.HandlerChainFactory;
import com.ibm.cloud.objectstorage.handlers.HandlerContextKey;
import com.ibm.cloud.objectstorage.handlers.RequestHandler2;
import com.ibm.cloud.objectstorage.http.ExecutionContext;
import com.ibm.cloud.objectstorage.http.HttpMethodName;
import com.ibm.cloud.objectstorage.http.HttpResponseHandler;
import com.ibm.cloud.objectstorage.internal.DefaultServiceEndpointBuilder;
import com.ibm.cloud.objectstorage.internal.IdentityEndpointBuilder;
import com.ibm.cloud.objectstorage.internal.ReleasableInputStream;
import com.ibm.cloud.objectstorage.internal.ResettableInputStream;
import com.ibm.cloud.objectstorage.internal.ServiceEndpointBuilder;
import com.ibm.cloud.objectstorage.internal.StaticCredentialsProvider;
import com.ibm.cloud.objectstorage.internal.auth.SignerProvider;
import com.ibm.cloud.objectstorage.metrics.AwsSdkMetrics;
import com.ibm.cloud.objectstorage.metrics.RequestMetricCollector;
import com.ibm.cloud.objectstorage.oauth.IBMOAuthCredentials;
import com.ibm.cloud.objectstorage.oauth.IBMOAuthSigner;
import com.ibm.cloud.objectstorage.oauth.OAuthServiceException;
import com.ibm.cloud.objectstorage.regions.RegionUtils;
import com.ibm.cloud.objectstorage.regions.Regions;
import com.ibm.cloud.objectstorage.retry.PredefinedRetryPolicies;
import com.ibm.cloud.objectstorage.retry.RetryPolicy;
import com.ibm.cloud.objectstorage.services.s3.internal.AWSS3V4Signer;
import com.ibm.cloud.objectstorage.services.s3.internal.BucketNameUtils;
import com.ibm.cloud.objectstorage.services.s3.internal.CompleteMultipartUploadRetryCondition;
import com.ibm.cloud.objectstorage.services.s3.internal.Constants;
import com.ibm.cloud.objectstorage.services.s3.internal.DeleteObjectTaggingHeaderHandler;
import com.ibm.cloud.objectstorage.services.s3.internal.DeleteObjectsResponse;
import com.ibm.cloud.objectstorage.services.s3.internal.DigestValidationInputStream;
import com.ibm.cloud.objectstorage.services.s3.internal.DualstackEndpointBuilder;
import com.ibm.cloud.objectstorage.services.s3.internal.GetObjectTaggingResponseHeaderHandler;
import com.ibm.cloud.objectstorage.services.s3.internal.InitiateMultipartUploadHeaderHandler;
import com.ibm.cloud.objectstorage.services.s3.internal.InputSubstream;
import com.ibm.cloud.objectstorage.services.s3.internal.ListPartsHeaderHandler;
import com.ibm.cloud.objectstorage.services.s3.internal.MD5DigestCalculatingInputStream;
import com.ibm.cloud.objectstorage.services.s3.internal.Mimetypes;
import com.ibm.cloud.objectstorage.services.s3.internal.MultiFileOutputStream;
import com.ibm.cloud.objectstorage.services.s3.internal.ObjectExpirationHeaderHandler;
import com.ibm.cloud.objectstorage.services.s3.internal.ResponseHeaderHandlerChain;
import com.ibm.cloud.objectstorage.services.s3.internal.S3ErrorResponseHandler;
import com.ibm.cloud.objectstorage.services.s3.internal.S3MetadataResponseHandler;
import com.ibm.cloud.objectstorage.services.s3.internal.S3ObjectResponseHandler;
import com.ibm.cloud.objectstorage.services.s3.internal.S3QueryStringSigner;
import com.ibm.cloud.objectstorage.services.s3.internal.S3RequestEndpointResolver;
import com.ibm.cloud.objectstorage.services.s3.internal.S3RequesterChargedHeaderHandler;
import com.ibm.cloud.objectstorage.services.s3.internal.S3Signer;
import com.ibm.cloud.objectstorage.services.s3.internal.S3V4AuthErrorRetryStrategy;
import com.ibm.cloud.objectstorage.services.s3.internal.S3VersionHeaderHandler;
import com.ibm.cloud.objectstorage.services.s3.internal.S3XmlResponseHandler;
import com.ibm.cloud.objectstorage.services.s3.internal.ServerSideEncryptionHeaderHandler;
import com.ibm.cloud.objectstorage.services.s3.internal.ServiceUtils;
import com.ibm.cloud.objectstorage.services.s3.internal.SetObjectTaggingResponseHeaderHandler;
import com.ibm.cloud.objectstorage.services.s3.internal.SkipMd5CheckStrategy;
import com.ibm.cloud.objectstorage.services.s3.internal.XmlWriter;
import com.ibm.cloud.objectstorage.services.s3.internal.auth.S3SignerProvider;
import com.ibm.cloud.objectstorage.services.s3.metrics.S3ServiceMetric;
import com.ibm.cloud.objectstorage.services.s3.model.AbortMultipartUploadRequest;
import com.ibm.cloud.objectstorage.services.s3.model.AccessControlList;
import com.ibm.cloud.objectstorage.services.s3.model.AddLegalHoldRequest;
import com.ibm.cloud.objectstorage.services.s3.model.AmazonS3Exception;
import com.ibm.cloud.objectstorage.services.s3.model.Bucket;
import com.ibm.cloud.objectstorage.services.s3.model.BucketCrossOriginConfiguration;
import com.ibm.cloud.objectstorage.services.s3.model.BucketLifecycleConfiguration;
import com.ibm.cloud.objectstorage.services.s3.model.BucketProtectionConfiguration;
import com.ibm.cloud.objectstorage.services.s3.model.BucketTaggingConfiguration;
import com.ibm.cloud.objectstorage.services.s3.model.BucketVersioningConfiguration;
import com.ibm.cloud.objectstorage.services.s3.model.BucketWebsiteConfiguration;
import com.ibm.cloud.objectstorage.services.s3.model.CannedAccessControlList;
import com.ibm.cloud.objectstorage.services.s3.model.CompleteMultipartUploadRequest;
import com.ibm.cloud.objectstorage.services.s3.model.CompleteMultipartUploadResult;
import com.ibm.cloud.objectstorage.services.s3.model.CopyObjectRequest;
import com.ibm.cloud.objectstorage.services.s3.model.CopyObjectResult;
import com.ibm.cloud.objectstorage.services.s3.model.CopyPartRequest;
import com.ibm.cloud.objectstorage.services.s3.model.CopyPartResult;
import com.ibm.cloud.objectstorage.services.s3.model.CreateBucketRequest;
import com.ibm.cloud.objectstorage.services.s3.model.DeleteBucketCrossOriginConfigurationRequest;
import com.ibm.cloud.objectstorage.services.s3.model.DeleteBucketLifecycleConfigurationRequest;
import com.ibm.cloud.objectstorage.services.s3.model.DeleteBucketRequest;
import com.ibm.cloud.objectstorage.services.s3.model.DeleteBucketTaggingConfigurationRequest;
import com.ibm.cloud.objectstorage.services.s3.model.DeleteBucketWebsiteConfigurationRequest;
import com.ibm.cloud.objectstorage.services.s3.model.DeleteLegalHoldRequest;
import com.ibm.cloud.objectstorage.services.s3.model.DeleteObjectRequest;
import com.ibm.cloud.objectstorage.services.s3.model.DeleteObjectTaggingRequest;
import com.ibm.cloud.objectstorage.services.s3.model.DeleteObjectTaggingResult;
import com.ibm.cloud.objectstorage.services.s3.model.DeleteObjectsRequest;
import com.ibm.cloud.objectstorage.services.s3.model.DeleteObjectsResult;
import com.ibm.cloud.objectstorage.services.s3.model.DeleteVersionRequest;
import com.ibm.cloud.objectstorage.services.s3.model.ExtendObjectRetentionRequest;
import com.ibm.cloud.objectstorage.services.s3.model.FASPConnectionInfo;
import com.ibm.cloud.objectstorage.services.s3.model.GeneratePresignedUrlRequest;
import com.ibm.cloud.objectstorage.services.s3.model.GenericBucketRequest;
import com.ibm.cloud.objectstorage.services.s3.model.GetBucketAclRequest;
import com.ibm.cloud.objectstorage.services.s3.model.GetBucketCrossOriginConfigurationRequest;
import com.ibm.cloud.objectstorage.services.s3.model.GetBucketFaspConnectionInfoRequest;
import com.ibm.cloud.objectstorage.services.s3.model.GetBucketLifecycleConfigurationRequest;
import com.ibm.cloud.objectstorage.services.s3.model.GetBucketProtectionConfigurationRequest;
import com.ibm.cloud.objectstorage.services.s3.model.GetBucketTaggingConfigurationRequest;
import com.ibm.cloud.objectstorage.services.s3.model.GetBucketVersioningConfigurationRequest;
import com.ibm.cloud.objectstorage.services.s3.model.GetBucketWebsiteConfigurationRequest;
import com.ibm.cloud.objectstorage.services.s3.model.GetObjectAclRequest;
import com.ibm.cloud.objectstorage.services.s3.model.GetObjectMetadataRequest;
import com.ibm.cloud.objectstorage.services.s3.model.GetObjectRequest;
import com.ibm.cloud.objectstorage.services.s3.model.GetObjectTaggingRequest;
import com.ibm.cloud.objectstorage.services.s3.model.GetObjectTaggingResult;
import com.ibm.cloud.objectstorage.services.s3.model.GetRequestPaymentConfigurationRequest;
import com.ibm.cloud.objectstorage.services.s3.model.GetS3AccountOwnerRequest;
import com.ibm.cloud.objectstorage.services.s3.model.Grant;
import com.ibm.cloud.objectstorage.services.s3.model.Grantee;
import com.ibm.cloud.objectstorage.services.s3.model.GroupGrantee;
import com.ibm.cloud.objectstorage.services.s3.model.HeadBucketRequest;
import com.ibm.cloud.objectstorage.services.s3.model.HeadBucketResult;
import com.ibm.cloud.objectstorage.services.s3.model.InitiateMultipartUploadRequest;
import com.ibm.cloud.objectstorage.services.s3.model.InitiateMultipartUploadResult;
import com.ibm.cloud.objectstorage.services.s3.model.ListBucketsExtendedRequest;
import com.ibm.cloud.objectstorage.services.s3.model.ListBucketsExtendedResponse;
import com.ibm.cloud.objectstorage.services.s3.model.ListBucketsRequest;
import com.ibm.cloud.objectstorage.services.s3.model.ListLegalHoldsRequest;
import com.ibm.cloud.objectstorage.services.s3.model.ListLegalHoldsResult;
import com.ibm.cloud.objectstorage.services.s3.model.ListMultipartUploadsRequest;
import com.ibm.cloud.objectstorage.services.s3.model.ListNextBatchOfObjectsRequest;
import com.ibm.cloud.objectstorage.services.s3.model.ListNextBatchOfVersionsRequest;
import com.ibm.cloud.objectstorage.services.s3.model.ListObjectsRequest;
import com.ibm.cloud.objectstorage.services.s3.model.ListObjectsV2Request;
import com.ibm.cloud.objectstorage.services.s3.model.ListObjectsV2Result;
import com.ibm.cloud.objectstorage.services.s3.model.ListPartsRequest;
import com.ibm.cloud.objectstorage.services.s3.model.ListVersionsRequest;
import com.ibm.cloud.objectstorage.services.s3.model.MultiFactorAuthentication;
import com.ibm.cloud.objectstorage.services.s3.model.MultiObjectDeleteException;
import com.ibm.cloud.objectstorage.services.s3.model.MultipartUploadListing;
import com.ibm.cloud.objectstorage.services.s3.model.ObjectListing;
import com.ibm.cloud.objectstorage.services.s3.model.ObjectMetadata;
import com.ibm.cloud.objectstorage.services.s3.model.ObjectTagging;
import com.ibm.cloud.objectstorage.services.s3.model.Owner;
import com.ibm.cloud.objectstorage.services.s3.model.PartETag;
import com.ibm.cloud.objectstorage.services.s3.model.PartListing;
import com.ibm.cloud.objectstorage.services.s3.model.Permission;
import com.ibm.cloud.objectstorage.services.s3.model.PutObjectRequest;
import com.ibm.cloud.objectstorage.services.s3.model.PutObjectResult;
import com.ibm.cloud.objectstorage.services.s3.model.Region;
import com.ibm.cloud.objectstorage.services.s3.model.RequestPaymentConfiguration;
import com.ibm.cloud.objectstorage.services.s3.model.ResponseHeaderOverrides;
import com.ibm.cloud.objectstorage.services.s3.model.RestoreObjectRequest;
import com.ibm.cloud.objectstorage.services.s3.model.S3AccelerateUnsupported;
import com.ibm.cloud.objectstorage.services.s3.model.S3Object;
import com.ibm.cloud.objectstorage.services.s3.model.S3ObjectInputStream;
import com.ibm.cloud.objectstorage.services.s3.model.SSEAwsKeyManagementParams;
import com.ibm.cloud.objectstorage.services.s3.model.SSEAwsKeyManagementParamsProvider;
import com.ibm.cloud.objectstorage.services.s3.model.SSECustomerKey;
import com.ibm.cloud.objectstorage.services.s3.model.SSECustomerKeyProvider;
import com.ibm.cloud.objectstorage.services.s3.model.SetBucketAclRequest;
import com.ibm.cloud.objectstorage.services.s3.model.SetBucketCrossOriginConfigurationRequest;
import com.ibm.cloud.objectstorage.services.s3.model.SetBucketLifecycleConfigurationRequest;
import com.ibm.cloud.objectstorage.services.s3.model.SetBucketProtectionConfigurationRequest;
import com.ibm.cloud.objectstorage.services.s3.model.SetBucketTaggingConfigurationRequest;
import com.ibm.cloud.objectstorage.services.s3.model.SetBucketVersioningConfigurationRequest;
import com.ibm.cloud.objectstorage.services.s3.model.SetBucketWebsiteConfigurationRequest;
import com.ibm.cloud.objectstorage.services.s3.model.SetObjectAclRequest;
import com.ibm.cloud.objectstorage.services.s3.model.SetObjectTaggingRequest;
import com.ibm.cloud.objectstorage.services.s3.model.SetObjectTaggingResult;
import com.ibm.cloud.objectstorage.services.s3.model.SetPublicAccessBlockRequest;
import com.ibm.cloud.objectstorage.services.s3.model.SetPublicAccessBlockResult;
import com.ibm.cloud.objectstorage.services.s3.model.PublicAccessBlockConfiguration;
import com.ibm.cloud.objectstorage.services.s3.model.GetPublicAccessBlockRequest;
import com.ibm.cloud.objectstorage.services.s3.model.GetPublicAccessBlockResult;
import com.ibm.cloud.objectstorage.services.s3.model.DeletePublicAccessBlockRequest;
import com.ibm.cloud.objectstorage.services.s3.model.DeletePublicAccessBlockResult;
import com.ibm.cloud.objectstorage.services.s3.model.SetRequestPaymentConfigurationRequest;
import com.ibm.cloud.objectstorage.services.s3.model.StorageClass;
import com.ibm.cloud.objectstorage.services.s3.model.Tag;
import com.ibm.cloud.objectstorage.services.s3.model.UploadObjectRequest;
import com.ibm.cloud.objectstorage.services.s3.model.UploadPartRequest;
import com.ibm.cloud.objectstorage.services.s3.model.UploadPartResult;
import com.ibm.cloud.objectstorage.services.s3.model.VersionListing;
import com.ibm.cloud.objectstorage.services.s3.model.transform.AclXmlFactory;
import com.ibm.cloud.objectstorage.services.s3.model.transform.BucketConfigurationXmlFactory;
import com.ibm.cloud.objectstorage.services.s3.model.transform.GetPublicAccessBlockStaxUnmarshaller;
import com.ibm.cloud.objectstorage.services.s3.model.transform.HeadBucketResultHandler;
import com.ibm.cloud.objectstorage.services.s3.model.transform.MultiObjectDeleteXmlFactory;
import com.ibm.cloud.objectstorage.services.s3.model.transform.ObjectTaggingXmlFactory;
import com.ibm.cloud.objectstorage.services.s3.model.transform.RequestPaymentConfigurationXmlFactory;
import com.ibm.cloud.objectstorage.services.s3.model.transform.RequestXmlFactory;
import com.ibm.cloud.objectstorage.services.s3.model.transform.Unmarshallers;
import com.ibm.cloud.objectstorage.services.s3.model.transform.XmlResponsesSaxParser.CompleteMultipartUploadHandler;
import com.ibm.cloud.objectstorage.services.s3.model.transform.XmlResponsesSaxParser.CopyObjectResultHandler;
import com.ibm.cloud.objectstorage.services.s3.request.S3HandlerContextKeys;
import com.ibm.cloud.objectstorage.services.s3.waiters.AmazonS3Waiters;
import com.ibm.cloud.objectstorage.transform.Unmarshaller;
import com.ibm.cloud.objectstorage.util.AWSRequestMetrics;
import com.ibm.cloud.objectstorage.util.AWSRequestMetrics.Field;
import com.ibm.cloud.objectstorage.util.AwsHostNameUtils;
import com.ibm.cloud.objectstorage.util.Base16;
import com.ibm.cloud.objectstorage.util.Base64;
import com.ibm.cloud.objectstorage.util.BinaryUtils;
import com.ibm.cloud.objectstorage.util.CredentialUtils;
import com.ibm.cloud.objectstorage.util.DateUtils;
import com.ibm.cloud.objectstorage.util.IOUtils;
import com.ibm.cloud.objectstorage.util.LengthCheckInputStream;
import com.ibm.cloud.objectstorage.util.Md5Utils;
import com.ibm.cloud.objectstorage.util.RuntimeHttpUtils;
import com.ibm.cloud.objectstorage.util.SdkHttpUtils;
import com.ibm.cloud.objectstorage.util.ServiceClientHolderInputStream;
import com.ibm.cloud.objectstorage.util.StringUtils;
import com.ibm.cloud.objectstorage.util.ValidationUtils;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.net.URL;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.regex.Matcher;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.http.client.methods.HttpRequestBase;
/**
* <p>
* Provides the client for accessing the Amazon S3 web service.
* <p>
* Amazon S3 provides storage for the Internet, and is designed to make
* web-scale computing easier for developers.
* <p>
* The Amazon S3 Java Client provides a simple interface that can be used to
* store and retrieve any amount of data, at any time, from anywhere on the
* web. It gives any developer access to the same highly scalable, reliable,
* secure, fast, inexpensive infrastructure that Amazon uses to run its own
* global network of web sites. The service aims to maximize benefits of scale
* and to pass those benefits on to developers.
* <p>
* For more information about Amazon S3, please see
* <a href="http://aws.amazon.com/s3">http://aws.amazon.com/s3</a>.
*/
@ThreadSafe
public class AmazonS3Client extends AmazonWebServiceClient implements AmazonS3 {
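// A minimal construction sketch (illustrative only; the endpoint URL, signing
// region, and credentials provider shown here are placeholder assumptions, not
// values defined by this class):
//
//     AmazonS3 cos = AmazonS3ClientBuilder.standard()
//             .withCredentials(credentialsProvider)   // any AWSCredentialsProvider
//             .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(
//                     "https://s3.us.cloud-object-storage.appdomain.cloud", "us-standard"))
//             .build();
//     cos.putObject("my-bucket", "my-key", new File("data.txt"));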
public static final String S3_SERVICE_NAME = "s3";
private static final String S3_SIGNER = "S3SignerType";
private static final String S3_V4_SIGNER = "AWSS3V4SignerType";
public static final String OAUTH_SIGNER = "OAuthSignerType";
protected static final AmazonS3ClientConfigurationFactory configFactory
= new AmazonS3ClientConfigurationFactory();
/** Shared logger for client events */
private static Log log = LogFactory.getLog(AmazonS3Client.class);
static {
// Enable S3 specific predefined request metrics.
AwsSdkMetrics.addAll(Arrays.asList(S3ServiceMetric.values()));
// Register S3-specific signers.
SignerFactory.registerSigner(S3_SIGNER, S3Signer.class);
SignerFactory.registerSigner(S3_V4_SIGNER, AWSS3V4Signer.class);
SignerFactory.registerSigner(OAUTH_SIGNER, IBMOAuthSigner.class);
}
private volatile AmazonS3Waiters waiters;
/** Provider for AWS credentials. */
protected final AWSCredentialsProvider awsCredentialsProvider;
/** Responsible for handling error responses from all S3 service calls. */
protected final S3ErrorResponseHandler errorResponseHandler = new S3ErrorResponseHandler();
/** Shared response handler for operations with no response. */
private final S3XmlResponseHandler<Void> voidResponseHandler = new S3XmlResponseHandler<Void>(null);
/** Shared factory for converting configuration objects to XML */
private static final BucketConfigurationXmlFactory bucketConfigurationXmlFactory = new BucketConfigurationXmlFactory();
/** Shared factory for converting request payment configuration objects to XML */
private static final RequestPaymentConfigurationXmlFactory requestPaymentConfigurationXmlFactory = new RequestPaymentConfigurationXmlFactory();
/** S3 specific client configuration options */
private volatile S3ClientOptions clientOptions = S3ClientOptions.builder().build();
/**
* The S3 client region that is set by either (a) calling
* setRegion/configureRegion OR (b) calling setEndpoint with a
* region-specific S3 endpoint. This region string will be used for signing
* requests sent by this client.
*/
private volatile String clientRegion;
private static final int BUCKET_REGION_CACHE_SIZE = 300;
private static final Map<String, String> bucketRegionCache =
Collections.synchronizedMap(new LinkedHashMap<String, String>(BUCKET_REGION_CACHE_SIZE, 1.1f, true) {
private static final long serialVersionUID = 23453L;
@Override
protected boolean removeEldestEntry(Map.Entry<String, String> eldest) {
return size() > BUCKET_REGION_CACHE_SIZE;
}
});
static Map<String, String> getBucketRegionCache() {
return bucketRegionCache;
}
private final SkipMd5CheckStrategy skipMd5CheckStrategy;
private final CompleteMultipartUploadRetryCondition
completeMultipartUploadRetryCondition = new CompleteMultipartUploadRetryCondition();
/**
* Constructs a new client to invoke service methods on Amazon S3. A
* credentials provider chain will be used that searches for credentials in
* this order:
*
* - Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY
* - Java System Properties - aws.accessKeyId and aws.secretKey
* - Credential profiles file at the default location (~/.aws/credentials) shared by all AWS SDKs and the AWS CLI
* - Instance Profile Credentials - delivered through the Amazon EC2
* metadata service
*
*
*
* If no credentials are found in the chain, this client will attempt to
* work in an anonymous mode where requests aren't signed. Only a subset of
* the Amazon S3 API will work with anonymous (i.e. unsigned) requests,
* but this can prove useful in some situations. For example:
*
* - If an Amazon S3 bucket has {@link Permission#Read} permission for the
* {@link GroupGrantee#AllUsers} group, anonymous clients can call
* {@link #listObjects(String)} to see what objects are stored in a bucket.
* - If an object has {@link Permission#Read} permission for the
* {@link GroupGrantee#AllUsers} group, anonymous clients can call
* {@link #getObject(String, String)} and
* {@link #getObjectMetadata(String, String)} to pull object content and
* metadata.
* - If a bucket has {@link Permission#Write} permission for the
* {@link GroupGrantee#AllUsers} group, anonymous clients can upload objects
* to the bucket.
*
*
*
* You can force the client to operate in an anonymous mode, and skip the credentials
* provider chain, by passing in <code>null</code> for the credentials.
*
*
* @see AmazonS3Client#AmazonS3Client(AWSCredentials)
* @see AmazonS3Client#AmazonS3Client(AWSCredentials, ClientConfiguration)
* @sample AmazonS3.CreateClient
* @deprecated use {@link AmazonS3ClientBuilder#defaultClient()}
*/
@Deprecated
public AmazonS3Client() {
this(new S3CredentialsProviderChain());
}
/**
* Constructs a new Amazon S3 client using the specified AWS credentials to
* access Amazon S3.
*
* @param awsCredentials
* The AWS credentials to use when making requests to Amazon S3
* with this client.
*
* @see AmazonS3Client#AmazonS3Client()
* @see AmazonS3Client#AmazonS3Client(AWSCredentials, ClientConfiguration)
* @deprecated use {@link AmazonS3ClientBuilder#withCredentials(AWSCredentialsProvider)}
*/
@Deprecated
public AmazonS3Client(AWSCredentials awsCredentials) {
this(awsCredentials, configFactory.getConfig());
}
/**
* Constructs a new Amazon S3 client using the specified AWS credentials and
* client configuration to access Amazon S3.
*
* @param awsCredentials
* The AWS credentials to use when making requests to Amazon S3
* with this client.
* @param clientConfiguration
* The client configuration options controlling how this client
* connects to Amazon S3 (e.g. proxy settings, retry counts, etc).
*
* @see AmazonS3Client#AmazonS3Client()
* @see AmazonS3Client#AmazonS3Client(AWSCredentials)
* @deprecated use {@link AmazonS3ClientBuilder#withCredentials(AWSCredentialsProvider)} and
* {@link AmazonS3ClientBuilder#withClientConfiguration(ClientConfiguration)}
*/
@Deprecated
public AmazonS3Client(AWSCredentials awsCredentials, ClientConfiguration clientConfiguration) {
this(new StaticCredentialsProvider(awsCredentials), clientConfiguration);
}
/**
* Constructs a new Amazon S3 client using the specified AWS credentials
* provider to access Amazon S3.
*
* @param credentialsProvider
* The AWS credentials provider which will provide credentials
* to authenticate requests with AWS services.
* @deprecated use {@link AmazonS3ClientBuilder#withCredentials(AWSCredentialsProvider)}
*/
@Deprecated
public AmazonS3Client(AWSCredentialsProvider credentialsProvider) {
this(credentialsProvider, configFactory.getConfig());
}
/**
* Constructs a new Amazon S3 client using the specified AWS credentials and
* client configuration to access Amazon S3.
*
* @param credentialsProvider
* The AWS credentials provider which will provide credentials
* to authenticate requests with AWS services.
* @param clientConfiguration
* The client configuration options controlling how this client
* connects to Amazon S3 (e.g. proxy settings, retry counts, etc).
* @deprecated use {@link AmazonS3ClientBuilder#withCredentials(AWSCredentialsProvider)} and
* {@link AmazonS3ClientBuilder#withClientConfiguration(ClientConfiguration)}
*/
@Deprecated
public AmazonS3Client(AWSCredentialsProvider credentialsProvider,
ClientConfiguration clientConfiguration) {
this(credentialsProvider, clientConfiguration, null);
}
/**
* Constructs a new Amazon S3 client using the specified AWS credentials,
* client configuration and request metric collector to access Amazon S3.
*
* @param credentialsProvider
* The AWS credentials provider which will provide credentials
* to authenticate requests with AWS services.
* @param clientConfiguration
* The client configuration options controlling how this client
* connects to Amazon S3 (e.g. proxy settings, retry counts, etc).
* @param requestMetricCollector request metric collector
* @deprecated use {@link AmazonS3ClientBuilder#withCredentials(AWSCredentialsProvider)} and
* {@link AmazonS3ClientBuilder#withClientConfiguration(ClientConfiguration)} and
* {@link AmazonS3ClientBuilder#withMetricsCollector(RequestMetricCollector)}
*/
@Deprecated
public AmazonS3Client(AWSCredentialsProvider credentialsProvider,
ClientConfiguration clientConfiguration,
RequestMetricCollector requestMetricCollector) {
this(credentialsProvider, clientConfiguration, requestMetricCollector, SkipMd5CheckStrategy.INSTANCE);
}
/**
* Constructs a new Amazon S3 client using the specified AWS credentials,
* client configuration and request metric collector to access Amazon S3.
*
* @param credentialsProvider
* The AWS credentials provider which will provide credentials
* to authenticate requests with AWS services.
* @param clientConfiguration
* The client configuration options controlling how this client
* connects to Amazon S3 (e.g. proxy settings, retry counts, etc).
* @param requestMetricCollector request metric collector
*/
@SdkTestInternalApi
AmazonS3Client(AWSCredentialsProvider credentialsProvider,
ClientConfiguration clientConfiguration,
RequestMetricCollector requestMetricCollector,
SkipMd5CheckStrategy skipMd5CheckStrategy) {
super(clientConfiguration, requestMetricCollector, true);
this.awsCredentialsProvider = credentialsProvider;
this.skipMd5CheckStrategy = skipMd5CheckStrategy;
init();
}
/**
* Constructs a new client using the specified client configuration to
* access Amazon S3. A credentials provider chain will be used that searches
* for credentials in this order:
*
* - Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY
* - Java System Properties - aws.accessKeyId and aws.secretKey
* - Instance Profile Credentials - delivered through the Amazon EC2
* metadata service
*
*
*
* If no credentials are found in the chain, this client will attempt to
* work in an anonymous mode where requests aren't signed. Only a subset of
* the Amazon S3 API will work with anonymous (i.e. unsigned)
* requests, but this can prove useful in some situations. For example:
*
* - If an Amazon S3 bucket has {@link Permission#Read} permission for the
* {@link GroupGrantee#AllUsers} group, anonymous clients can call
* {@link #listObjects(String)} to see what objects are stored in a bucket.
* - If an object has {@link Permission#Read} permission for the
* {@link GroupGrantee#AllUsers} group, anonymous clients can call
* {@link #getObject(String, String)} and
* {@link #getObjectMetadata(String, String)} to pull object content and
* metadata.
* - If a bucket has {@link Permission#Write} permission for the
* {@link GroupGrantee#AllUsers} group, anonymous clients can upload objects
* to the bucket.
*
*
*
* You can force the client to operate in an anonymous mode, and skip the
* credentials provider chain, by passing in <code>null</code> for the
* credentials.
*
*
* @param clientConfiguration
* The client configuration options controlling how this client
* connects to Amazon S3 (e.g. proxy settings, retry counts, etc).
*
* @see AmazonS3Client#AmazonS3Client(AWSCredentials)
* @see AmazonS3Client#AmazonS3Client(AWSCredentials, ClientConfiguration)
* @deprecated use {@link AmazonS3ClientBuilder#withClientConfiguration(ClientConfiguration)}
*/
@Deprecated
public AmazonS3Client(ClientConfiguration clientConfiguration) {
this(new S3CredentialsProviderChain(), clientConfiguration);
}
/**
* Constructs a new client to invoke service methods on S3 using the specified parameters. All
* service calls made using this new client object are blocking, and will not return until the
* service call completes.
*
* @param s3ClientParams Object providing S3 client parameters.
* @see AmazonS3ClientBuilder For a fluent way to construct a client.
*/
@SdkInternalApi
AmazonS3Client(AmazonS3ClientParams s3ClientParams) {
super(s3ClientParams.getClientParams());
this.awsCredentialsProvider = s3ClientParams.getClientParams().getCredentialsProvider();
this.skipMd5CheckStrategy = SkipMd5CheckStrategy.INSTANCE;
setS3ClientOptions(s3ClientParams.getS3ClientOptions());
init();
}
public static AmazonS3ClientBuilder builder() {
return AmazonS3ClientBuilder.standard();
}
private void init() {
// calling this.setEndpoint(...) will also modify the signer accordingly
setEndpoint(Constants.S3_HOSTNAME);
HandlerChainFactory chainFactory = new HandlerChainFactory();
requestHandler2s.addAll(chainFactory.newRequestHandlerChain(
"/com/amazonaws/services/s3/request.handlers"));
requestHandler2s.addAll(chainFactory.newRequestHandler2Chain(
"/com/amazonaws/services/s3/request.handler2s"));
requestHandler2s.addAll(chainFactory.getGlobalHandlers());
}
/**
* @deprecated use {@link AmazonS3ClientBuilder#setEndpointConfiguration(AwsClientBuilder.EndpointConfiguration)}
*/
@Override
@Deprecated
public synchronized void setEndpoint(String endpoint) {
if (ServiceUtils.isS3AccelerateEndpoint(endpoint)) {
throw new IllegalStateException("To enable accelerate mode, please use AmazonS3ClientBuilder.withAccelerateModeEnabled(true)");
} else {
super.setEndpoint(endpoint);
/*
* Extract the region string from the endpoint if it's not known to be a
* global S3 endpoint.
*/
if (!ServiceUtils.isS3USStandardEndpoint(endpoint)) {
clientRegion = AwsHostNameUtils.parseRegionName(this.endpoint.getHost(), S3_SERVICE_NAME);
}
}
}
/**
* @deprecated use {@link AmazonS3ClientBuilder#setRegion(String)}
*/
@Override
@Deprecated
public synchronized void setRegion(com.ibm.cloud.objectstorage.regions.Region region) {
super.setRegion(region);
/*
* We need to preserve the user provided region. This is because the
* region might be mapped to a global s3 endpoint (e.g. when the client
* is in accelerate mode), in which case we won't be able to extract the
* region back from the endpoint during request signing phase.
*/
clientRegion = region.getName();
}
/**
*
* Override the default S3 client options for this client. Also set the
* endpoint to s3-accelerate if such is specified in the S3 client options.
*
*
* @param clientOptions
* The S3 client options to use.
*/
@Override
public synchronized void setS3ClientOptions(S3ClientOptions clientOptions) {
checkMutability();
this.clientOptions = new S3ClientOptions(clientOptions);
}
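// A minimal sketch of overriding the client options after construction, assuming
// the S3ClientOptions builder exposes setPathStyleAccess (the flag name is an
// assumption; values are placeholders):
//
//     s3.setS3ClientOptions(S3ClientOptions.builder()
//             .setPathStyleAccess(true)   // address buckets as host/bucket/key
//             .build());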
/**
* S3 uses wildcard certificates so we have to disable strict hostname verification when using
* SSL.
*/
@Override
protected boolean useStrictHostNameVerification() {
return false;
}
@Override
public VersionListing listNextBatchOfVersions(VersionListing previousVersionListing)
throws SdkClientException, AmazonServiceException {
return listNextBatchOfVersions(new ListNextBatchOfVersionsRequest(previousVersionListing));
}
@Override
public VersionListing listNextBatchOfVersions(ListNextBatchOfVersionsRequest listNextBatchOfVersionsRequest) {
listNextBatchOfVersionsRequest = beforeClientExecution(listNextBatchOfVersionsRequest);
rejectNull(listNextBatchOfVersionsRequest,
"The request object parameter must be specified when listing the next batch of versions in a bucket");
VersionListing previousVersionListing = listNextBatchOfVersionsRequest.getPreviousVersionListing();
if (!previousVersionListing.isTruncated()) {
VersionListing emptyListing = new VersionListing();
emptyListing.setBucketName(previousVersionListing.getBucketName());
emptyListing.setDelimiter(previousVersionListing.getDelimiter());
emptyListing.setKeyMarker(previousVersionListing.getNextKeyMarker());
emptyListing.setVersionIdMarker(previousVersionListing.getNextVersionIdMarker());
emptyListing.setMaxKeys(previousVersionListing.getMaxKeys());
emptyListing.setPrefix(previousVersionListing.getPrefix());
emptyListing.setEncodingType(previousVersionListing.getEncodingType());
emptyListing.setTruncated(false);
return emptyListing;
}
return listVersions(listNextBatchOfVersionsRequest.toListVersionsRequest());
}
@Override
public VersionListing listVersions(String bucketName, String prefix)
throws SdkClientException, AmazonServiceException {
return listVersions(new ListVersionsRequest(bucketName, prefix, null, null, null, null));
}
@Override
public VersionListing listVersions(String bucketName, String prefix, String keyMarker, String versionIdMarker, String delimiter, Integer maxKeys)
throws SdkClientException, AmazonServiceException {
ListVersionsRequest request = new ListVersionsRequest()
.withBucketName(bucketName)
.withPrefix(prefix)
.withDelimiter(delimiter)
.withKeyMarker(keyMarker)
.withVersionIdMarker(versionIdMarker)
.withMaxResults(maxKeys);
return listVersions(request);
}
@Override
public VersionListing listVersions(ListVersionsRequest listVersionsRequest)
throws SdkClientException, AmazonServiceException {
listVersionsRequest = beforeClientExecution(listVersionsRequest);
rejectNull(listVersionsRequest.getBucketName(), "The bucket name parameter must be specified when listing versions in a bucket");
/**
* This flag indicates whether the SDK should URL-decode the S3 key names in the
* response. Decoding is enabled only when the caller has not explicitly set an
* encoding type via {@link ListVersionsRequest#setEncodingType(String)};
* otherwise it is disabled to preserve backwards compatibility.
*/
final boolean shouldSDKDecodeResponse = listVersionsRequest.getEncodingType() == null;
Request<ListVersionsRequest> request = createRequest(listVersionsRequest.getBucketName(), null, listVersionsRequest, HttpMethodName.GET);
request.addParameter("versions", null);
addParameterIfNotNull(request, "prefix", listVersionsRequest.getPrefix());
addParameterIfNotNull(request, "key-marker", listVersionsRequest.getKeyMarker());
addParameterIfNotNull(request, "version-id-marker", listVersionsRequest.getVersionIdMarker());
addParameterIfNotNull(request, "delimiter", listVersionsRequest.getDelimiter());
if (listVersionsRequest.getMaxResults() != null && listVersionsRequest.getMaxResults() >= 0) request.addParameter("max-keys", listVersionsRequest.getMaxResults().toString());
request.addParameter("encoding-type", shouldSDKDecodeResponse ? Constants.URL_ENCODING : listVersionsRequest.getEncodingType());
return invoke(request, new Unmarshallers.VersionListUnmarshaller(shouldSDKDecodeResponse), listVersionsRequest.getBucketName(), null);
}
@Override
public ObjectListing listObjects(String bucketName)
throws SdkClientException, AmazonServiceException {
return listObjects(new ListObjectsRequest(bucketName, null, null, null, null));
}
@Override
public ObjectListing listObjects(String bucketName, String prefix)
throws SdkClientException, AmazonServiceException {
return listObjects(new ListObjectsRequest(bucketName, prefix, null, null, null));
}
@Override
public ObjectListing listObjects(ListObjectsRequest listObjectsRequest)
throws SdkClientException, AmazonServiceException {
listObjectsRequest = beforeClientExecution(listObjectsRequest);
rejectNull(listObjectsRequest.getBucketName(), "The bucket name parameter must be specified when listing objects in a bucket");
/**
* This flag indicates whether the SDK should URL-decode the S3 key names in the
* response. Decoding is enabled only when the caller has not explicitly set an
* encoding type via {@link ListObjectsRequest#setEncodingType(String)};
* otherwise it is disabled to preserve backwards compatibility.
*/
final boolean shouldSDKDecodeResponse = listObjectsRequest.getEncodingType() == null;
Request<ListObjectsRequest> request = createRequest(listObjectsRequest.getBucketName(), null, listObjectsRequest, HttpMethodName.GET);
addParameterIfNotNull(request, "prefix", listObjectsRequest.getPrefix());
addParameterIfNotNull(request, "marker", listObjectsRequest.getMarker());
addParameterIfNotNull(request, "delimiter", listObjectsRequest.getDelimiter());
if (listObjectsRequest.getMaxKeys() != null && listObjectsRequest.getMaxKeys().intValue() >= 0) request.addParameter("max-keys", listObjectsRequest.getMaxKeys().toString());
request.addParameter("encoding-type", shouldSDKDecodeResponse ? Constants.URL_ENCODING : listObjectsRequest.getEncodingType());
addHeaderIfNotEmpty(
request, Headers.MIRROR_DESTINATION, listObjectsRequest.getWormMirrorDestination());
return invoke(request, new Unmarshallers.ListObjectsUnmarshaller(shouldSDKDecodeResponse), listObjectsRequest.getBucketName(), null);
}
@Override
public ListObjectsV2Result listObjectsV2(String bucketName)
throws SdkClientException, AmazonServiceException {
return listObjectsV2(new ListObjectsV2Request().withBucketName(bucketName));
}
@Override
public ListObjectsV2Result listObjectsV2(String bucketName, String prefix)
throws SdkClientException, AmazonServiceException {
return listObjectsV2(new ListObjectsV2Request().withBucketName(bucketName).withPrefix(prefix));
}
@Override
public ListObjectsV2Result listObjectsV2(ListObjectsV2Request listObjectsV2Request)
throws SdkClientException, AmazonServiceException {
listObjectsV2Request = beforeClientExecution(listObjectsV2Request);
rejectNull(listObjectsV2Request.getBucketName(), "The bucket name parameter must be specified when listing objects in a bucket");
Request<ListObjectsV2Request> request = createRequest(listObjectsV2Request.getBucketName(), null, listObjectsV2Request, HttpMethodName.GET);
/**
* List type '2' is required to opt-in to listObjectsV2.
*/
request.addParameter("list-type", "2");
addParameterIfNotNull(request, "start-after", listObjectsV2Request.getStartAfter());
addParameterIfNotNull(request, "continuation-token", listObjectsV2Request.getContinuationToken());
addParameterIfNotNull(request, "delimiter", listObjectsV2Request.getDelimiter());
addParameterIfNotNull(request, "max-keys", listObjectsV2Request.getMaxKeys());
addParameterIfNotNull(request, "prefix", listObjectsV2Request.getPrefix());
addParameterIfNotNull(request, "encoding-type", listObjectsV2Request.getEncodingType());
request.addParameter("fetch-owner", Boolean.toString(listObjectsV2Request.isFetchOwner()));
/**
* If URL encoding has been requested from S3 we'll automatically decode the response.
*/
final boolean shouldSDKDecodeResponse = Constants.URL_ENCODING.equals(listObjectsV2Request.getEncodingType());
return invoke(request, new Unmarshallers.ListObjectsV2Unmarshaller(shouldSDKDecodeResponse), listObjectsV2Request.getBucketName(), null);
}
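// Pagination sketch for listObjectsV2 (illustrative; bucket name and page size
// are placeholders): keep re-issuing the request with the returned continuation
// token until the listing is no longer truncated.
//
//     ListObjectsV2Request req = new ListObjectsV2Request()
//             .withBucketName("my-bucket")
//             .withMaxKeys(1000);
//     ListObjectsV2Result page;
//     do {
//         page = s3.listObjectsV2(req);
//         for (S3ObjectSummary summary : page.getObjectSummaries()) {
//             System.out.println(summary.getKey());
//         }
//         req.setContinuationToken(page.getNextContinuationToken());
//     } while (page.isTruncated());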
@Override
public ObjectListing listNextBatchOfObjects(ObjectListing previousObjectListing)
throws SdkClientException, AmazonServiceException {
return listNextBatchOfObjects(new ListNextBatchOfObjectsRequest(previousObjectListing));
}
@Override
public ObjectListing listNextBatchOfObjects(ListNextBatchOfObjectsRequest listNextBatchOfObjectsRequest)
throws SdkClientException, AmazonServiceException {
listNextBatchOfObjectsRequest = beforeClientExecution(listNextBatchOfObjectsRequest);
rejectNull(listNextBatchOfObjectsRequest,
"The request object parameter must be specified when listing the next batch of objects in a bucket");
ObjectListing previousObjectListing = listNextBatchOfObjectsRequest.getPreviousObjectListing();
if (!previousObjectListing.isTruncated()) {
ObjectListing emptyListing = new ObjectListing();
emptyListing.setBucketName(previousObjectListing.getBucketName());
emptyListing.setDelimiter(previousObjectListing.getDelimiter());
emptyListing.setMarker(previousObjectListing.getNextMarker());
emptyListing.setMaxKeys(previousObjectListing.getMaxKeys());
emptyListing.setPrefix(previousObjectListing.getPrefix());
emptyListing.setEncodingType(previousObjectListing.getEncodingType());
emptyListing.setTruncated(false);
return emptyListing;
}
return listObjects(listNextBatchOfObjectsRequest.toListObjectsRequest());
}
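// Equivalent pagination sketch for the V1 listing API (illustrative; the bucket
// name is a placeholder): listObjects returns one page, and listNextBatchOfObjects
// fetches the following page until the listing is no longer truncated.
//
//     ObjectListing listing = s3.listObjects("my-bucket");
//     while (true) {
//         for (S3ObjectSummary summary : listing.getObjectSummaries()) {
//             System.out.println(summary.getKey());
//         }
//         if (!listing.isTruncated()) {
//             break;
//         }
//         listing = s3.listNextBatchOfObjects(listing);
//     }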
@Override
public Owner getS3AccountOwner()
throws SdkClientException, AmazonServiceException {
return getS3AccountOwner(new GetS3AccountOwnerRequest());
}
@Override
public Owner getS3AccountOwner(GetS3AccountOwnerRequest getS3AccountOwnerRequest)
throws SdkClientException, AmazonServiceException {
getS3AccountOwnerRequest = beforeClientExecution(getS3AccountOwnerRequest);
rejectNull(getS3AccountOwnerRequest, "The request object parameter getS3AccountOwnerRequest must be specified.");
Request<GetS3AccountOwnerRequest> request = createRequest(null, null, getS3AccountOwnerRequest, HttpMethodName.GET);
return invoke(request, new Unmarshallers.ListBucketsOwnerUnmarshaller(), null, null);
}
@Override
public List<Bucket> listBuckets(ListBucketsRequest listBucketsRequest)
throws SdkClientException, AmazonServiceException {
listBucketsRequest = beforeClientExecution(listBucketsRequest);
rejectNull(listBucketsRequest, "The request object parameter listBucketsRequest must be specified.");
Request<ListBucketsRequest> request = createRequest(null, null, listBucketsRequest, HttpMethodName.GET);
//Add IBM Service Instance Id to headers
if ((null != this.awsCredentialsProvider) && (this.awsCredentialsProvider.getCredentials() instanceof IBMOAuthCredentials)) {
IBMOAuthCredentials oAuthCreds = (IBMOAuthCredentials)this.awsCredentialsProvider.getCredentials();
if (oAuthCreds.getServiceInstanceId() != null) {
request.addHeader(Headers.IBM_SERVICE_INSTANCE_ID, oAuthCreds.getServiceInstanceId());
}
}
return invoke(request, new Unmarshallers.ListBucketsUnmarshaller(), null, null);
}
@Override
public List<Bucket> listBuckets()
throws SdkClientException, AmazonServiceException {
return listBuckets(new ListBucketsRequest());
}
@Override
public ListBucketsExtendedResponse listBucketsExtended() throws SdkClientException, AmazonServiceException {
return listBucketsExtended(new ListBucketsExtendedRequest());
}
@Override
public ListBucketsExtendedResponse listBucketsExtended(ListBucketsExtendedRequest listBucketsExtendedRequest)
throws SdkClientException, AmazonServiceException {
listBucketsExtendedRequest = beforeClientExecution(listBucketsExtendedRequest);
rejectNull(listBucketsExtendedRequest, "The request object parameter listBucketsExtendedRequest must be specified.");
Request<ListBucketsExtendedRequest> request = createRequest(null, null, listBucketsExtendedRequest, HttpMethodName.GET);
request.addParameter("extended", null);
addParameterIfNotNull(request, "marker", listBucketsExtendedRequest.getMarker());
addParameterIfNotNull(request, "prefix", listBucketsExtendedRequest.getPrefix());
addParameterIfNotNull(request, "max-keys", listBucketsExtendedRequest.getMaxKeys());
//Add IBM Service Instance Id to headers
if ((null != this.awsCredentialsProvider) && (this.awsCredentialsProvider.getCredentials() instanceof IBMOAuthCredentials)) {
IBMOAuthCredentials oAuthCreds = (IBMOAuthCredentials)this.awsCredentialsProvider.getCredentials();
if (oAuthCreds.getServiceInstanceId() != null) {
request.addHeader(Headers.IBM_SERVICE_INSTANCE_ID, oAuthCreds.getServiceInstanceId());
}
}
return invoke(request, new Unmarshallers.ListBucketsExtendedUnmarshaller(), null, null);
}
@Override
public Bucket createBucket(String bucketName)
throws SdkClientException, AmazonServiceException {
return createBucket(new CreateBucketRequest(bucketName));
}
@Override
@Deprecated
public Bucket createBucket(String bucketName, Region region)
throws SdkClientException, AmazonServiceException {
return createBucket(new CreateBucketRequest(bucketName, region));
}
@Override
@Deprecated
public Bucket createBucket(String bucketName, String region)
throws SdkClientException, AmazonServiceException {
return createBucket(new CreateBucketRequest(bucketName, region));
}
@Override
public Bucket createBucket(CreateBucketRequest createBucketRequest)
throws SdkClientException, AmazonServiceException {
createBucketRequest = beforeClientExecution(createBucketRequest);
rejectNull(createBucketRequest,
"The CreateBucketRequest parameter must be specified when creating a bucket");
String bucketName = createBucketRequest.getBucketName();
rejectNull(bucketName, "The bucket name parameter must be specified when creating a bucket");
bucketName = bucketName.trim();
String requestRegion = createBucketRequest.getRegion();
URI requestEndpoint = getCreateBucketEndpoint(requestRegion);
BucketNameUtils.validateBucketName(bucketName);
Request<CreateBucketRequest> request = createRequest(bucketName, null, createBucketRequest, HttpMethodName.PUT, requestEndpoint);
request = addIAMHeaders(request, createBucketRequest);
if (createBucketRequest.getAccessControlList() != null) {
addAclHeaders(request, createBucketRequest.getAccessControlList());
} else if (createBucketRequest.getCannedAcl() != null) {
request.addHeader(Headers.S3_CANNED_ACL, createBucketRequest.getCannedAcl().toString());
}
/*
* If we're talking to a region-specific endpoint other than the US, we
* *must* specify a location constraint. Try to derive the region from
* the endpoint.
*/
if (getSignerRegion() != null && !getSignerRegion().equals("us-east-1") && StringUtils.isNullOrEmpty(requestRegion)) {
requestRegion = AwsHostNameUtils.parseRegion(requestEndpoint.getHost(), S3_SERVICE_NAME);
}
/*
* We can only send the CreateBucketConfiguration if we're *not*
* creating a bucket in the US region.
*/
if (requestRegion != null && !StringUtils.upperCase(requestRegion).equals(Region.US_Standard.toString())) {
XmlWriter xml = new XmlWriter();
xml.start("CreateBucketConfiguration", "xmlns", Constants.XML_NAMESPACE);
xml.start("LocationConstraint").value(requestRegion).end();
xml.end();
request.setContent(new ByteArrayInputStream(xml.getBytes()));
}
invoke(request, voidResponseHandler, bucketName, null);
return new Bucket(bucketName);
}
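// Sketch of creating a bucket with an explicit location constraint (the bucket
// name and region string are placeholders); when the region is not the default,
// the CreateBucketConfiguration XML built above is sent as the request body.
//
//     Bucket bucket = s3.createBucket(new CreateBucketRequest("my-bucket", "us-south"));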
private URI getCreateBucketEndpoint(String requestRegion) {
// Route to the default endpoint if they're not trying to specify a different one in the request.
if(requestRegion == null || requestRegion.equals(clientRegion) || !clientOptions.isForceGlobalBucketAccessEnabled()) {
return endpoint;
}
// If they enabled global bucket access and they're trying to create a bucket in a region different than the default
// one specified when they created the client, it will probably fail because only us-east-1 (actually the global
// endpoint) is capable of creating buckets outside of its region. Override the endpoint to which the request
// is routed so that it will succeed.
com.ibm.cloud.objectstorage.regions.Region targetRegion = com.ibm.cloud.objectstorage.regions.Region.getRegion(Regions.fromName(requestRegion));
return new DefaultServiceEndpointBuilder(getEndpointPrefix(),
clientConfiguration.getProtocol().toString()).withRegion(targetRegion)
.getServiceEndpoint();
}
@Override
public AccessControlList getObjectAcl(String bucketName, String key)
throws SdkClientException, AmazonServiceException {
return getObjectAcl(new GetObjectAclRequest(bucketName, key));
}
@Override
public AccessControlList getObjectAcl(String bucketName, String key, String versionId)
throws SdkClientException, AmazonServiceException {
return getObjectAcl(new GetObjectAclRequest(bucketName, key, versionId));
}
@Override
public AccessControlList getObjectAcl(GetObjectAclRequest getObjectAclRequest) {
getObjectAclRequest = beforeClientExecution(getObjectAclRequest);
rejectNull(getObjectAclRequest, "The request parameter must be specified when requesting an object's ACL");
rejectNull(getObjectAclRequest.getBucketName(), "The bucket name parameter must be specified when requesting an object's ACL");
rejectNull(getObjectAclRequest.getKey(), "The key parameter must be specified when requesting an object's ACL");
addHeaderIfNotEmptyForAwsRequest(
getObjectAclRequest, Headers.MIRROR_DESTINATION, getObjectAclRequest.getWormMirrorDestination());
return getAcl(getObjectAclRequest.getBucketName(), getObjectAclRequest.getKey(),
getObjectAclRequest.getVersionId(), getObjectAclRequest.isRequesterPays(),
getObjectAclRequest);
}
@Override
public void setObjectAcl(String bucketName, String key, AccessControlList acl)
throws SdkClientException, AmazonServiceException {
setObjectAcl(bucketName, key, null, acl);
}
@Override
public void setObjectAcl(String bucketName, String key, CannedAccessControlList acl)
throws SdkClientException, AmazonServiceException {
setObjectAcl(bucketName, key, null, acl);
}
@Override
public void setObjectAcl(String bucketName, String key, String versionId, AccessControlList acl)
throws SdkClientException, AmazonServiceException {
setObjectAcl(new SetObjectAclRequest(bucketName, key, versionId, acl));
}
/**
* Same as {@link #setObjectAcl(String, String, String, AccessControlList)}
* but allows specifying a request metric collector.
*/
public void setObjectAcl(String bucketName, String key, String versionId,
AccessControlList acl, RequestMetricCollector requestMetricCollector)
throws SdkClientException, AmazonServiceException {
setObjectAcl(new SetObjectAclRequest(bucketName, key, versionId, acl)
.withRequestMetricCollector(requestMetricCollector));
}
@Override
public void setObjectAcl(String bucketName, String key, String versionId, CannedAccessControlList acl)
throws SdkClientException, AmazonServiceException {
setObjectAcl(new SetObjectAclRequest(bucketName, key, versionId, acl));
}
/**
* Same as {@link #setObjectAcl(String, String, String, CannedAccessControlList)}
* but allows specifying a request metric collector.
*/
public void setObjectAcl(String bucketName, String key, String versionId,
CannedAccessControlList acl,
RequestMetricCollector requestMetricCollector) {
setObjectAcl(new SetObjectAclRequest(bucketName, key, versionId, acl)
.withRequestMetricCollector(requestMetricCollector));
}
@Override
public void setObjectAcl(SetObjectAclRequest setObjectAclRequest)
throws SdkClientException, AmazonServiceException {
setObjectAclRequest = beforeClientExecution(setObjectAclRequest);
rejectNull(setObjectAclRequest,
"The request must not be null.");
rejectNull(setObjectAclRequest.getBucketName(),
"The bucket name parameter must be specified when setting an object's ACL");
rejectNull(setObjectAclRequest.getKey(),
"The key parameter must be specified when setting an object's ACL");
if (setObjectAclRequest.getAcl() != null && setObjectAclRequest.getCannedAcl() != null) {
throw new IllegalArgumentException(
"Only one of the ACL and CannedACL parameters can be specified, not both.");
}
if (setObjectAclRequest.getAcl() != null) {
setAcl(setObjectAclRequest.getBucketName(),
setObjectAclRequest.getKey(),
setObjectAclRequest.getVersionId(),
setObjectAclRequest.getAcl(),
setObjectAclRequest.isRequesterPays(),
setObjectAclRequest);
} else if (setObjectAclRequest.getCannedAcl() != null) {
setAcl(setObjectAclRequest.getBucketName(),
setObjectAclRequest.getKey(),
setObjectAclRequest.getVersionId(),
setObjectAclRequest.getCannedAcl(),
setObjectAclRequest.isRequesterPays(),
setObjectAclRequest);
} else {
throw new IllegalArgumentException(
"At least one of the ACL and CannedACL parameters should be specified");
}
}
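// Usage sketch (bucket and key are placeholders): either a full AccessControlList
// or a canned ACL may be supplied, but not both, as enforced above.
//
//     s3.setObjectAcl("my-bucket", "my-key", CannedAccessControlList.PublicRead);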
/**
* {@inheritDoc}
* @see #getBucketAcl(String)
*/
@Override
public AccessControlList getBucketAcl(String bucketName)
throws SdkClientException, AmazonServiceException {
return getBucketAcl(new GetBucketAclRequest(bucketName));
}
@Override
public AccessControlList getBucketAcl(GetBucketAclRequest getBucketAclRequest)
throws SdkClientException, AmazonServiceException {
getBucketAclRequest = beforeClientExecution(getBucketAclRequest);
String bucketName = getBucketAclRequest.getBucketName();
rejectNull(bucketName, "The bucket name parameter must be specified when requesting a bucket's ACL");
return getAcl(bucketName, null, null, false, getBucketAclRequest);
}
@Override
public void setBucketAcl(String bucketName, AccessControlList acl)
throws SdkClientException, AmazonServiceException {
setBucketAcl(new SetBucketAclRequest(bucketName, acl));
}
/**
* Same as {@link #setBucketAcl(String, AccessControlList)}
* but allows specifying a request metric collector.
*/
public void setBucketAcl(String bucketName, AccessControlList acl,
RequestMetricCollector requestMetricCollector) {
SetBucketAclRequest request = new SetBucketAclRequest(bucketName, acl)
.withRequestMetricCollector(requestMetricCollector);
setBucketAcl(request);
}
@Override
public void setBucketAcl(String bucketName, CannedAccessControlList cannedAcl)
throws SdkClientException, AmazonServiceException {
setBucketAcl(new SetBucketAclRequest(bucketName, cannedAcl));
}
/**
* Same as {@link #setBucketAcl(String, CannedAccessControlList)}
* but allows specifying a request metric collector.
*/
public void setBucketAcl(String bucketName, CannedAccessControlList cannedAcl,
RequestMetricCollector requestMetricCollector) throws SdkClientException,
AmazonServiceException {
SetBucketAclRequest request = new SetBucketAclRequest(bucketName, cannedAcl)
.withRequestMetricCollector(requestMetricCollector);
setBucketAcl(request);
}
@Override
public void setBucketAcl(SetBucketAclRequest setBucketAclRequest)
throws SdkClientException, AmazonServiceException {
setBucketAclRequest = beforeClientExecution(setBucketAclRequest);
String bucketName = setBucketAclRequest.getBucketName();
rejectNull(bucketName, "The bucket name parameter must be specified when setting a bucket's ACL");
AccessControlList acl = setBucketAclRequest.getAcl();
CannedAccessControlList cannedAcl = setBucketAclRequest.getCannedAcl();
if (acl == null && cannedAcl == null) {
throw new IllegalArgumentException(
"The ACL parameter must be specified when setting a bucket's ACL");
}
if (acl != null && cannedAcl != null) {
throw new IllegalArgumentException(
"Only one of the acl and cannedAcl parameter can be specified, not both.");
}
if (acl != null) {
setAcl(bucketName, null, null, acl, false, setBucketAclRequest);
} else {
setAcl(bucketName, null, null, cannedAcl, false, setBucketAclRequest);
}
}
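/*
 * Usage sketch (editor's addition, not part of the SDK source): a minimal example of
 * applying a bucket ACL through the methods above. The client variable "s3" and the
 * bucket name "my-bucket" are hypothetical.
 *
 *   // Apply a canned ACL directly.
 *   s3.setBucketAcl("my-bucket", CannedAccessControlList.Private);
 *
 *   // Or build the request object explicitly, which is what the String overloads delegate to.
 *   s3.setBucketAcl(new SetBucketAclRequest("my-bucket", CannedAccessControlList.PublicRead));
 *
 *   // Read the effective ACL back.
 *   AccessControlList acl = s3.getBucketAcl("my-bucket");
 */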
@Override
public ObjectMetadata getObjectMetadata(String bucketName, String key)
throws SdkClientException, AmazonServiceException {
return getObjectMetadata(new GetObjectMetadataRequest(bucketName, key));
}
@Override
public ObjectMetadata getObjectMetadata(GetObjectMetadataRequest getObjectMetadataRequest)
throws SdkClientException, AmazonServiceException {
getObjectMetadataRequest = beforeClientExecution(getObjectMetadataRequest);
rejectNull(getObjectMetadataRequest, "The GetObjectMetadataRequest parameter must be specified when requesting an object's metadata");
String bucketName = getObjectMetadataRequest.getBucketName();
String key = getObjectMetadataRequest.getKey();
String versionId = getObjectMetadataRequest.getVersionId();
rejectNull(bucketName, "The bucket name parameter must be specified when requesting an object's metadata");
rejectNull(key, "The key parameter must be specified when requesting an object's metadata");
Request<GetObjectMetadataRequest> request = createRequest(bucketName, key, getObjectMetadataRequest, HttpMethodName.HEAD);
if (versionId != null) request.addParameter("versionId", versionId);
populateRequesterPaysHeader(request, getObjectMetadataRequest.isRequesterPays());
addPartNumberIfNotNull(request, getObjectMetadataRequest.getPartNumber());
addHeaderIfNotEmpty(
request, Headers.MIRROR_DESTINATION, getObjectMetadataRequest.getWormMirrorDestination());
populateSSE_C(request, getObjectMetadataRequest.getSSECustomerKey());
return invoke(request, new S3MetadataResponseHandler(), bucketName, key);
}
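/*
 * Usage sketch (editor's addition): getObjectMetadata issues a HEAD request, so no object
 * content is transferred. The client "s3" and the names below are hypothetical.
 *
 *   ObjectMetadata head = s3.getObjectMetadata(new GetObjectMetadataRequest("my-bucket", "my-key"));
 *   long size = head.getContentLength();
 *   String etag = head.getETag();
 */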
@Override
public S3Object getObject(String bucketName, String key)
throws SdkClientException, AmazonServiceException {
return getObject(new GetObjectRequest(bucketName, key));
}
@Override
public boolean doesBucketExist(String bucketName)
throws SdkClientException, AmazonServiceException {
try {
ValidationUtils.assertStringNotEmpty(bucketName, "bucketName");
headBucket(new HeadBucketRequest(bucketName));
return true;
} catch (AmazonServiceException ase) {
// A redirect error or a forbidden error means the bucket exists. So
// returning true.
if ((ase.getStatusCode() == Constants.BUCKET_REDIRECT_STATUS_CODE)
|| (ase.getStatusCode() == Constants.BUCKET_ACCESS_FORBIDDEN_STATUS_CODE)) {
return true;
}
if (ase.getStatusCode() == Constants.NO_SUCH_BUCKET_STATUS_CODE) {
return false;
}
throw ase;
}
}
@Override
public boolean doesBucketExistV2(String bucketName) throws SdkClientException {
try {
ValidationUtils.assertStringNotEmpty(bucketName, "bucketName");
getBucketAcl(bucketName);
return true;
} catch (AmazonServiceException ase) {
// A redirect error or an AccessDenied exception means the bucket exists but it's not in this region
// or we don't have permissions to it.
if ((ase.getStatusCode() == Constants.BUCKET_REDIRECT_STATUS_CODE) || "AccessDenied".equals(ase.getErrorCode())) {
return true;
}
if (ase.getStatusCode() == Constants.NO_SUCH_BUCKET_STATUS_CODE) {
return false;
}
throw ase;
}
}
@Override
public boolean doesObjectExist(String bucketName, String objectName)
throws AmazonServiceException, SdkClientException {
try {
ValidationUtils.assertStringNotEmpty(bucketName, "bucketName");
ValidationUtils.assertStringNotEmpty(objectName, "objectName");
getObjectMetadata(bucketName, objectName);
return true;
} catch (AmazonS3Exception e) {
if (e.getStatusCode() == 404) {
return false;
}
throw e;
}
}
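/*
 * Usage sketch (editor's addition): the existence checks above translate service errors
 * (redirect/403/404) into booleans rather than exceptions. Names are hypothetical.
 *
 *   if (s3.doesBucketExistV2("my-bucket") && !s3.doesObjectExist("my-bucket", "my-key")) {
 *       // safe to create the object without overwriting anything
 *   }
 */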
@Override
public HeadBucketResult headBucket(HeadBucketRequest headBucketRequest)
throws SdkClientException, AmazonServiceException {
headBucketRequest = beforeClientExecution(headBucketRequest);
String bucketName = headBucketRequest.getBucketName();
rejectNull(bucketName,
"The bucketName parameter must be specified.");
Request<HeadBucketRequest> request = createRequest(bucketName, null,
headBucketRequest, HttpMethodName.HEAD);
return invoke(request, new HeadBucketResultHandler(), bucketName, null);
}
@Override
public void changeObjectStorageClass(String bucketName, String key, StorageClass newStorageClass)
throws SdkClientException, AmazonServiceException {
rejectNull(bucketName,
"The bucketName parameter must be specified when changing an object's storage class");
rejectNull(key,
"The key parameter must be specified when changing an object's storage class");
rejectNull(newStorageClass,
"The newStorageClass parameter must be specified when changing an object's storage class");
copyObject(new CopyObjectRequest(bucketName, key, bucketName, key)
.withStorageClass(newStorageClass.toString()));
}
@Override
public void setObjectRedirectLocation(String bucketName, String key, String newRedirectLocation)
throws SdkClientException, AmazonServiceException {
rejectNull(bucketName,
"The bucketName parameter must be specified when setting an object's redirect location");
rejectNull(key,
"The key parameter must be specified when setting an object's redirect location");
rejectNull(newRedirectLocation,
"The newRedirectLocation parameter must be specified when setting an object's redirect location");
copyObject(new CopyObjectRequest(bucketName, key, bucketName, key)
.withRedirectLocation(newRedirectLocation));
}
@Override
public S3Object getObject(GetObjectRequest getObjectRequest)
throws SdkClientException, AmazonServiceException {
getObjectRequest = beforeClientExecution(getObjectRequest);
assertNotNull(getObjectRequest, "GetObjectRequest");
assertStringNotEmpty(getObjectRequest.getBucketName(), "BucketName");
assertStringNotEmpty(getObjectRequest.getKey(), "Key");
Request<GetObjectRequest> request = createRequest(getObjectRequest.getBucketName(), getObjectRequest.getKey(), getObjectRequest, HttpMethodName.GET);
if (getObjectRequest.getVersionId() != null) {
request.addParameter("versionId", getObjectRequest.getVersionId());
}
addPartNumberIfNotNull(request, getObjectRequest.getPartNumber());
addHeaderIfNotEmpty(
request, Headers.MIRROR_DESTINATION, getObjectRequest.getWormMirrorDestination());
// Range
long[] range = getObjectRequest.getRange();
if (range != null) {
request.addHeader(Headers.RANGE, "bytes=" + Long.toString(range[0]) + "-" + Long.toString(range[1]));
}
populateRequesterPaysHeader(request, getObjectRequest.isRequesterPays());
addResponseHeaderParameters(request, getObjectRequest.getResponseHeaders());
addDateHeader(request, Headers.GET_OBJECT_IF_MODIFIED_SINCE,
getObjectRequest.getModifiedSinceConstraint());
addDateHeader(request, Headers.GET_OBJECT_IF_UNMODIFIED_SINCE,
getObjectRequest.getUnmodifiedSinceConstraint());
addStringListHeader(request, Headers.GET_OBJECT_IF_MATCH,
getObjectRequest.getMatchingETagConstraints());
addStringListHeader(request, Headers.GET_OBJECT_IF_NONE_MATCH,
getObjectRequest.getNonmatchingETagConstraints());
// Populate the SSE-C parameters to the request header
populateSSE_C(request, getObjectRequest.getSSECustomerKey());
final ProgressListener listener = getObjectRequest.getGeneralProgressListener();
publishProgress(listener, ProgressEventType.TRANSFER_STARTED_EVENT);
try {
S3Object s3Object = invoke(request, new S3ObjectResponseHandler(),
getObjectRequest.getBucketName(), getObjectRequest.getKey());
/*
* TODO: For now, it's easiest to set these here in the client, but
* we could push this back into the response handler with a
* little more work.
*/
s3Object.setBucketName(getObjectRequest.getBucketName());
s3Object.setKey(getObjectRequest.getKey());
InputStream is = s3Object.getObjectContent();
HttpRequestBase httpRequest = s3Object.getObjectContent().getHttpRequest();
// Hold a reference to this client while the InputStream is still
// around - otherwise a finalizer in the HttpClient may reset the
// underlying TCP connection out from under us.
is = new ServiceClientHolderInputStream(is, this);
// used to trigger a transfer complete event when the stream is entirely consumed
ProgressInputStream progressInputStream =
new ProgressInputStream(is, listener) {
@Override protected void onEOF() {
publishProgress(getListener(), ProgressEventType.TRANSFER_COMPLETED_EVENT);
}
};
is = progressInputStream;
// The Etag header contains a server-side MD5 of the object. If
// we're downloading the whole object, by default we wrap the
// stream in a validator that calculates an MD5 of the downloaded
// bytes and complains if what we received doesn't match the Etag.
if (!skipMd5CheckStrategy.skipClientSideValidation(getObjectRequest, s3Object.getObjectMetadata())) {
byte[] serverSideHash = BinaryUtils.fromHex(s3Object.getObjectMetadata().getETag());
try {
// No content length check is performed when the
// MD5 check is enabled, since a correct MD5 check would
// imply a correct content length.
MessageDigest digest = MessageDigest.getInstance("MD5");
is = new DigestValidationInputStream(is, digest, serverSideHash);
} catch (NoSuchAlgorithmException e) {
log.warn("No MD5 digest algorithm available. Unable to calculate "
+ "checksum and verify data integrity.", e);
}
} else {
// Ensures the data received from S3 has the same length as the
// expected content-length
is = new LengthCheckInputStream(is,
s3Object.getObjectMetadata().getContentLength(), // expected length
INCLUDE_SKIPPED_BYTES); // bytes received from S3 are all included even if skipped
}
s3Object.setObjectContent(new S3ObjectInputStream(is, httpRequest, false));
return s3Object;
} catch (AmazonS3Exception ase) {
/*
* If the request failed because one of the specified constraints
* was not met (ex: matching ETag, modified since date, etc.), then
* return null, so that users don't have to wrap their code in
* try/catch blocks and check for this status code if they want to
* use constraints.
*/
if (ase.getStatusCode() == 412 || ase.getStatusCode() == 304) {
publishProgress(listener, ProgressEventType.TRANSFER_CANCELED_EVENT);
return null;
}
publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT);
throw ase;
}
}
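/*
 * Usage sketch (editor's addition): a ranged GET returns only the requested bytes and, per
 * the logic above, falls back to a length check instead of client-side MD5 validation
 * because the ETag covers the whole object. The stream must be drained or closed to release
 * the connection. Names are hypothetical.
 *
 *   S3Object part = s3.getObject(new GetObjectRequest("my-bucket", "my-key").withRange(0, 1023));
 *   try {
 *       byte[] firstKb = IOUtils.toByteArray(part.getObjectContent());
 *   } finally {
 *       part.close();
 *   }
 */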
@Override
public ObjectMetadata getObject(final GetObjectRequest getObjectRequest, File destinationFile)
throws SdkClientException, AmazonServiceException {
rejectNull(destinationFile,
"The destination file parameter must be specified when downloading an object directly to a file");
S3Object s3Object = ServiceUtils.retryableDownloadS3ObjectToFile(destinationFile, new ServiceUtils.RetryableS3DownloadTask() {
@Override
public S3Object getS3ObjectStream() {
return getObject(getObjectRequest);
}
@Override
public boolean needIntegrityCheck() {
return !skipMd5CheckStrategy.skipClientSideValidationPerRequest(getObjectRequest);
}
}, ServiceUtils.OVERWRITE_MODE);
// getObject can return null if constraints were specified but not met
if (s3Object == null) return null;
return s3Object.getObjectMetadata();
}
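/*
 * Usage sketch (editor's addition): downloading straight to a file streams the content to
 * disk and can retry the download if the integrity check fails. Names are hypothetical.
 *
 *   ObjectMetadata md = s3.getObject(new GetObjectRequest("my-bucket", "my-key"),
 *           new File("/tmp/my-key.bin"));
 *   if (md == null) {
 *       // a constraint on the request (ETag / modified-since) was not met
 *   }
 */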
@Override
public String getObjectAsString(String bucketName, String key)
throws AmazonServiceException, SdkClientException {
rejectNull(bucketName, "Bucket name must be provided");
rejectNull(key, "Object key must be provided");
S3Object object = getObject(bucketName, key);
try {
return IOUtils.toString(object.getObjectContent());
} catch (IOException e) {
throw new SdkClientException("Error streaming content from S3 during download", e);
} finally {
IOUtils.closeQuietly(object, log);
}
}
@Override
public GetObjectTaggingResult getObjectTagging(GetObjectTaggingRequest getObjectTaggingRequest) {
getObjectTaggingRequest = beforeClientExecution(getObjectTaggingRequest);
rejectNull(getObjectTaggingRequest,
"The request parameter must be specified when getting the object tags");
String bucketName = assertStringNotEmpty(getObjectTaggingRequest.getBucketName(), "BucketName");
String key = assertNotNull(getObjectTaggingRequest.getKey(), "Key");
Request<GetObjectTaggingRequest> request = createRequest(bucketName, key, getObjectTaggingRequest, HttpMethodName.GET);
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "GetObjectTagging");
request.addParameter("tagging", null);
addParameterIfNotNull(request, "versionId", getObjectTaggingRequest.getVersionId());
ResponseHeaderHandlerChain<GetObjectTaggingResult> handlerChain = new ResponseHeaderHandlerChain<GetObjectTaggingResult>(
new Unmarshallers.GetObjectTaggingResponseUnmarshaller(),
new GetObjectTaggingResponseHeaderHandler()
);
return invoke(request, handlerChain, bucketName, key);
}
@Override
public SetObjectTaggingResult setObjectTagging(SetObjectTaggingRequest setObjectTaggingRequest) {
setObjectTaggingRequest = beforeClientExecution(setObjectTaggingRequest);
rejectNull(setObjectTaggingRequest,
"The request parameter must be specified when setting the object tags");
String bucketName = assertStringNotEmpty(setObjectTaggingRequest.getBucketName(), "BucketName");
String key = assertNotNull(setObjectTaggingRequest.getKey(), "Key");
ObjectTagging tagging = assertNotNull(setObjectTaggingRequest.getTagging(), "ObjectTagging");
Request<SetObjectTaggingRequest> request = createRequest(bucketName, key, setObjectTaggingRequest, HttpMethodName.PUT);
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "PutObjectTagging");
request.addParameter("tagging", null);
addParameterIfNotNull(request, "versionId", setObjectTaggingRequest.getVersionId());
byte[] content = new ObjectTaggingXmlFactory().convertToXmlByteArray(tagging);
setContent(request, content, "application/xml", true);
ResponseHeaderHandlerChain<SetObjectTaggingResult> handlerChain = new ResponseHeaderHandlerChain<SetObjectTaggingResult>(
new Unmarshallers.SetObjectTaggingResponseUnmarshaller(),
new SetObjectTaggingResponseHeaderHandler()
);
return invoke(request, handlerChain, bucketName, key);
}
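/*
 * Usage sketch (editor's addition): object tags are sent as an XML document on a PUT with
 * the "tagging" subresource, as built above. Names are hypothetical.
 *
 *   s3.setObjectTagging(new SetObjectTaggingRequest("my-bucket", "my-key",
 *           new ObjectTagging(Arrays.asList(new Tag("project", "demo")))));
 *
 *   GetObjectTaggingResult tags = s3.getObjectTagging(
 *           new GetObjectTaggingRequest("my-bucket", "my-key"));
 */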
@Override
public DeleteObjectTaggingResult deleteObjectTagging(DeleteObjectTaggingRequest deleteObjectTaggingRequest) {
deleteObjectTaggingRequest = beforeClientExecution(deleteObjectTaggingRequest);
rejectNull(deleteObjectTaggingRequest, "The request parameter must be specified when deleting the object tags");
String bucketName = assertStringNotEmpty(deleteObjectTaggingRequest.getBucketName(), "BucketName");
String key = assertStringNotEmpty(deleteObjectTaggingRequest.getKey(), "Key");
Request<DeleteObjectTaggingRequest> request = createRequest(bucketName, key, deleteObjectTaggingRequest, HttpMethodName.DELETE);
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteObjectTagging");
request.addParameter("tagging", null);
addParameterIfNotNull(request, "versionId", deleteObjectTaggingRequest.getVersionId());
ResponseHeaderHandlerChain<DeleteObjectTaggingResult> handlerChain = new ResponseHeaderHandlerChain<DeleteObjectTaggingResult>(
new Unmarshallers.DeleteObjectTaggingResponseUnmarshaller(),
new DeleteObjectTaggingHeaderHandler()
);
return invoke(request, handlerChain, bucketName, key);
}
@Override
public void deleteBucket(String bucketName)
throws SdkClientException, AmazonServiceException {
deleteBucket(new DeleteBucketRequest(bucketName));
}
@Override
public void deleteBucket(DeleteBucketRequest deleteBucketRequest)
throws SdkClientException, AmazonServiceException {
deleteBucketRequest = beforeClientExecution(deleteBucketRequest);
rejectNull(deleteBucketRequest,
"The DeleteBucketRequest parameter must be specified when deleting a bucket");
String bucketName = deleteBucketRequest.getBucketName();
rejectNull(bucketName,
"The bucket name parameter must be specified when deleting a bucket");
Request<DeleteBucketRequest> request = createRequest(bucketName, null, deleteBucketRequest, HttpMethodName.DELETE);
invoke(request, voidResponseHandler, bucketName, null);
bucketRegionCache.remove(bucketName);
}
@Override
public PutObjectResult putObject(String bucketName, String key, File file)
throws SdkClientException, AmazonServiceException {
return putObject(new PutObjectRequest(bucketName, key, file)
.withMetadata(new ObjectMetadata()));
}
@Override
public PutObjectResult putObject(String bucketName, String key, InputStream input, ObjectMetadata metadata)
throws SdkClientException, AmazonServiceException {
return putObject(new PutObjectRequest(bucketName, key, input, metadata));
}
@Override
public PutObjectResult putObject(PutObjectRequest putObjectRequest)
throws SdkClientException, AmazonServiceException {
putObjectRequest = beforeClientExecution(putObjectRequest);
rejectNull(putObjectRequest, "The PutObjectRequest parameter must be specified when uploading an object");
final File file = putObjectRequest.getFile();
final InputStream isOrig = putObjectRequest.getInputStream();
final String bucketName = putObjectRequest.getBucketName();
final String key = putObjectRequest.getKey();
ObjectMetadata metadata = putObjectRequest.getMetadata();
InputStream input = isOrig;
if (metadata == null)
metadata = new ObjectMetadata();
rejectNull(bucketName, "The bucket name parameter must be specified when uploading an object");
rejectNull(key, "The key parameter must be specified when uploading an object");
// If a file is specified for upload, we need to pull some additional
// information from it to auto-configure a few options
if (file == null) {
// When input is a FileInputStream, this wrapping enables
// unlimited mark-and-reset
if (input != null)
input = ReleasableInputStream.wrap(input);
} else {
// Always set the content length, even if it's already set
metadata.setContentLength(file.length());
final boolean calculateMD5 = metadata.getContentMD5() == null;
// Only set the content type if it hasn't already been set
if (metadata.getContentType() == null) {
metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
}
if (calculateMD5 && !skipMd5CheckStrategy.skipServerSideValidation(putObjectRequest)) {
try {
String contentMd5_b64 = Md5Utils.md5AsBase64(file);
metadata.setContentMD5(contentMd5_b64);
} catch (Exception e) {
throw new SdkClientException(
"Unable to calculate MD5 hash: " + e.getMessage(), e);
}
}
input = newResettableInputStream(file, "Unable to find file to upload");
}
final ProgressListener listener;
final ObjectMetadata returnedMetadata;
MD5DigestCalculatingInputStream md5DigestStream = null;
try {
Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT);
// Make backward compatible with buffer size via system property
final Integer bufsize = Constants.getS3StreamBufferSize();
if (bufsize != null) {
AmazonWebServiceRequest awsreq = request.getOriginalRequest();
// Note awsreq is never null at this point even if the original
// request was null
awsreq.getRequestClientOptions()
.setReadLimit(bufsize.intValue());
}
if ( putObjectRequest.getAccessControlList() != null) {
addAclHeaders(request, putObjectRequest.getAccessControlList());
} else if ( putObjectRequest.getCannedAcl() != null ) {
request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
}
if (putObjectRequest.getStorageClass() != null) {
request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
}
if (putObjectRequest.getRedirectLocation() != null) {
request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation());
if (input == null) {
input = new ByteArrayInputStream(new byte[0]);
}
}
addHeaderIfNotNull(request, Headers.S3_TAGGING, urlEncodeTags(putObjectRequest.getTagging()));
populateRequesterPaysHeader(request, putObjectRequest.isRequesterPays());
// Populate the SSE-C parameters to the request header
populateSSE_C(request, putObjectRequest.getSSECustomerKey());
// Populate the SSE AWS KMS parameters to the request header
populateSSE_KMS(request,
putObjectRequest.getSSEAwsKeyManagementParams());
// Populate the object retention parameters to the request header
if (putObjectRequest.getRetentionExpirationDate() != null) {
request.addHeader(Headers.RETENTION_EXPIRATION_DATE,
DateUtils.formatRFC822Date(putObjectRequest.getRetentionExpirationDate()));
}
if (putObjectRequest.getRetentionLegalHoldId() != null) {
request.addHeader(Headers.RETENTION_LEGAL_HOLD_ID,
putObjectRequest.getRetentionLegalHoldId());
}
if (putObjectRequest.getRetentionPeriod() != null) {
request.addHeader(Headers.RETENTION_PERIOD,
putObjectRequest.getRetentionPeriod().toString());
}
// Use internal interface to differentiate 0 from unset.
final Long contentLength = (Long)metadata.getRawMetadataValue(Headers.CONTENT_LENGTH);
if (contentLength == null) {
/*
* There's nothing we can do except let the HTTP client buffer
* the input stream contents if the caller doesn't tell us how much
* data to expect in a stream since we have to explicitly tell
* Amazon S3 how much we're sending before we start sending any of
* it.
*/
log.warn("No content length specified for stream data. " +
"Stream contents will be buffered in memory and could result in " +
"out of memory errors.");
} else {
final long expectedLength = contentLength.longValue();
if (expectedLength >= 0) {
// Performs length check on the underlying data stream.
// For S3 encryption client, the underlying data stream here
// refers to the cipher-text data stream (ie not the underlying
// plain-text data stream which in turn may have been wrapped
* with its own length check input stream.)
LengthCheckInputStream lcis = new LengthCheckInputStream(
input,
expectedLength, // expected data length to be uploaded
EXCLUDE_SKIPPED_BYTES);
input = lcis;
}
}
if (metadata.getContentMD5() == null
&& !skipMd5CheckStrategy.skipClientSideValidationPerRequest(putObjectRequest)) {
/*
* If the user hasn't set the content MD5, then we don't want to buffer the whole
* stream in memory just to calculate it. Instead, we can calculate it on the fly
* and validate it with the returned ETag from the object upload.
*/
input = md5DigestStream = new MD5DigestCalculatingInputStream(input);
}
if (metadata.getContentType() == null) {
/*
* Default to the "application/octet-stream" if the user hasn't
* specified a content type.
*/
metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
}
populateRequestMetadata(request, metadata);
request.setContent(input);
listener = putObjectRequest.getGeneralProgressListener();
publishProgress(listener, ProgressEventType.TRANSFER_STARTED_EVENT);
try {
returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
} catch (Throwable t) {
publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT);
throw failure(t);
}
} finally {
cleanupDataSource(putObjectRequest, file, isOrig, input, log);
}
String contentMd5 = metadata.getContentMD5();
if (md5DigestStream != null) {
contentMd5 = Base64.encodeAsString(md5DigestStream.getMd5Digest());
}
final String etag = returnedMetadata.getETag();
if (contentMd5 != null && !skipMd5CheckStrategy.skipClientSideValidationPerPutResponse(returnedMetadata)) {
byte[] clientSideHash = BinaryUtils.fromBase64(contentMd5);
byte[] serverSideHash = BinaryUtils.fromHex(etag);
if (!Arrays.equals(clientSideHash, serverSideHash)) {
publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT);
throw new SdkClientException(
"Unable to verify integrity of data upload. "
+ "Client calculated content hash (contentMD5: "
+ contentMd5
+ " in base 64) didn't match hash (etag: "
+ etag
+ " in hex) calculated by Amazon S3. "
+ "You may need to delete the data stored in Amazon S3. (metadata.contentMD5: "
+ metadata.getContentMD5()
+ ", md5DigestStream: " + md5DigestStream
+ ", bucketName: " + bucketName + ", key: " + key
+ ")");
}
}
publishProgress(listener, ProgressEventType.TRANSFER_COMPLETED_EVENT);
final PutObjectResult result = createPutObjectResult(returnedMetadata);
result.setContentMd5(contentMd5);
return result;
}
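/*
 * Usage sketch (editor's addition): when uploading from a stream, supplying the content
 * length up front avoids the in-memory buffering warned about above; the MD5 returned in
 * the result is computed on the fly by the digest stream. Names are hypothetical.
 *
 *   byte[] data = "hello".getBytes(StandardCharsets.UTF_8);
 *   ObjectMetadata meta = new ObjectMetadata();
 *   meta.setContentLength(data.length);
 *   meta.setContentType("text/plain");
 *   PutObjectResult result = s3.putObject(
 *           new PutObjectRequest("my-bucket", "my-key", new ByteArrayInputStream(data), meta));
 *   String md5 = result.getContentMd5();
 */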
private static PutObjectResult createPutObjectResult(ObjectMetadata metadata) {
final PutObjectResult result = new PutObjectResult();
result.setVersionId(metadata.getVersionId());
result.setSSEAlgorithm(metadata.getSSEAlgorithm());
result.setSSECustomerAlgorithm(metadata.getSSECustomerAlgorithm());
result.setSSECustomerKeyMd5(metadata.getSSECustomerKeyMd5());
result.setExpirationTime(metadata.getExpirationTime());
result.setExpirationTimeRuleId(metadata.getExpirationTimeRuleId());
result.setETag(metadata.getETag());
result.setMetadata(metadata);
result.setRequesterCharged(metadata.isRequesterCharged());
return result;
}
/**
* Sets the access control headers for the request given.
*/
private static void addAclHeaders(Request<? extends AmazonWebServiceRequest> request, AccessControlList acl) {
List<Grant> grants = acl.getGrantsAsList();
Map<Permission, Collection<Grantee>> grantsByPermission = new HashMap<Permission, Collection<Grantee>>();
for ( Grant grant : grants ) {
if ( !grantsByPermission.containsKey(grant.getPermission()) ) {
grantsByPermission.put(grant.getPermission(), new LinkedList<Grantee>());
}
grantsByPermission.get(grant.getPermission()).add(grant.getGrantee());
}
for ( Permission permission : Permission.values() ) {
if ( grantsByPermission.containsKey(permission) ) {
Collection<Grantee> grantees = grantsByPermission.get(permission);
boolean seenOne = false;
StringBuilder granteeString = new StringBuilder();
for ( Grantee grantee : grantees ) {
if ( !seenOne )
seenOne = true;
else
granteeString.append(", ");
granteeString.append(grantee.getTypeIdentifier()).append("=").append("\"")
.append(grantee.getIdentifier()).append("\"");
}
request.addHeader(permission.getHeaderName(), granteeString.toString());
}
}
}
@Override
public CopyObjectResult copyObject(String sourceBucketName, String sourceKey,
String destinationBucketName, String destinationKey)
throws SdkClientException, AmazonServiceException {
return copyObject(new CopyObjectRequest(sourceBucketName, sourceKey,
destinationBucketName, destinationKey));
}
@Override
public CopyObjectResult copyObject(CopyObjectRequest copyObjectRequest)
throws SdkClientException, AmazonServiceException {
copyObjectRequest = beforeClientExecution(copyObjectRequest);
rejectNull(copyObjectRequest.getSourceBucketName(),
"The source bucket name must be specified when copying an object");
rejectNull(copyObjectRequest.getSourceKey(),
"The source object key must be specified when copying an object");
rejectNull(copyObjectRequest.getDestinationBucketName(),
"The destination bucket name must be specified when copying an object");
rejectNull(copyObjectRequest.getDestinationKey(),
"The destination object key must be specified when copying an object");
String destinationKey = copyObjectRequest.getDestinationKey();
String destinationBucketName = copyObjectRequest.getDestinationBucketName();
Request<CopyObjectRequest> request = createRequest(destinationBucketName, destinationKey, copyObjectRequest, HttpMethodName.PUT);
populateRequestWithCopyObjectParameters(request, copyObjectRequest);
// Populate the SSE AWS KMS parameters to the request header
populateSSE_KMS(request,
copyObjectRequest.getSSEAwsKeyManagementParams());
/*
* We can't send a non-zero Content-Length header, even if the user
* specified one: the request has no payload, and a non-zero value would
* make the remote server expect more data than we send.
*/
setZeroContentLength(request);
CopyObjectResultHandler copyObjectResultHandler = null;
try {
@SuppressWarnings("unchecked")
ResponseHeaderHandlerChain<CopyObjectResultHandler> handler = new ResponseHeaderHandlerChain<CopyObjectResultHandler>(
// xml payload unmarshaller
new Unmarshallers.CopyObjectUnmarshaller(),
// header handlers
new ServerSideEncryptionHeaderHandler<CopyObjectResultHandler>(),
new S3VersionHeaderHandler<CopyObjectResultHandler>(),
new ObjectExpirationHeaderHandler<CopyObjectResultHandler>(),
new S3RequesterChargedHeaderHandler<CopyObjectResultHandler>());
copyObjectResultHandler = invoke(request, handler, destinationBucketName, destinationKey);
} catch (AmazonS3Exception ase) {
/*
* If the request failed because one of the specified constraints
* was not met (ex: matching ETag, modified since date, etc.), then
* return null, so that users don't have to wrap their code in
* try/catch blocks and check for this status code if they want to
* use constraints.
*/
if (ase.getStatusCode() == Constants.FAILED_PRECONDITION_STATUS_CODE) {
return null;
}
throw ase;
}
/*
* CopyObject has two failure modes:
* 1 - An HTTP error code is returned and the error is processed like any
* other error response.
* 2 - An HTTP 200 OK code is returned, but the response content contains
* an XML error response.
*
* This makes it very difficult for the client runtime to cleanly detect
* this case and handle it like any other error response. We could
* extend the runtime to have a more flexible/customizable definition of
* success/error (per request), but it's probably overkill for this
* one special case.
*/
if (copyObjectResultHandler.getErrorCode() != null) {
String errorCode = copyObjectResultHandler.getErrorCode();
String errorMessage = copyObjectResultHandler.getErrorMessage();
String requestId = copyObjectResultHandler.getErrorRequestId();
String hostId = copyObjectResultHandler.getErrorHostId();
AmazonS3Exception ase = new AmazonS3Exception(errorMessage);
ase.setErrorCode(errorCode);
ase.setErrorType(ErrorType.Service);
ase.setRequestId(requestId);
ase.setExtendedRequestId(hostId);
ase.setServiceName(request.getServiceName());
ase.setStatusCode(200);
throw ase;
}
// TODO: Might be nice to create this in our custom S3VersionHeaderHandler
CopyObjectResult copyObjectResult = new CopyObjectResult();
copyObjectResult.setETag(copyObjectResultHandler.getETag());
copyObjectResult.setLastModifiedDate(copyObjectResultHandler.getLastModified());
copyObjectResult.setVersionId(copyObjectResultHandler.getVersionId());
copyObjectResult.setSSEAlgorithm(copyObjectResultHandler.getSSEAlgorithm());
copyObjectResult.setSSECustomerAlgorithm(copyObjectResultHandler.getSSECustomerAlgorithm());
copyObjectResult.setSSECustomerKeyMd5(copyObjectResultHandler.getSSECustomerKeyMd5());
copyObjectResult.setExpirationTime(copyObjectResultHandler.getExpirationTime());
copyObjectResult.setExpirationTimeRuleId(copyObjectResultHandler.getExpirationTimeRuleId());
copyObjectResult.setRequesterCharged(copyObjectResultHandler.isRequesterCharged());
return copyObjectResult;
}
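/*
 * Usage sketch (editor's addition): a server-side copy; as documented above, a null result
 * means a copy constraint (e.g. a matching-ETag condition) was not satisfied. Names are
 * hypothetical.
 *
 *   CopyObjectResult copy = s3.copyObject(
 *           new CopyObjectRequest("src-bucket", "src-key", "dst-bucket", "dst-key")
 *                   .withStorageClass(StorageClass.Standard));
 *   if (copy != null) {
 *       String newEtag = copy.getETag();
 *   }
 */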
/**
* Copies a source object to a part of a multipart upload.
* <p>
* To copy an object, the caller's account must have read access to the source object and
* write access to the destination bucket.
* </p>
* <p>
* If constraints are specified in the <code>CopyPartRequest</code>
* (e.g. {@link CopyPartRequest#setMatchingETagConstraints(List)})
* and are not satisfied when Amazon S3 receives the
* request, this method returns <code>null</code>.
* This method returns a non-null result under all other
* circumstances.
* </p>
*
* @param copyPartRequest
*            The request object containing all the options for copying an
*            Amazon S3 object.
*
* @return A {@link CopyPartResult} object containing the information
*         returned by Amazon S3 about the newly created object, or <code>null</code> if
*         constraints were specified that weren't met when Amazon S3 attempted
*         to copy the object.
*
* @throws SdkClientException
*             If any errors are encountered in the client while making the
*             request or handling the response.
* @throws AmazonServiceException
*             If any errors occurred in Amazon S3 while processing the
*             request.
*
* @see AmazonS3#copyObject(CopyObjectRequest)
* @see AmazonS3#initiateMultipartUpload(InitiateMultipartUploadRequest)
*/
@Override
public CopyPartResult copyPart(CopyPartRequest copyPartRequest) {
copyPartRequest = beforeClientExecution(copyPartRequest);
rejectNull(copyPartRequest.getSourceBucketName(),
"The source bucket name must be specified when copying a part");
rejectNull(copyPartRequest.getSourceKey(),
"The source object key must be specified when copying a part");
rejectNull(copyPartRequest.getDestinationBucketName(),
"The destination bucket name must be specified when copying a part");
rejectNull(copyPartRequest.getUploadId(),
"The upload id must be specified when copying a part");
rejectNull(copyPartRequest.getDestinationKey(),
"The destination object key must be specified when copying a part");
rejectNull(copyPartRequest.getPartNumber(),
"The part number must be specified when copying a part");
String destinationKey = copyPartRequest.getDestinationKey();
String destinationBucketName = copyPartRequest.getDestinationBucketName();
Request<CopyPartRequest> request = createRequest(destinationBucketName, destinationKey, copyPartRequest,
HttpMethodName.PUT);
populateRequestWithCopyPartParameters(request, copyPartRequest);
request.addParameter("uploadId", copyPartRequest.getUploadId());
request.addParameter("partNumber", Integer.toString(copyPartRequest.getPartNumber()));
populateRequesterPaysHeader(request, copyPartRequest.isRequesterPays());
/*
* We can't send a non-zero Content-Length header, even if the user
* specified one: the request has no payload, and a non-zero value would
* make the remote server expect more data than we send.
*/
setZeroContentLength(request);
CopyObjectResultHandler copyObjectResultHandler = null;
try {
@SuppressWarnings("unchecked")
ResponseHeaderHandlerChain<CopyObjectResultHandler> handler = new ResponseHeaderHandlerChain<CopyObjectResultHandler>(
// xml payload unmarshaller
new Unmarshallers.CopyObjectUnmarshaller(),
// header handlers
new ServerSideEncryptionHeaderHandler<CopyObjectResultHandler>(),
new S3VersionHeaderHandler<CopyObjectResultHandler>());
copyObjectResultHandler = invoke(request, handler, destinationBucketName, destinationKey);
} catch ( AmazonS3Exception ase ) {
/*
* If the request failed because one of the specified constraints
* was not met (ex: matching ETag, modified since date, etc.), then
* return null, so that users don't have to wrap their code in
* try/catch blocks and check for this status code if they want to
* use constraints.
*/
if ( ase.getStatusCode() == Constants.FAILED_PRECONDITION_STATUS_CODE ) {
return null;
}
throw ase;
}
/*
* CopyPart has two failure modes: 1 - An HTTP error code is returned
* and the error is processed like any other error response. 2 - An HTTP
* 200 OK code is returned, but the response content contains an XML
* error response.
*
* This makes it very difficult for the client runtime to cleanly detect
* this case and handle it like any other error response. We could
* extend the runtime to have a more flexible/customizable definition of
* success/error (per request), but it's probably overkill for this one
* special case.
*/
if ( copyObjectResultHandler.getErrorCode() != null ) {
String errorCode = copyObjectResultHandler.getErrorCode();
String errorMessage = copyObjectResultHandler.getErrorMessage();
String requestId = copyObjectResultHandler.getErrorRequestId();
String hostId = copyObjectResultHandler.getErrorHostId();
AmazonS3Exception ase = new AmazonS3Exception(errorMessage);
ase.setErrorCode(errorCode);
ase.setErrorType(ErrorType.Service);
ase.setRequestId(requestId);
ase.setExtendedRequestId(hostId);
ase.setServiceName(request.getServiceName());
ase.setStatusCode(200);
throw ase;
}
CopyPartResult copyPartResult = new CopyPartResult();
copyPartResult.setETag(copyObjectResultHandler.getETag());
copyPartResult.setPartNumber(copyPartRequest.getPartNumber());
copyPartResult.setLastModifiedDate(copyObjectResultHandler.getLastModified());
copyPartResult.setVersionId(copyObjectResultHandler.getVersionId());
copyPartResult.setSSEAlgorithm(copyObjectResultHandler.getSSEAlgorithm());
copyPartResult.setSSECustomerAlgorithm(copyObjectResultHandler.getSSECustomerAlgorithm());
copyPartResult.setSSECustomerKeyMd5(copyObjectResultHandler.getSSECustomerKeyMd5());
return copyPartResult;
}
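/*
 * Usage sketch (editor's addition): copyPart is used inside a multipart upload to copy a
 * byte range of an existing object as one part. The surrounding initiate/complete calls
 * and all names are hypothetical.
 *
 *   CopyPartResult part1 = s3.copyPart(new CopyPartRequest()
 *           .withSourceBucketName("src-bucket").withSourceKey("big-object")
 *           .withDestinationBucketName("dst-bucket").withDestinationKey("copy")
 *           .withUploadId(uploadId)            // from initiateMultipartUpload
 *           .withPartNumber(1)
 *           .withFirstByte(0L).withLastByte(5L * 1024 * 1024 - 1));
 *   // part1.getPartETag() is later passed to completeMultipartUpload
 */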
@Override
public void deleteObject(String bucketName, String key)
throws SdkClientException, AmazonServiceException {
deleteObject(new DeleteObjectRequest(bucketName, key));
}
@Override
public void deleteObject(DeleteObjectRequest deleteObjectRequest)
throws SdkClientException, AmazonServiceException {
deleteObjectRequest = beforeClientExecution(deleteObjectRequest);
rejectNull(deleteObjectRequest,
"The delete object request must be specified when deleting an object");
rejectNull(deleteObjectRequest.getBucketName(), "The bucket name must be specified when deleting an object");
rejectNull(deleteObjectRequest.getKey(), "The key must be specified when deleting an object");
Request<DeleteObjectRequest> request = createRequest(deleteObjectRequest.getBucketName(), deleteObjectRequest.getKey(), deleteObjectRequest, HttpMethodName.DELETE);
invoke(request, voidResponseHandler, deleteObjectRequest.getBucketName(), deleteObjectRequest.getKey());
}
@Override
public DeleteObjectsResult deleteObjects(DeleteObjectsRequest deleteObjectsRequest) {
deleteObjectsRequest = beforeClientExecution(deleteObjectsRequest);
Request<DeleteObjectsRequest> request = createRequest(deleteObjectsRequest.getBucketName(), null, deleteObjectsRequest, HttpMethodName.POST);
request.addParameter("delete", null);
if ( deleteObjectsRequest.getMfa() != null ) {
populateRequestWithMfaDetails(request, deleteObjectsRequest.getMfa());
}
populateRequesterPaysHeader(request, deleteObjectsRequest.isRequesterPays());
byte[] content = new MultiObjectDeleteXmlFactory().convertToXmlByteArray(deleteObjectsRequest);
request.addHeader("Content-Length", String.valueOf(content.length));
request.addHeader("Content-Type", "application/xml");
request.setContent(new ByteArrayInputStream(content));
try {
byte[] md5 = Md5Utils.computeMD5Hash(content);
String md5Base64 = BinaryUtils.toBase64(md5);
request.addHeader("Content-MD5", md5Base64);
} catch ( Exception e ) {
throw new SdkClientException("Couldn't compute md5 sum", e);
}
@SuppressWarnings("unchecked")
ResponseHeaderHandlerChain<DeleteObjectsResponse> responseHandler = new ResponseHeaderHandlerChain<DeleteObjectsResponse>(
new Unmarshallers.DeleteObjectsResultUnmarshaller(),
new S3RequesterChargedHeaderHandler<DeleteObjectsResponse>());
DeleteObjectsResponse response = invoke(request, responseHandler, deleteObjectsRequest.getBucketName(), null);
/*
* If the result was only partially successful, throw an exception
*/
if ( !response.getErrors().isEmpty() ) {
Map<String, String> headers = responseHandler.getResponseHeaders();
MultiObjectDeleteException ex = new MultiObjectDeleteException(
response.getErrors(),
response.getDeletedObjects());
ex.setStatusCode(200);
ex.setRequestId(headers.get(Headers.REQUEST_ID));
ex.setExtendedRequestId(headers.get(Headers.EXTENDED_REQUEST_ID));
ex.setCloudFrontId(headers.get(Headers.CLOUD_FRONT_ID));
throw ex;
}
DeleteObjectsResult result = new DeleteObjectsResult(response.getDeletedObjects(), response.isRequesterCharged());
return result;
}
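/*
 * Usage sketch (editor's addition): a multi-object delete; as implemented above, partial
 * failures surface as a MultiObjectDeleteException even though the HTTP status is 200.
 * Names are hypothetical.
 *
 *   try {
 *       DeleteObjectsResult res = s3.deleteObjects(
 *               new DeleteObjectsRequest("my-bucket").withKeys("key-a", "key-b"));
 *   } catch (MultiObjectDeleteException e) {
 *       // e.getDeletedObjects() succeeded; e.getErrors() did not
 *   }
 */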
@Override
public void deleteVersion(String bucketName, String key, String versionId)
throws SdkClientException, AmazonServiceException {
deleteVersion(new DeleteVersionRequest(bucketName, key, versionId));
}
@Override
public void deleteVersion(DeleteVersionRequest deleteVersionRequest)
throws SdkClientException, AmazonServiceException {
deleteVersionRequest = beforeClientExecution(deleteVersionRequest);
rejectNull(deleteVersionRequest,
"The delete version request object must be specified when deleting a version");
String bucketName = deleteVersionRequest.getBucketName();
String key = deleteVersionRequest.getKey();
String versionId = deleteVersionRequest.getVersionId();
rejectNull(bucketName, "The bucket name must be specified when deleting a version");
rejectNull(key, "The key must be specified when deleting a version");
rejectNull(versionId, "The version ID must be specified when deleting a version");
Request<DeleteVersionRequest> request = createRequest(bucketName, key, deleteVersionRequest, HttpMethodName.DELETE);
if (versionId != null) request.addParameter("versionId", versionId);
if (deleteVersionRequest.getMfa() != null) {
populateRequestWithMfaDetails(request, deleteVersionRequest.getMfa());
}
invoke(request, voidResponseHandler, bucketName, key);
}
@Override
public void setBucketVersioningConfiguration(SetBucketVersioningConfigurationRequest setBucketVersioningConfigurationRequest)
throws SdkClientException, AmazonServiceException {
setBucketVersioningConfigurationRequest = beforeClientExecution(setBucketVersioningConfigurationRequest);
rejectNull(setBucketVersioningConfigurationRequest,
"The SetBucketVersioningConfigurationRequest object must be specified when setting versioning configuration");
String bucketName = setBucketVersioningConfigurationRequest.getBucketName();
BucketVersioningConfiguration versioningConfiguration = setBucketVersioningConfigurationRequest.getVersioningConfiguration();
rejectNull(bucketName,
"The bucket name parameter must be specified when setting versioning configuration");
rejectNull(versioningConfiguration,
"The bucket versioning parameter must be specified when setting versioning configuration");
if (versioningConfiguration.isMfaDeleteEnabled() != null) {
rejectNull(setBucketVersioningConfigurationRequest.getMfa(),
"The MFA parameter must be specified when changing MFA Delete status in the versioning configuration");
}
Request<SetBucketVersioningConfigurationRequest> request = createRequest(bucketName, null, setBucketVersioningConfigurationRequest, HttpMethodName.PUT);
request.addParameter("versioning", null);
if (versioningConfiguration.isMfaDeleteEnabled() != null) {
if (setBucketVersioningConfigurationRequest.getMfa() != null) {
populateRequestWithMfaDetails(request, setBucketVersioningConfigurationRequest.getMfa());
}
}
byte[] bytes = bucketConfigurationXmlFactory.convertToXmlByteArray(versioningConfiguration);
request.setContent(new ByteArrayInputStream(bytes));
invoke(request, voidResponseHandler, bucketName, null);
}
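/*
 * Usage sketch (editor's addition): enabling versioning on a bucket; MFA details are only
 * required when the MFA-Delete flag is being changed, as checked above. Names are hypothetical.
 *
 *   s3.setBucketVersioningConfiguration(new SetBucketVersioningConfigurationRequest(
 *           "my-bucket",
 *           new BucketVersioningConfiguration().withStatus(BucketVersioningConfiguration.ENABLED)));
 */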
@Override
public BucketVersioningConfiguration getBucketVersioningConfiguration(String bucketName)
throws SdkClientException, AmazonServiceException {
return getBucketVersioningConfiguration(new GetBucketVersioningConfigurationRequest(bucketName));
}
@Override
public BucketVersioningConfiguration getBucketVersioningConfiguration(GetBucketVersioningConfigurationRequest getBucketVersioningConfigurationRequest)
throws SdkClientException, AmazonServiceException {
getBucketVersioningConfigurationRequest = beforeClientExecution(getBucketVersioningConfigurationRequest);
rejectNull(getBucketVersioningConfigurationRequest, "The request object parameter getBucketVersioningConfigurationRequest must be specified.");
String bucketName = getBucketVersioningConfigurationRequest.getBucketName();
rejectNull(bucketName,
"The bucket name parameter must be specified when querying versioning configuration");
Request<GetBucketVersioningConfigurationRequest> request = createRequest(bucketName, null, getBucketVersioningConfigurationRequest, HttpMethodName.GET);
request.addParameter("versioning", null);
return invoke(request, new Unmarshallers.BucketVersioningConfigurationUnmarshaller(), bucketName, null);
}
@Override
public BucketWebsiteConfiguration getBucketWebsiteConfiguration(String bucketName)
throws SdkClientException, AmazonServiceException {
return getBucketWebsiteConfiguration(new GetBucketWebsiteConfigurationRequest(bucketName));
}
@Override
public BucketWebsiteConfiguration getBucketWebsiteConfiguration(GetBucketWebsiteConfigurationRequest getBucketWebsiteConfigurationRequest)
throws SdkClientException, AmazonServiceException {
getBucketWebsiteConfigurationRequest = beforeClientExecution(getBucketWebsiteConfigurationRequest);
rejectNull(getBucketWebsiteConfigurationRequest, "The request object parameter getBucketWebsiteConfigurationRequest must be specified.");
String bucketName = getBucketWebsiteConfigurationRequest.getBucketName();
rejectNull(bucketName,
"The bucket name parameter must be specified when requesting a bucket's website configuration");
Request<GetBucketWebsiteConfigurationRequest> request = createRequest(bucketName, null, getBucketWebsiteConfigurationRequest, HttpMethodName.GET);
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "GetBucketWebsite");
request.addParameter("website", null);
request.addHeader("Content-Type", "application/xml");
try {
return invoke(request, new Unmarshallers.BucketWebsiteConfigurationUnmarshaller(), bucketName, null);
} catch (AmazonServiceException ase) {
if (ase.getStatusCode() == 404) return null;
throw ase;
}
}
@Override
public BucketLifecycleConfiguration getBucketLifecycleConfiguration(String bucketName) {
return getBucketLifecycleConfiguration(new GetBucketLifecycleConfigurationRequest(bucketName));
}
@Override
public BucketLifecycleConfiguration getBucketLifecycleConfiguration(GetBucketLifecycleConfigurationRequest getBucketLifecycleConfigurationRequest) {
getBucketLifecycleConfigurationRequest = beforeClientExecution(getBucketLifecycleConfigurationRequest);
rejectNull(getBucketLifecycleConfigurationRequest, "The request object parameter getBucketLifecycleConfigurationRequest must be specified.");
String bucketName = getBucketLifecycleConfigurationRequest.getBucketName();
rejectNull(bucketName, "The bucket name must be specified when retrieving the bucket lifecycle configuration.");
Request<GetBucketLifecycleConfigurationRequest> request = createRequest(bucketName, null, getBucketLifecycleConfigurationRequest, HttpMethodName.GET);
request.addParameter("lifecycle", null);
try {
return invoke(request, new Unmarshallers.BucketLifecycleConfigurationUnmarshaller(), bucketName, null);
} catch (AmazonServiceException ase) {
switch (ase.getStatusCode()) {
case 404:
return null;
default:
throw ase;
}
}
}
@Override
public void setBucketLifecycleConfiguration(String bucketName, BucketLifecycleConfiguration bucketLifecycleConfiguration) {
setBucketLifecycleConfiguration(new SetBucketLifecycleConfigurationRequest(bucketName, bucketLifecycleConfiguration));
}
@Override
public void setBucketLifecycleConfiguration(
SetBucketLifecycleConfigurationRequest setBucketLifecycleConfigurationRequest) {
setBucketLifecycleConfigurationRequest = beforeClientExecution(setBucketLifecycleConfigurationRequest);
rejectNull(setBucketLifecycleConfigurationRequest,
"The set bucket lifecycle configuration request object must be specified.");
String bucketName = setBucketLifecycleConfigurationRequest.getBucketName();
BucketLifecycleConfiguration bucketLifecycleConfiguration = setBucketLifecycleConfigurationRequest.getLifecycleConfiguration();
rejectNull(bucketName,
"The bucket name parameter must be specified when setting bucket lifecycle configuration.");
rejectNull(bucketLifecycleConfiguration,
"The lifecycle configuration parameter must be specified when setting bucket lifecycle configuration.");
Request<SetBucketLifecycleConfigurationRequest> request = createRequest(bucketName, null, setBucketLifecycleConfigurationRequest, HttpMethodName.PUT);
request.addParameter("lifecycle", null);
byte[] content = new BucketConfigurationXmlFactory().convertToXmlByteArray(bucketLifecycleConfiguration);
request.addHeader("Content-Length", String.valueOf(content.length));
request.addHeader("Content-Type", "application/xml");
request.setContent(new ByteArrayInputStream(content));
try {
byte[] md5 = Md5Utils.computeMD5Hash(content);
String md5Base64 = BinaryUtils.toBase64(md5);
request.addHeader("Content-MD5", md5Base64);
} catch ( Exception e ) {
throw new SdkClientException("Couldn't compute md5 sum", e);
}
invoke(request, voidResponseHandler, bucketName, null);
}
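/*
 * Usage sketch (editor's addition): a one-rule lifecycle configuration; the rule below and
 * all names are hypothetical.
 *
 *   BucketLifecycleConfiguration.Rule rule = new BucketLifecycleConfiguration.Rule()
 *           .withId("expire-tmp")
 *           .withPrefix("tmp/")
 *           .withExpirationInDays(7)
 *           .withStatus(BucketLifecycleConfiguration.ENABLED);
 *   s3.setBucketLifecycleConfiguration("my-bucket",
 *           new BucketLifecycleConfiguration().withRules(rule));
 */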
@Override
public void deleteBucketLifecycleConfiguration(String bucketName) {
deleteBucketLifecycleConfiguration(new DeleteBucketLifecycleConfigurationRequest(bucketName));
}
@Override
public void deleteBucketLifecycleConfiguration(
DeleteBucketLifecycleConfigurationRequest deleteBucketLifecycleConfigurationRequest) {
deleteBucketLifecycleConfigurationRequest = beforeClientExecution(deleteBucketLifecycleConfigurationRequest);
rejectNull(deleteBucketLifecycleConfigurationRequest,
"The delete bucket lifecycle configuration request object must be specified.");
String bucketName = deleteBucketLifecycleConfigurationRequest.getBucketName();
rejectNull(bucketName,
"The bucket name parameter must be specified when deleting bucket lifecycle configuration.");
Request<DeleteBucketLifecycleConfigurationRequest> request = createRequest(bucketName, null, deleteBucketLifecycleConfigurationRequest, HttpMethodName.DELETE);
request.addParameter("lifecycle", null);
invoke(request, voidResponseHandler, bucketName, null);
}
@Override
public BucketCrossOriginConfiguration getBucketCrossOriginConfiguration(String bucketName) {
return getBucketCrossOriginConfiguration(new GetBucketCrossOriginConfigurationRequest(bucketName));
}
@Override
public BucketCrossOriginConfiguration getBucketCrossOriginConfiguration(GetBucketCrossOriginConfigurationRequest getBucketCrossOriginConfigurationRequest) {
getBucketCrossOriginConfigurationRequest = beforeClientExecution(getBucketCrossOriginConfigurationRequest);
rejectNull(getBucketCrossOriginConfigurationRequest, "The request object parameter getBucketCrossOriginConfigurationRequest must be specified.");
String bucketName = getBucketCrossOriginConfigurationRequest.getBucketName();
rejectNull(bucketName, "The bucket name must be specified when retrieving the bucket cross origin configuration.");
Request<GetBucketCrossOriginConfigurationRequest> request = createRequest(bucketName, null, getBucketCrossOriginConfigurationRequest, HttpMethodName.GET);
request.addParameter("cors", null);
addHeaderIfNotEmpty(
request, Headers.MIRROR_DESTINATION, getBucketCrossOriginConfigurationRequest.getWormMirrorDestination());
try {
return invoke(request, new Unmarshallers.BucketCrossOriginConfigurationUnmarshaller(), bucketName, null);
} catch (AmazonServiceException ase) {
switch (ase.getStatusCode()) {
case 404:
return null;
default:
throw ase;
}
}
}
@Override
public void setBucketCrossOriginConfiguration(String bucketName, BucketCrossOriginConfiguration bucketCrossOriginConfiguration) {
setBucketCrossOriginConfiguration(new SetBucketCrossOriginConfigurationRequest(bucketName, bucketCrossOriginConfiguration));
}
@Override
public void setBucketCrossOriginConfiguration(
SetBucketCrossOriginConfigurationRequest setBucketCrossOriginConfigurationRequest) {
setBucketCrossOriginConfigurationRequest = beforeClientExecution(setBucketCrossOriginConfigurationRequest);
rejectNull(setBucketCrossOriginConfigurationRequest,
"The set bucket cross origin configuration request object must be specified.");
String bucketName = setBucketCrossOriginConfigurationRequest.getBucketName();
BucketCrossOriginConfiguration bucketCrossOriginConfiguration = setBucketCrossOriginConfigurationRequest.getCrossOriginConfiguration();
rejectNull(bucketName,
"The bucket name parameter must be specified when setting bucket cross origin configuration.");
rejectNull(bucketCrossOriginConfiguration,
"The cross origin configuration parameter must be specified when setting bucket cross origin configuration.");
Request<SetBucketCrossOriginConfigurationRequest> request = createRequest(bucketName, null, setBucketCrossOriginConfigurationRequest, HttpMethodName.PUT);
request.addParameter("cors", null);
byte[] content = new BucketConfigurationXmlFactory().convertToXmlByteArray(bucketCrossOriginConfiguration);
request.addHeader("Content-Length", String.valueOf(content.length));
request.addHeader("Content-Type", "application/xml");
request.setContent(new ByteArrayInputStream(content));
try {
byte[] md5 = Md5Utils.computeMD5Hash(content);
String md5Base64 = BinaryUtils.toBase64(md5);
request.addHeader("Content-MD5", md5Base64);
} catch ( Exception e ) {
throw new SdkClientException("Couldn't compute md5 sum", e);
}
invoke(request, voidResponseHandler, bucketName, null);
}
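/*
 * Usage sketch (editor's addition): a minimal CORS configuration with a single rule; all
 * names below are hypothetical.
 *
 *   CORSRule rule = new CORSRule()
 *           .withAllowedMethods(Arrays.asList(CORSRule.AllowedMethods.GET))
 *           .withAllowedOrigins(Arrays.asList("https://example.com"));
 *   s3.setBucketCrossOriginConfiguration("my-bucket",
 *           new BucketCrossOriginConfiguration().withRules(rule));
 */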
@Override
public void deleteBucketCrossOriginConfiguration(String bucketName) {
deleteBucketCrossOriginConfiguration(new DeleteBucketCrossOriginConfigurationRequest(bucketName));
}
@Override
public void deleteBucketCrossOriginConfiguration(
DeleteBucketCrossOriginConfigurationRequest deleteBucketCrossOriginConfigurationRequest) {
deleteBucketCrossOriginConfigurationRequest = beforeClientExecution(deleteBucketCrossOriginConfigurationRequest);
rejectNull(deleteBucketCrossOriginConfigurationRequest,
"The delete bucket cross origin configuration request object must be specified.");
String bucketName = deleteBucketCrossOriginConfigurationRequest.getBucketName();
rejectNull(bucketName,
"The bucket name parameter must be specified when deleting bucket cross origin configuration.");
Request<DeleteBucketCrossOriginConfigurationRequest> request = createRequest(bucketName, null, deleteBucketCrossOriginConfigurationRequest, HttpMethodName.DELETE);
request.addParameter("cors", null);
invoke(request, voidResponseHandler, bucketName, null);
}
/**
* @exclude
*/
@Override
public BucketTaggingConfiguration getBucketTaggingConfiguration(String bucketName) {
return getBucketTaggingConfiguration(new GetBucketTaggingConfigurationRequest(bucketName));
}
/**
* @exclude
*/
@Override
public BucketTaggingConfiguration getBucketTaggingConfiguration(GetBucketTaggingConfigurationRequest getBucketTaggingConfigurationRequest) {
getBucketTaggingConfigurationRequest = beforeClientExecution(getBucketTaggingConfigurationRequest);
rejectNull(getBucketTaggingConfigurationRequest, "The request object parameter getBucketTaggingConfigurationRequest must be specified.");
String bucketName = getBucketTaggingConfigurationRequest.getBucketName();
rejectNull(bucketName, "The bucket name must be specified when retrieving the bucket tagging configuration.");
Request<GetBucketTaggingConfigurationRequest> request = createRequest(bucketName, null, getBucketTaggingConfigurationRequest, HttpMethodName.GET);
addHeaderIfNotEmpty(
request, Headers.MIRROR_DESTINATION, getBucketTaggingConfigurationRequest.getWormMirrorDestination());
request.addParameter("tagging", null);
try {
return invoke(request, new Unmarshallers.BucketTaggingConfigurationUnmarshaller(), bucketName, null);
} catch (AmazonServiceException ase) {
switch (ase.getStatusCode()) {
case 404:
return null;
default:
throw ase;
}
}
}
/**
* @exclude
*/
@Override
public void setBucketTaggingConfiguration(String bucketName, BucketTaggingConfiguration bucketTaggingConfiguration) {
setBucketTaggingConfiguration(new SetBucketTaggingConfigurationRequest(bucketName, bucketTaggingConfiguration));
}
/**
* @exclude
*/
@Override
public void setBucketTaggingConfiguration(
SetBucketTaggingConfigurationRequest setBucketTaggingConfigurationRequest) {
setBucketTaggingConfigurationRequest = beforeClientExecution(setBucketTaggingConfigurationRequest);
rejectNull(setBucketTaggingConfigurationRequest,
"The set bucket tagging configuration request object must be specified.");
String bucketName = setBucketTaggingConfigurationRequest.getBucketName();
BucketTaggingConfiguration bucketTaggingConfiguration = setBucketTaggingConfigurationRequest.getTaggingConfiguration();
rejectNull(bucketName,
"The bucket name parameter must be specified when setting bucket tagging configuration.");
rejectNull(bucketTaggingConfiguration,
"The tagging configuration parameter must be specified when setting bucket tagging configuration.");
Request<SetBucketTaggingConfigurationRequest> request = createRequest(bucketName, null, setBucketTaggingConfigurationRequest, HttpMethodName.PUT);
request.addParameter("tagging", null);
byte[] content = new BucketConfigurationXmlFactory().convertToXmlByteArray(bucketTaggingConfiguration);
request.addHeader("Content-Length", String.valueOf(content.length));
request.addHeader("Content-Type", "application/xml");
request.setContent(new ByteArrayInputStream(content));
try {
byte[] md5 = Md5Utils.computeMD5Hash(content);
String md5Base64 = BinaryUtils.toBase64(md5);
request.addHeader("Content-MD5", md5Base64);
} catch ( Exception e ) {
throw new SdkClientException("Couldn't compute md5 sum", e);
}
invoke(request, voidResponseHandler, bucketName, null);
}
/**
* @exclude
*/
@Override
public void deleteBucketTaggingConfiguration(String bucketName) {
deleteBucketTaggingConfiguration(new DeleteBucketTaggingConfigurationRequest(bucketName));
}
/**
* @exclude
*/
@Override
public void deleteBucketTaggingConfiguration(
DeleteBucketTaggingConfigurationRequest deleteBucketTaggingConfigurationRequest) {
deleteBucketTaggingConfigurationRequest = beforeClientExecution(deleteBucketTaggingConfigurationRequest);
rejectNull(deleteBucketTaggingConfigurationRequest,
"The delete bucket tagging configuration request object must be specified.");
String bucketName = deleteBucketTaggingConfigurationRequest.getBucketName();
rejectNull(bucketName,
"The bucket name parameter must be specified when deleting bucket tagging configuration.");
Request<DeleteBucketTaggingConfigurationRequest> request = createRequest(bucketName, null, deleteBucketTaggingConfigurationRequest, HttpMethodName.DELETE);
request.addParameter("tagging", null);
invoke(request, voidResponseHandler, bucketName, null);
}
@Override
public void setBucketWebsiteConfiguration(String bucketName, BucketWebsiteConfiguration configuration)
throws SdkClientException, AmazonServiceException {
setBucketWebsiteConfiguration(new SetBucketWebsiteConfigurationRequest(bucketName, configuration));
}
@Override
public void setBucketWebsiteConfiguration(SetBucketWebsiteConfigurationRequest setBucketWebsiteConfigurationRequest)
throws SdkClientException, AmazonServiceException {
setBucketWebsiteConfigurationRequest = beforeClientExecution(setBucketWebsiteConfigurationRequest);
String bucketName = setBucketWebsiteConfigurationRequest.getBucketName();
BucketWebsiteConfiguration configuration = setBucketWebsiteConfigurationRequest.getConfiguration();
rejectNull(bucketName,
"The bucket name parameter must be specified when setting a bucket's website configuration");
rejectNull(configuration,
"The bucket website configuration parameter must be specified when setting a bucket's website configuration");
if (configuration.getRedirectAllRequestsTo() == null) {
rejectNull(configuration.getIndexDocumentSuffix(),
"The bucket website configuration parameter must specify the index document suffix when setting a bucket's website configuration");
}
Request<SetBucketWebsiteConfigurationRequest> request = createRequest(bucketName, null, setBucketWebsiteConfigurationRequest, HttpMethodName.PUT);
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "PutBucketWebsite");
request.addParameter("website", null);
request.addHeader("Content-Type", "application/xml");
byte[] bytes = bucketConfigurationXmlFactory.convertToXmlByteArray(configuration);
request.setContent(new ByteArrayInputStream(bytes));
invoke(request, voidResponseHandler, bucketName, null);
}
@Override
public void deleteBucketWebsiteConfiguration(String bucketName)
throws SdkClientException, AmazonServiceException {
deleteBucketWebsiteConfiguration(new DeleteBucketWebsiteConfigurationRequest(bucketName));
}
@Override
public void deleteBucketWebsiteConfiguration(DeleteBucketWebsiteConfigurationRequest deleteBucketWebsiteConfigurationRequest)
throws SdkClientException, AmazonServiceException {
deleteBucketWebsiteConfigurationRequest = beforeClientExecution(deleteBucketWebsiteConfigurationRequest);
String bucketName = deleteBucketWebsiteConfigurationRequest.getBucketName();
rejectNull(bucketName,
"The bucket name parameter must be specified when deleting a bucket's website configuration");
Request<DeleteBucketWebsiteConfigurationRequest> request = createRequest(bucketName, null, deleteBucketWebsiteConfigurationRequest, HttpMethodName.DELETE);
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeleteBucketWebsite");
request.addParameter("website", null);
request.addHeader("Content-Type", "application/xml");
invoke(request, voidResponseHandler, bucketName, null);
}
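/*
* Illustrative usage sketch (not part of the original source): given an
* AmazonS3 client named "s3", a bucket can be configured as a static website
* with an assumed index and error document, and later unconfigured:
*
*   s3.setBucketWebsiteConfiguration("my-bucket",
*           new BucketWebsiteConfiguration("index.html", "error.html"));
*   s3.deleteBucketWebsiteConfiguration("my-bucket");
*/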
@Override
public SetPublicAccessBlockResult setPublicAccessBlock(SetPublicAccessBlockRequest setPublicAccessBlockRequest) {
setPublicAccessBlockRequest = beforeClientExecution(setPublicAccessBlockRequest);
rejectNull(setPublicAccessBlockRequest, "The request object must be specified.");
String bucketName = setPublicAccessBlockRequest.getBucketName();
PublicAccessBlockConfiguration config = setPublicAccessBlockRequest.getPublicAccessBlockConfiguration();
rejectNull(bucketName,
"The bucket name parameter must be specified when setting public block configuration.");
rejectNull(config,
"The PublicAccessBlockConfiguration parameter must be specified when setting public block");
Request<SetPublicAccessBlockRequest> request = createRequest(bucketName, null, setPublicAccessBlockRequest, HttpMethodName.PUT);
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "PutPublicAccessBlock");
request.addParameter("publicAccessBlock", null);
byte[] bytes = bucketConfigurationXmlFactory.convertToXmlByteArray(config);
request.setContent(new ByteArrayInputStream(bytes));
return invoke(request, new Unmarshallers.SetPublicAccessBlockUnmarshaller(), bucketName, null);
}
@Override
public GetPublicAccessBlockResult getPublicAccessBlock(GetPublicAccessBlockRequest getPublicAccessBlockRequest) {
getPublicAccessBlockRequest = beforeClientExecution(getPublicAccessBlockRequest);
rejectNull(getPublicAccessBlockRequest, "The request object must be specified.");
String bucketName = getPublicAccessBlockRequest.getBucketName();
rejectNull(bucketName,
"The bucket name parameter must be specified when getting public block configuration.");
Request<GetPublicAccessBlockRequest> request = createRequest(bucketName, null, getPublicAccessBlockRequest, HttpMethodName.GET);
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "GetPublicAccessBlock");
request.addParameter("publicAccessBlock", null);
return invoke(request, GetPublicAccessBlockStaxUnmarshaller.getInstance(), bucketName, null);
}
@Override
public DeletePublicAccessBlockResult deletePublicAccessBlock(DeletePublicAccessBlockRequest deletePublicAccessBlockRequest) {
deletePublicAccessBlockRequest = beforeClientExecution(deletePublicAccessBlockRequest);
rejectNull(deletePublicAccessBlockRequest, "The request object must be specified.");
String bucketName = deletePublicAccessBlockRequest.getBucketName();
rejectNull(bucketName,
"The bucket name parameter must be specified when deleting public block configuration.");
Request<DeletePublicAccessBlockRequest> request = createRequest(bucketName, null, deletePublicAccessBlockRequest, HttpMethodName.DELETE);
request.addHandlerContext(HandlerContextKey.OPERATION_NAME, "DeletePublicAccessBlock");
request.addParameter("publicAccessBlock", null);
return invoke(request, new Unmarshallers.DeletePublicAccessBlockUnmarshaller(), bucketName, null);
}
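/*
* Illustrative usage sketch (not part of the original source): the three
* public-access-block operations above are usually combined as follows,
* assuming an AmazonS3 client named "s3":
*
*   s3.setPublicAccessBlock(new SetPublicAccessBlockRequest()
*           .withBucketName("my-bucket")
*           .withPublicAccessBlockConfiguration(new PublicAccessBlockConfiguration()
*                   .withBlockPublicAcls(true)
*                   .withIgnorePublicAcls(true)));
*   PublicAccessBlockConfiguration current = s3.getPublicAccessBlock(
*           new GetPublicAccessBlockRequest().withBucketName("my-bucket"))
*           .getPublicAccessBlockConfiguration();
*   s3.deletePublicAccessBlock(new DeletePublicAccessBlockRequest()
*           .withBucketName("my-bucket"));
*/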
@Override
public URL generatePresignedUrl(String bucketName, String key, Date expiration)
throws SdkClientException {
return generatePresignedUrl(bucketName, key, expiration, HttpMethod.GET);
}
@Override
public URL generatePresignedUrl(String bucketName, String key, Date expiration, HttpMethod method)
throws SdkClientException {
GeneratePresignedUrlRequest request = new GeneratePresignedUrlRequest(bucketName, key, method);
request.setExpiration(expiration);
return generatePresignedUrl(request);
}
@Override
public URL generatePresignedUrl(GeneratePresignedUrlRequest req) {
rejectNull(req,
"The request parameter must be specified when generating a pre-signed URL");
req.rejectIllegalArguments();
// Check whether the credentials returned by the provider are IBMOAuthCredentials (IAM)
if (this.awsCredentialsProvider.getCredentials() instanceof IBMOAuthCredentials) {
IBMOAuthCredentials creds = (IBMOAuthCredentials) this.awsCredentialsProvider.getCredentials();
if (creds.getApiKey() != null || creds.getTokenManager() != null) {
throw new AmazonS3Exception("generatePresignedUrl() is not supported with IAM credentials");
}
}
final String bucketName = req.getBucketName();
final String key = req.getKey();
if (req.getExpiration() == null) {
req.setExpiration(
new Date(System.currentTimeMillis() + 1000 * 60 * 15));
}
HttpMethodName httpMethod = HttpMethodName.valueOf(req.getMethod().toString());
// If the key starts with a slash character itself, the following method
// will actually add another slash before the resource path to prevent
// the HttpClient mistakenly treating the slash as a path delimiter.
// For presigned request, we need to remember to remove this extra slash
// before generating the URL.
Request<?> request = createRequest(
bucketName, key, req, httpMethod);
addParameterIfNotNull(request, "versionId", req.getVersionId());
if (req.isZeroByteContent())
request.setContent(new ByteArrayInputStream(new byte[0]));
for (Entry<String, String> entry : req.getRequestParameters().entrySet()) {
request.addParameter(entry.getKey(), entry.getValue());
}
addHeaderIfNotNull(request, Headers.CONTENT_TYPE, req.getContentType());
addHeaderIfNotNull(request, Headers.CONTENT_MD5, req.getContentMd5());
// SSE-C
populateSSE_C(request, req.getSSECustomerKey());
// SSE
addHeaderIfNotNull(request, Headers.SERVER_SIDE_ENCRYPTION,
req.getSSEAlgorithm());
// SSE-KMS
addHeaderIfNotNull(request,
Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID, req.getKmsCmkId());
// Custom headers that open up the possibility of supporting unexpected
// cases.
Map<String, String> customHeaders = req.getCustomRequestHeaders();
if (customHeaders != null) {
for (Map.Entry<String, String> e: customHeaders.entrySet()) {
request.addHeader(e.getKey(), e.getValue());
}
}
addResponseHeaderParameters(request, req.getResponseHeaders());
Signer signer = createSigner(request, bucketName, key);
if (signer instanceof Presigner) {
// If we have a signer which knows how to presign requests,
// delegate directly to it.
((Presigner) signer).presignRequest(
request,
CredentialUtils.getCredentialsProvider(request.getOriginalRequest(), awsCredentialsProvider).getCredentials(),
req.getExpiration()
);
} else {
// Otherwise use the default presigning method, which is hardcoded
// to use QueryStringSigner.
presignRequest(
request,
req.getMethod(),
bucketName,
key,
req.getExpiration(),
null
);
}
// Remove the leading slash (if any) in the resource-path
return ServiceUtils.convertRequestToUrl(request, true, false);
}
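/*
* Illustrative usage sketch (not part of the original source): with HMAC
* (access key / secret key) credentials -- IAM/OAuth credentials are rejected
* above -- a time-limited GET URL for an object can be produced like this,
* given an AmazonS3 client named "s3":
*
*   GeneratePresignedUrlRequest presignReq =
*           new GeneratePresignedUrlRequest("my-bucket", "my-key", HttpMethod.GET);
*   presignReq.setExpiration(new Date(System.currentTimeMillis() + 1000 * 60 * 60));
*   URL url = s3.generatePresignedUrl(presignReq);
*
* If no expiration is set, the code above defaults it to 15 minutes from now.
*/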
@Override
public void abortMultipartUpload(AbortMultipartUploadRequest abortMultipartUploadRequest)
throws SdkClientException, AmazonServiceException {
abortMultipartUploadRequest = beforeClientExecution(abortMultipartUploadRequest);
rejectNull(abortMultipartUploadRequest,
"The request parameter must be specified when aborting a multipart upload");
rejectNull(abortMultipartUploadRequest.getBucketName(),
"The bucket name parameter must be specified when aborting a multipart upload");
rejectNull(abortMultipartUploadRequest.getKey(),
"The key parameter must be specified when aborting a multipart upload");
rejectNull(abortMultipartUploadRequest.getUploadId(),
"The upload ID parameter must be specified when aborting a multipart upload");
String bucketName = abortMultipartUploadRequest.getBucketName();
String key = abortMultipartUploadRequest.getKey();
Request<AbortMultipartUploadRequest> request = createRequest(bucketName, key, abortMultipartUploadRequest, HttpMethodName.DELETE);
request.addParameter("uploadId", abortMultipartUploadRequest.getUploadId());
populateRequesterPaysHeader(request, abortMultipartUploadRequest.isRequesterPays());
invoke(request, voidResponseHandler, bucketName, key);
}
@Override
public CompleteMultipartUploadResult completeMultipartUpload(
CompleteMultipartUploadRequest completeMultipartUploadRequest)
throws SdkClientException, AmazonServiceException {
completeMultipartUploadRequest = beforeClientExecution(completeMultipartUploadRequest);
rejectNull(completeMultipartUploadRequest,
"The request parameter must be specified when completing a multipart upload");
String bucketName = completeMultipartUploadRequest.getBucketName();
String key = completeMultipartUploadRequest.getKey();
String uploadId = completeMultipartUploadRequest.getUploadId();
rejectNull(bucketName,
"The bucket name parameter must be specified when completing a multipart upload");
rejectNull(key,
"The key parameter must be specified when completing a multipart upload");
rejectNull(uploadId,
"The upload ID parameter must be specified when completing a multipart upload");
rejectNull(completeMultipartUploadRequest.getPartETags(),
"The part ETags parameter must be specified when completing a multipart upload");
int retries = 0;
CompleteMultipartUploadHandler handler;
do {
Request<CompleteMultipartUploadRequest> request = createRequest(bucketName, key, completeMultipartUploadRequest, HttpMethodName.POST);
request.addParameter("uploadId", uploadId);
populateRequesterPaysHeader(request, completeMultipartUploadRequest.isRequesterPays());
byte[] xml = RequestXmlFactory.convertToXmlByteArray(completeMultipartUploadRequest.getPartETags());
request.addHeader("Content-Type", "application/xml");
request.addHeader("Content-Length", String.valueOf(xml.length));
if(completeMultipartUploadRequest.getRetentionExpirationDate() != null) {
request.addHeader(Headers.RETENTION_EXPIRATION_DATE,
DateUtils.formatRFC822Date(completeMultipartUploadRequest.getRetentionExpirationDate()));
}
if(completeMultipartUploadRequest.getRetentionPeriod() != null) {
request.addHeader(Headers.RETENTION_PERIOD,
completeMultipartUploadRequest.getRetentionPeriod().toString());
}
addHeaderIfNotNull(request, Headers.RETENTION_LEGAL_HOLD_ID,
completeMultipartUploadRequest.getRetentionLegalHoldId());
request.setContent(new ByteArrayInputStream(xml));
// Calculate Content MD5
try {
byte[] md5 = Md5Utils.computeMD5Hash(new ByteArrayInputStream(xml));
String md5Base64 = BinaryUtils.toBase64(md5);
request.addHeader("Content-MD5", md5Base64);
} catch ( Exception e ) {
throw new SdkClientException("Couldn't compute md5 sum", e);
}
@SuppressWarnings("unchecked")
ResponseHeaderHandlerChain<CompleteMultipartUploadHandler> responseHandler = new ResponseHeaderHandlerChain<CompleteMultipartUploadHandler>(
// xml payload unmarshaller
new Unmarshallers.CompleteMultipartUploadResultUnmarshaller(),
// header handlers
new ServerSideEncryptionHeaderHandler<CompleteMultipartUploadHandler>(),
new ObjectExpirationHeaderHandler<CompleteMultipartUploadHandler>(),
new S3VersionHeaderHandler<CompleteMultipartUploadHandler>(),
new S3RequesterChargedHeaderHandler<CompleteMultipartUploadHandler>());
handler = invoke(request, responseHandler, bucketName, key);
if (handler.getCompleteMultipartUploadResult() != null) {
return handler.getCompleteMultipartUploadResult();
}
} while (shouldRetryCompleteMultipartUpload(completeMultipartUploadRequest,
handler.getAmazonS3Exception(), retries++));
throw handler.getAmazonS3Exception();
}
private boolean shouldRetryCompleteMultipartUpload(AmazonWebServiceRequest originalRequest,
AmazonS3Exception exception,
int retriesAttempted) {
final RetryPolicy retryPolicy = clientConfiguration.getRetryPolicy();
if (retryPolicy == null || retryPolicy.getRetryCondition() == null) {
return false;
}
if (retryPolicy == PredefinedRetryPolicies.NO_RETRY_POLICY) {
return false;
}
return completeMultipartUploadRetryCondition.shouldRetry
(originalRequest, exception, retriesAttempted);
}
@Override
public InitiateMultipartUploadResult initiateMultipartUpload(
InitiateMultipartUploadRequest initiateMultipartUploadRequest)
throws SdkClientException, AmazonServiceException {
initiateMultipartUploadRequest = beforeClientExecution(initiateMultipartUploadRequest);
rejectNull(initiateMultipartUploadRequest,
"The request parameter must be specified when initiating a multipart upload");
rejectNull(initiateMultipartUploadRequest.getBucketName(),
"The bucket name parameter must be specified when initiating a multipart upload");
rejectNull(initiateMultipartUploadRequest.getKey(),
"The key parameter must be specified when initiating a multipart upload");
Request<InitiateMultipartUploadRequest> request = createRequest(initiateMultipartUploadRequest.getBucketName(), initiateMultipartUploadRequest.getKey(), initiateMultipartUploadRequest, HttpMethodName.POST);
request.addParameter("uploads", null);
if (initiateMultipartUploadRequest.getStorageClass() != null)
request.addHeader(Headers.STORAGE_CLASS, initiateMultipartUploadRequest.getStorageClass().toString());
if (initiateMultipartUploadRequest.getRedirectLocation() != null) {
request.addHeader(Headers.REDIRECT_LOCATION, initiateMultipartUploadRequest.getRedirectLocation());
}
if ( initiateMultipartUploadRequest.getAccessControlList() != null ) {
addAclHeaders(request, initiateMultipartUploadRequest.getAccessControlList());
} else if ( initiateMultipartUploadRequest.getCannedACL() != null ) {
request.addHeader(Headers.S3_CANNED_ACL, initiateMultipartUploadRequest.getCannedACL().toString());
}
if (initiateMultipartUploadRequest.objectMetadata != null) {
populateRequestMetadata(request, initiateMultipartUploadRequest.objectMetadata);
}
populateRequesterPaysHeader(request, initiateMultipartUploadRequest.isRequesterPays());
// Populate the SSE-C parameters to the request header
populateSSE_C(request, initiateMultipartUploadRequest.getSSECustomerKey());
// Populate the SSE AWS KMS parameters to the request header
populateSSE_KMS(request,
initiateMultipartUploadRequest.getSSEAwsKeyManagementParams());
addHeaderIfNotNull(request, Headers.S3_TAGGING, urlEncodeTags(initiateMultipartUploadRequest.getTagging()));
// Be careful that we don't send the object's total size as the content
// length for the InitiateMultipartUpload request.
setZeroContentLength(request);
// Set the request content to be empty (but not null) to force the runtime to pass
// any query params in the query string and not the request body, to keep S3 happy.
request.setContent(new ByteArrayInputStream(new byte[0]));
@SuppressWarnings("unchecked")
ResponseHeaderHandlerChain<InitiateMultipartUploadResult> responseHandler = new ResponseHeaderHandlerChain<InitiateMultipartUploadResult>(
// xml payload unmarshaller
new Unmarshallers.InitiateMultipartUploadResultUnmarshaller(),
// header handlers
new ServerSideEncryptionHeaderHandler<InitiateMultipartUploadResult>(),
new S3RequesterChargedHeaderHandler<InitiateMultipartUploadResult>(),
new InitiateMultipartUploadHeaderHandler());
return invoke(request, responseHandler,
initiateMultipartUploadRequest.getBucketName(), initiateMultipartUploadRequest.getKey());
}
@Override
public MultipartUploadListing listMultipartUploads(ListMultipartUploadsRequest listMultipartUploadsRequest)
throws SdkClientException, AmazonServiceException {
listMultipartUploadsRequest = beforeClientExecution(listMultipartUploadsRequest);
rejectNull(listMultipartUploadsRequest,
"The request parameter must be specified when listing multipart uploads");
rejectNull(listMultipartUploadsRequest.getBucketName(),
"The bucket name parameter must be specified when listing multipart uploads");
Request<ListMultipartUploadsRequest> request = createRequest(listMultipartUploadsRequest.getBucketName(), null, listMultipartUploadsRequest, HttpMethodName.GET);
request.addParameter("uploads", null);
if (listMultipartUploadsRequest.getKeyMarker() != null) request.addParameter("key-marker", listMultipartUploadsRequest.getKeyMarker());
if (listMultipartUploadsRequest.getMaxUploads() != null) request.addParameter("max-uploads", listMultipartUploadsRequest.getMaxUploads().toString());
if (listMultipartUploadsRequest.getUploadIdMarker() != null) request.addParameter("upload-id-marker", listMultipartUploadsRequest.getUploadIdMarker());
if (listMultipartUploadsRequest.getDelimiter() != null) request.addParameter("delimiter", listMultipartUploadsRequest.getDelimiter());
if (listMultipartUploadsRequest.getPrefix() != null) request.addParameter("prefix", listMultipartUploadsRequest.getPrefix());
if (listMultipartUploadsRequest.getEncodingType() != null) request.addParameter("encoding-type", listMultipartUploadsRequest.getEncodingType());
addHeaderIfNotEmpty(
request, Headers.MIRROR_DESTINATION, listMultipartUploadsRequest.getWormMirrorDestination());
return invoke(request, new Unmarshallers.ListMultipartUploadsResultUnmarshaller(), listMultipartUploadsRequest.getBucketName(), null);
}
@Override
public PartListing listParts(ListPartsRequest listPartsRequest)
throws SdkClientException, AmazonServiceException {
listPartsRequest = beforeClientExecution(listPartsRequest);
rejectNull(listPartsRequest,
"The request parameter must be specified when listing parts");
rejectNull(listPartsRequest.getBucketName(),
"The bucket name parameter must be specified when listing parts");
rejectNull(listPartsRequest.getKey(),
"The key parameter must be specified when listing parts");
rejectNull(listPartsRequest.getUploadId(),
"The upload ID parameter must be specified when listing parts");
Request<ListPartsRequest> request = createRequest(listPartsRequest.getBucketName(), listPartsRequest.getKey(), listPartsRequest, HttpMethodName.GET);
request.addParameter("uploadId", listPartsRequest.getUploadId());
if (listPartsRequest.getMaxParts() != null) request.addParameter("max-parts", listPartsRequest.getMaxParts().toString());
if (listPartsRequest.getPartNumberMarker() != null) request.addParameter("part-number-marker", listPartsRequest.getPartNumberMarker().toString());
if (listPartsRequest.getEncodingType() != null) request.addParameter("encoding-type", listPartsRequest.getEncodingType());
populateRequesterPaysHeader(request, listPartsRequest.isRequesterPays());
addHeaderIfNotEmpty(
request, Headers.MIRROR_DESTINATION, listPartsRequest.getWormMirrorDestination());
@SuppressWarnings("unchecked")
ResponseHeaderHandlerChain<PartListing> responseHandler = new ResponseHeaderHandlerChain<PartListing>(
// xml payload unmarshaller
new Unmarshallers.ListPartsResultUnmarshaller(),
// header handler
new S3RequesterChargedHeaderHandler<PartListing>(),
new ListPartsHeaderHandler());
return invoke(request, responseHandler, listPartsRequest.getBucketName(), listPartsRequest.getKey());
}
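/*
* Illustrative usage sketch (not part of the original source): enumerating
* in-progress multipart uploads and the parts already uploaded for each of
* them (pagination omitted), assuming an AmazonS3 client named "s3":
*
*   MultipartUploadListing uploads =
*           s3.listMultipartUploads(new ListMultipartUploadsRequest("my-bucket"));
*   for (MultipartUpload upload : uploads.getMultipartUploads()) {
*       PartListing parts = s3.listParts(new ListPartsRequest(
*               "my-bucket", upload.getKey(), upload.getUploadId()));
*   }
*/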
@Override
public UploadPartResult uploadPart(UploadPartRequest uploadPartRequest)
throws SdkClientException, AmazonServiceException {
uploadPartRequest = beforeClientExecution(uploadPartRequest);
rejectNull(uploadPartRequest,
"The request parameter must be specified when uploading a part");
final File fileOrig = uploadPartRequest.getFile();
final InputStream isOrig = uploadPartRequest.getInputStream();
final String bucketName = uploadPartRequest.getBucketName();
final String key = uploadPartRequest.getKey();
final String uploadId = uploadPartRequest.getUploadId();
final int partNumber = uploadPartRequest.getPartNumber();
final long partSize = uploadPartRequest.getPartSize();
rejectNull(bucketName,
"The bucket name parameter must be specified when uploading a part");
rejectNull(key,
"The key parameter must be specified when uploading a part");
rejectNull(uploadId,
"The upload ID parameter must be specified when uploading a part");
Request<UploadPartRequest> request = createRequest(bucketName, key, uploadPartRequest, HttpMethodName.PUT);
request.addParameter("uploadId", uploadId);
request.addParameter("partNumber", Integer.toString(partNumber));
final ObjectMetadata objectMetadata = uploadPartRequest.getObjectMetadata();
if (objectMetadata != null)
populateRequestMetadata(request, objectMetadata);
addHeaderIfNotNull(request, Headers.CONTENT_MD5, uploadPartRequest.getMd5Digest());
request.addHeader(Headers.CONTENT_LENGTH, Long.toString(partSize));
populateRequesterPaysHeader(request, uploadPartRequest.isRequesterPays());
// Populate the SSE-C parameters to the request header
populateSSE_C(request, uploadPartRequest.getSSECustomerKey());
InputStream isCurr = isOrig;
try {
if (fileOrig == null) {
if (isOrig == null) {
throw new IllegalArgumentException(
"A File or InputStream must be specified when uploading part");
} else {
// When isCurr is a FileInputStream, this wrapping enables
// unlimited mark-and-reset
isCurr = ReleasableInputStream.wrap(isCurr);
}
// Make backward compatible with buffer size via system property
final Integer bufsize = Constants.getS3StreamBufferSize();
if (bufsize != null) {
AmazonWebServiceRequest awsreq = request.getOriginalRequest();
// Note awsreq is never null at this point, even if the original
// request was null.
awsreq.getRequestClientOptions()
.setReadLimit(bufsize.intValue());
}
} else {
try {
isCurr = new ResettableInputStream(fileOrig);
} catch(IOException e) {
throw new IllegalArgumentException("Failed to open file "
+ fileOrig, e);
}
}
final boolean closeStream = uploadPartRequest.isCalculateMD5() ?
false : uploadPartRequest.isLastPart();
isCurr = new InputSubstream(
isCurr,
uploadPartRequest.getFileOffset(),
partSize,
closeStream);
// Calculate Content MD5 on part upload if requested.
if(uploadPartRequest.getMd5Digest() == null
&& uploadPartRequest.isCalculateMD5()
&& isCurr.markSupported()) {
try {
uploadPartRequest.setMd5Digest(Md5Utils.md5AsBase64(isCurr));
request.addHeader("Content-MD5", uploadPartRequest.getMd5Digest());
isCurr.reset();
} catch (IOException e) {
// Best effort: if the MD5 digest cannot be computed (or the stream
// cannot be reset), the part is uploaded without a client-supplied
// Content-MD5 header.
e.printStackTrace();
}
}
MD5DigestCalculatingInputStream md5DigestStream = null;
if (uploadPartRequest.getMd5Digest() == null
&& !skipMd5CheckStrategy.skipClientSideValidationPerRequest(uploadPartRequest)) {
/*
* If the user hasn't set the content MD5, then we don't want to buffer the whole
* stream in memory just to calculate it. Instead, we can calculate it on the fly
* and validate it with the returned ETag from the object upload.
*/
isCurr = md5DigestStream = new MD5DigestCalculatingInputStream(isCurr);
}
final ProgressListener listener = uploadPartRequest.getGeneralProgressListener();
publishProgress(listener, ProgressEventType.TRANSFER_PART_STARTED_EVENT);
return doUploadPart(bucketName, key, uploadId, partNumber,
partSize, request, isCurr, md5DigestStream, listener);
} finally {
cleanupDataSource(uploadPartRequest, fileOrig, isOrig, isCurr, log);
}
}
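/*
* Illustrative usage sketch (not part of the original source): a minimal
* initiate -> uploadPart -> complete sequence for a local file, assuming an
* AmazonS3 client named "s3", an assumed file path, and a 5 MB part size:
*
*   File file = new File("/tmp/data.bin");
*   InitiateMultipartUploadResult init = s3.initiateMultipartUpload(
*           new InitiateMultipartUploadRequest("my-bucket", "my-key"));
*   List<PartETag> partETags = new ArrayList<PartETag>();
*   long partSize = 5 * 1024 * 1024;
*   long offset = 0;
*   for (int partNumber = 1; offset < file.length(); partNumber++, offset += partSize) {
*       long size = Math.min(partSize, file.length() - offset);
*       UploadPartResult part = s3.uploadPart(new UploadPartRequest()
*               .withBucketName("my-bucket").withKey("my-key")
*               .withUploadId(init.getUploadId())
*               .withPartNumber(partNumber)
*               .withFile(file).withFileOffset(offset).withPartSize(size)
*               .withLastPart(offset + size >= file.length()));
*       partETags.add(part.getPartETag());
*   }
*   s3.completeMultipartUpload(new CompleteMultipartUploadRequest(
*           "my-bucket", "my-key", init.getUploadId(), partETags));
*/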
private UploadPartResult doUploadPart(final String bucketName,
final String key, final String uploadId, final int partNumber,
final long partSize, Request<UploadPartRequest> request,
InputStream inputStream,
MD5DigestCalculatingInputStream md5DigestStream,
final ProgressListener listener) {
try {
request.setContent(inputStream);
ObjectMetadata metadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
final String etag = metadata.getETag();
if (md5DigestStream != null
&& !skipMd5CheckStrategy.skipClientSideValidationPerUploadPartResponse(metadata)) {
byte[] clientSideHash = md5DigestStream.getMd5Digest();
byte[] serverSideHash = BinaryUtils.fromHex(etag);
if (!Arrays.equals(clientSideHash, serverSideHash)) {
final String info = "bucketName: " + bucketName + ", key: "
+ key + ", uploadId: " + uploadId
+ ", partNumber: " + partNumber + ", partSize: "
+ partSize;
throw new SdkClientException(
"Unable to verify integrity of data upload. "
+ "Client calculated content hash (contentMD5: "
+ Base16.encodeAsString(clientSideHash)
+ " in hex) didn't match hash (etag: "
+ etag
+ " in hex) calculated by Amazon S3. "
+ "You may need to delete the data stored in Amazon S3. "
+ "(" + info + ")");
}
}
publishProgress(listener, ProgressEventType.TRANSFER_PART_COMPLETED_EVENT);
UploadPartResult result = new UploadPartResult();
result.setETag(etag);
result.setPartNumber(partNumber);
result.setSSEAlgorithm(metadata.getSSEAlgorithm());
result.setSSECustomerAlgorithm(metadata.getSSECustomerAlgorithm());
result.setSSECustomerKeyMd5(metadata.getSSECustomerKeyMd5());
result.setRequesterCharged(metadata.isRequesterCharged());
return result;
} catch (Throwable t) {
publishProgress(listener, ProgressEventType.TRANSFER_PART_FAILED_EVENT);
// Leaving this here in case anyone is depending on it, but it's
// inconsistent with other methods which only generate one of
// COMPLETED_EVENT_CODE or FAILED_EVENT_CODE.
publishProgress(listener, ProgressEventType.TRANSFER_PART_COMPLETED_EVENT);
throw failure(t);
}
}
@Override
public S3ResponseMetadata getCachedResponseMetadata(AmazonWebServiceRequest request) {
return (S3ResponseMetadata)client.getResponseMetadataForRequest(request);
}
@Override
public void restoreObject(RestoreObjectRequest restoreObjectRequest)
throws AmazonServiceException {
restoreObjectRequest = beforeClientExecution(restoreObjectRequest);
String bucketName = restoreObjectRequest.getBucketName();
String key = restoreObjectRequest.getKey();
String versionId = restoreObjectRequest.getVersionId();
int expirationIndays = restoreObjectRequest.getExpirationInDays();
rejectNull(bucketName, "The bucket name parameter must be specified when copying a glacier object");
rejectNull(key, "The key parameter must be specified when copying a glacier object");
if (expirationIndays == -1) {
throw new IllegalArgumentException("The expiration in days parameter must be specified when copying a glacier object");
}
Request<RestoreObjectRequest> request = createRequest(bucketName, key, restoreObjectRequest, HttpMethodName.POST);
request.addParameter("restore", null);
if (versionId != null) {
request.addParameter("versionId", versionId);
}
populateRequesterPaysHeader(request, restoreObjectRequest.isRequesterPays());
byte[] content = RequestXmlFactory.convertToXmlByteArray(restoreObjectRequest);
request.addHeader("Content-Length", String.valueOf(content.length));
request.addHeader("Content-Type", "application/xml");
request.setContent(new ByteArrayInputStream(content));
try {
byte[] md5 = Md5Utils.computeMD5Hash(content);
String md5Base64 = BinaryUtils.toBase64(md5);
request.addHeader("Content-MD5", md5Base64);
} catch (Exception e) {
throw new SdkClientException("Couldn't compute md5 sum", e);
}
invoke(request, voidResponseHandler, bucketName, key);
}
@Override
public void restoreObject(String bucketName, String key, int expirationInDays)
throws AmazonServiceException {
restoreObject(new RestoreObjectRequest(bucketName, key, expirationInDays));
}
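/*
* Illustrative usage sketch (not part of the original source): requesting a
* temporary two-day restore of an archived object, assuming an AmazonS3
* client named "s3":
*
*   s3.restoreObject("my-bucket", "archived-key", 2);
*
* The request is sent as POST ?restore with an XML body; an expiration of -1
* (the unset default) is rejected above with an IllegalArgumentException.
*/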
@Override
public PutObjectResult putObject(String bucketName, String key, String content)
throws AmazonServiceException, SdkClientException {
rejectNull(bucketName, "Bucket name must be provided");
rejectNull(key, "Object key must be provided");
rejectNull(content, "String content must be provided");
byte[] contentBytes = content.getBytes(StringUtils.UTF8);
InputStream is = new ByteArrayInputStream(contentBytes);
ObjectMetadata metadata = new ObjectMetadata();
metadata.setContentType("text/plain");
metadata.setContentLength(contentBytes.length);
return putObject(new PutObjectRequest(bucketName, key, is, metadata));
}
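/*
* Illustrative usage sketch (not part of the original source): the String
* convenience overload above stores UTF-8, text/plain content directly:
*
*   s3.putObject("my-bucket", "notes.txt", "hello world");
*/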
/*
* Private Interface
*/
/**
* Asserts that the specified parameter value is not null and, if it is,
* throws an IllegalArgumentException with the specified error message.
*
* @param parameterValue
* The parameter value being checked.
* @param errorMessage
* The error message to include in the IllegalArgumentException
* if the specified parameter is null.
*/
private void rejectNull(Object parameterValue, String errorMessage) {
if (parameterValue == null) throw new IllegalArgumentException(errorMessage);
}
/**
* Gets the Amazon S3 {@link AccessControlList} (ACL) for the specified resource
* (the bucket if only the bucketName parameter is specified, otherwise the object
* with the specified key in the bucket).
*
* @param bucketName
* The name of the bucket whose ACL should be returned if the key
* parameter is not specified, otherwise the bucket containing
* the specified key.
* @param key
* The object key whose ACL should be retrieved. If not specified,
* the bucket's ACL is returned.
* @param versionId
* The version ID of the object version whose ACL is being
* retrieved.
* @param isRequesterPays
* Whether the requester-pays header should be populated on the request.
* @param originalRequest
* The original, user facing request object.
*
* @return The S3 ACL for the specified resource.
*/
private AccessControlList getAcl(String bucketName, String key, String versionId,
boolean isRequesterPays, AmazonWebServiceRequest originalRequest) {
if (originalRequest == null) originalRequest = new GenericBucketRequest(bucketName);
Request<AmazonWebServiceRequest> request = createRequest(bucketName, key, originalRequest, HttpMethodName.GET);
request.addParameter("acl", null);
if (versionId != null) {
request.addParameter("versionId", versionId);
}
populateRequesterPaysHeader(request, isRequesterPays);
@SuppressWarnings("unchecked")
ResponseHeaderHandlerChain<AccessControlList> responseHandler = new ResponseHeaderHandlerChain<AccessControlList>(
new Unmarshallers.AccessControlListUnmarshaller(),
new S3RequesterChargedHeaderHandler<AccessControlList>());
return invoke(request, responseHandler, bucketName, key);
}
/**
* Sets the Canned ACL for the specified resource in S3. If only bucketName
* is specified, the canned ACL will be applied to the bucket, otherwise if
* bucketName and key are specified, the canned ACL will be applied to the
* object.
*
* @param bucketName
* The name of the bucket containing the specified key, or if no
* key is listed, the bucket whose ACL will be set.
* @param key
* The optional object key within the specified bucket whose ACL
* will be set. If not specified, the bucket ACL will be set.
* @param versionId
* The version ID of the object version whose ACL is being set.
* @param cannedAcl
* The canned ACL to apply to the resource.
* @param originalRequest
* The original, user facing request object.
*/
private void setAcl(String bucketName, String key, String versionId, CannedAccessControlList cannedAcl, boolean isRequesterPays,
AmazonWebServiceRequest originalRequest) {
if (originalRequest == null) originalRequest = new GenericBucketRequest(bucketName);
Request<AmazonWebServiceRequest> request = createRequest(bucketName, key, originalRequest, HttpMethodName.PUT);
request.addParameter("acl", null);
request.addHeader(Headers.S3_CANNED_ACL, cannedAcl.toString());
if (versionId != null) request.addParameter("versionId", versionId);
populateRequesterPaysHeader(request, isRequesterPays);
invoke(request, voidResponseHandler, bucketName, key);
}
/**
* Sets the ACL for the specified resource in S3. If only bucketName is
* specified, the ACL will be applied to the bucket, otherwise if bucketName
* and key are specified, the ACL will be applied to the object.
*
* @param bucketName
* The name of the bucket containing the specified key, or if no
* key is listed, the bucket whose ACL will be set.
* @param key
* The optional object key within the specified bucket whose ACL
* will be set. If not specified, the bucket ACL will be set.
* @param versionId
* The version ID of the object version whose ACL is being set.
* @param acl
* The ACL to apply to the resource.
* @param originalRequest
* The original, user facing request object.
*/
private void setAcl(String bucketName, String key, String versionId, AccessControlList acl, boolean isRequesterPays,
AmazonWebServiceRequest originalRequest) {
if (originalRequest == null) originalRequest = new GenericBucketRequest(bucketName);
Request<AmazonWebServiceRequest> request = createRequest(bucketName, key, originalRequest, HttpMethodName.PUT);
request.addParameter("acl", null);
if (versionId != null) request.addParameter("versionId", versionId);
populateRequesterPaysHeader(request, isRequesterPays);
byte[] aclAsXml = new AclXmlFactory().convertToXmlByteArray(acl);
request.addHeader("Content-Type", "application/xml");
request.addHeader("Content-Length", String.valueOf(aclAsXml.length));
request.setContent(new ByteArrayInputStream(aclAsXml));
invoke(request, voidResponseHandler, bucketName, key);
}
/**
* Returns a "complete" S3 specific signer, taking the S3 bucket, key, and
* the current S3 client configuration into account.
*/
protected Signer createSigner(final Request<?> request,
final String bucketName,
final String key) {
return createSigner(request, bucketName, key, false);
}
/**
* Returns a "complete" S3 specific signer, taking the S3 bucket, key, and
* the current S3 client configuration into account.
*/
protected Signer createSigner(final Request<?> request,
final String bucketName,
final String key,
final boolean isAdditionalHeadRequestToFindRegion) {
// Instead of using request.getEndpoint() for this parameter, we use the client-wide endpoint,
// because in accelerate mode the endpoint in the request is regionless. We need the
// client-wide endpoint to fetch the region information and pick the correct signer.
URI uri = clientOptions.isAccelerateModeEnabled() ? endpoint : request.getEndpoint();
final Signer signer = getSignerByURI(uri);
if(this.awsCredentialsProvider.getCredentials() instanceof IBMOAuthCredentials) {
IBMOAuthCredentials oAuthCreds = (IBMOAuthCredentials)this.awsCredentialsProvider.getCredentials();
if (oAuthCreds.getApiKey() != null || oAuthCreds.getTokenManager() != null) {
return new IBMOAuthSigner(clientConfiguration);
}
}
if (!isSignerOverridden()) {
if ((signer instanceof AWSS3V4Signer) && bucketRegionShouldBeCached(request)) {
String region = bucketRegionCache.get(bucketName);
if (region != null) {
// If cache contains the region for the bucket, create an endpoint for the region and
// update the request with that endpoint.
resolveRequestEndpoint(request, bucketName, key, RuntimeHttpUtils.toUri(RegionUtils.getRegion(region).getServiceEndpoint(S3_SERVICE_NAME), clientConfiguration));
return updateSigV4SignerWithRegion((AWSS3V4Signer) signer, region);
} else if (request.getOriginalRequest() instanceof GeneratePresignedUrlRequest) {
return createSigV2Signer(request, bucketName, key);
} else if (isAdditionalHeadRequestToFindRegion) {
return updateSigV4SignerWithRegion((AWSS3V4Signer) signer, "us-east-1");
}
}
String regionOverride = getSignerRegionOverride();
if (regionOverride != null) {
return updateSigV4SignerWithRegion(new AWSS3V4Signer(), regionOverride);
}
}
if (signer instanceof S3Signer) {
// The old S3Signer needs a method and path passed to its
// constructor; if that's what we should use, getSigner()
// will return a dummy instance and we need to create a
// new one with the appropriate values for this request.
return createSigV2Signer(request, bucketName, key);
}
return signer;
}
private S3Signer createSigV2Signer(final Request<?> request,
final String bucketName,
final String key) {
String resourcePath = "/" +
((bucketName != null) ? bucketName + "/" : "") +
((key != null) ? key : "");
return new S3Signer(request.getHttpMethod().toString(), resourcePath);
}
private AWSS3V4Signer updateSigV4SignerWithRegion(final AWSS3V4Signer v4Signer, String region) {
v4Signer.setServiceName(getServiceNameIntern());
v4Signer.setRegionName(region);
return v4Signer;
}
/**
* Return the region string that should be used for signing requests sent by
* this client. This method can only return null if both of the following
* are true:
* (a) the user has never specified a region via setRegion/configureRegion/setSignerRegionOverride
* (b) the user has specified a client endpoint that is known to be a global S3 endpoint
*/
private String getSignerRegion() {
String region = getSignerRegionOverride();
if (region == null) {
region = clientRegion;
}
return region;
}
/**
* Has the signer been explicitly overridden in the configuration?
*/
private boolean isSignerOverridden() {
return clientConfiguration != null
&& clientConfiguration.getSignerOverride() != null;
}
/**
* Returns true if the region required for signing could not be computed from the client or the request.
*
* This is the case when the standard endpoint is in use and neither an explicit region nor a signer
* override has been provided by the user.
*/
private boolean noExplicitRegionProvided(final Request<?> request) {
return isStandardEndpoint(request.getEndpoint()) &&
getSignerRegion() == null;
}
private boolean isStandardEndpoint(URI endpoint) {
return endpoint.getHost().endsWith(Constants.S3_HOSTNAME);
}
/**
* Pre-signs the specified request, using a signature query-string
* parameter.
*
* @param request
* The request to sign.
* @param methodName
* The HTTP method (GET, PUT, DELETE, HEAD) for the specified
* request.
* @param bucketName
* The name of the bucket involved in the request. If the request
* is not an operation on a bucket this parameter should be null.
* @param key
* The object key involved in the request. If the request is not
* an operation on an object, this parameter should be null.
* @param expiration
* The time at which the signed request is no longer valid, and
* will stop working.
* @param subResource
* The optional sub-resource being requested as part of the
* request (e.g. "location", "acl", "logging", or "torrent").
*/
protected <T> void presignRequest(Request<T> request, HttpMethod methodName,
String bucketName, String key, Date expiration, String subResource) {
// Run any additional request handlers if present
beforeRequest(request);
String resourcePath = "/" +
((bucketName != null) ? bucketName + "/" : "") +
((key != null) ? SdkHttpUtils.urlEncode(key, true) : "") +
((subResource != null) ? "?" + subResource : "");
// Make sure the resource-path for signing does not contain
// any consecutive "/"s.
// Note that we should also follow the same rule to escape
// consecutive "/"s when generating the presigned URL.
// See ServiceUtils#convertRequestToUrl(...)
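// For example, with an assumed bucket "my-bucket" and key "a//b", the
// resource path "/my-bucket/a//b" becomes "/my-bucket/a/%2Fb" here.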
resourcePath = resourcePath.replaceAll("(?<=/)/", "%2F");
new S3QueryStringSigner(methodName.toString(), resourcePath, expiration)
.sign(request, CredentialUtils.getCredentialsProvider(request.getOriginalRequest(), awsCredentialsProvider).getCredentials());
// The Amazon S3 DevPay token header is a special exception and can be safely moved
// from the request's headers into the query string to ensure that it travels along
// with the pre-signed URL when it's sent back to Amazon S3.
if (request.getHeaders().containsKey(Headers.SECURITY_TOKEN)) {
String value = request.getHeaders().get(Headers.SECURITY_TOKEN);
request.addParameter(Headers.SECURITY_TOKEN, value);
request.getHeaders().remove(Headers.SECURITY_TOKEN);
}
}
private void beforeRequest(Request<?> request) {
if (requestHandler2s != null) {
for (RequestHandler2 requestHandler2 : requestHandler2s) {
requestHandler2.beforeRequest(request);
}
}
}
/**
*
* Populates the specified request object with the appropriate headers from
* the {@link ObjectMetadata} object.
*
*
* @param request
* The request to populate with headers.
* @param metadata
* The metadata containing the header information to include in
* the request.
*/
protected static void populateRequestMetadata(Request<?> request, ObjectMetadata metadata) {
Map<String, Object> rawMetadata = metadata.getRawMetadata();
if (rawMetadata != null) {
for (Entry<String, Object> entry : rawMetadata.entrySet()) {
request.addHeader(entry.getKey(), entry.getValue().toString());
}
}
Date httpExpiresDate = metadata.getHttpExpiresDate();
if (httpExpiresDate != null) {
request.addHeader(Headers.EXPIRES, DateUtils.formatRFC822Date(httpExpiresDate));
}
Map<String, String> userMetadata = metadata.getUserMetadata();
if (userMetadata != null) {
for (Entry<String, String> entry : userMetadata.entrySet()) {
String key = entry.getKey();
String value = entry.getValue();
if (key != null) key = key.trim();
if (value != null) value = value.trim();
request.addHeader(Headers.S3_USER_METADATA_PREFIX + key, value);
}
}
}
/**
* Populates the specified request with the {@link Constants#REQUESTER_PAYS} value in the
* {@link Headers#REQUESTER_PAYS_HEADER} header, if isRequesterPays is true.
*
* @param request
* The specified request to populate.
* @param isRequesterPays
* Whether the requester-pays header should be added to the request.
*/
protected static void populateRequesterPaysHeader(Request<?> request, boolean isRequesterPays) {
if (isRequesterPays) {
request.addHeader(Headers.REQUESTER_PAYS_HEADER, Constants.REQUESTER_PAYS);
}
}
/**
*
* Populates the specified request with the specified Multi-Factor
* Authentication (MFA) details. This includes the MFA header with device serial
* number and generated token. Since all requests which include the MFA
* header must be sent over HTTPS, this operation also configures the request object to
* use HTTPS instead of HTTP.
*
*
* @param request
* The request to populate.
* @param mfa
* The Multi-Factor Authentication information.
*/
private void populateRequestWithMfaDetails(Request<?> request, MultiFactorAuthentication mfa) {
if (mfa == null) return;
String endpoint = request.getEndpoint().toString();
if (endpoint.startsWith("http://")) {
String httpsEndpoint = endpoint.replace("http://", "https://");
request.setEndpoint(URI.create(httpsEndpoint));
log.info("Overriding current endpoint to use HTTPS " +
"as required by S3 for requests containing an MFA header");
}
request.addHeader(Headers.S3_MFA,
mfa.getDeviceSerialNumber() + " " + mfa.getToken());
}
/**
* Populates the specified request with the numerous options available in
* CopyObjectRequest.
*
* @param request
* The request to populate with headers to represent all the
* options expressed in the CopyObjectRequest object.
* @param copyObjectRequest
* The object containing all the options for copying an object in
* Amazon S3.
*/
private void populateRequestWithCopyObjectParameters(Request<? extends AmazonWebServiceRequest> request, CopyObjectRequest copyObjectRequest) {
String copySourceHeader =
"/" + SdkHttpUtils.urlEncode(copyObjectRequest.getSourceBucketName(), true)
+ "/" + SdkHttpUtils.urlEncode(copyObjectRequest.getSourceKey(), true);
if (copyObjectRequest.getSourceVersionId() != null) {
copySourceHeader += "?versionId=" + copyObjectRequest.getSourceVersionId();
}
request.addHeader("x-amz-copy-source", copySourceHeader);
addDateHeader(request, Headers.COPY_SOURCE_IF_MODIFIED_SINCE,
copyObjectRequest.getModifiedSinceConstraint());
addDateHeader(request, Headers.COPY_SOURCE_IF_UNMODIFIED_SINCE,
copyObjectRequest.getUnmodifiedSinceConstraint());
addStringListHeader(request, Headers.COPY_SOURCE_IF_MATCH,
copyObjectRequest.getMatchingETagConstraints());
addStringListHeader(request, Headers.COPY_SOURCE_IF_NO_MATCH,
copyObjectRequest.getNonmatchingETagConstraints());
if (copyObjectRequest.getAccessControlList() != null) {
addAclHeaders(request, copyObjectRequest.getAccessControlList());
} else if (copyObjectRequest.getCannedAccessControlList() != null) {
request.addHeader(Headers.S3_CANNED_ACL,
copyObjectRequest.getCannedAccessControlList().toString());
}
if (copyObjectRequest.getStorageClass() != null) {
request.addHeader(Headers.STORAGE_CLASS, copyObjectRequest.getStorageClass());
}
if (copyObjectRequest.getRedirectLocation() != null) {
request.addHeader(Headers.REDIRECT_LOCATION, copyObjectRequest.getRedirectLocation());
}
if (copyObjectRequest.getRetentionDirective() != null) {
request.addHeader(Headers.RETENTION_DIRECTIVE, copyObjectRequest.getRetentionDirective().toString());
}
if (copyObjectRequest.getRetentionExpirationDate() != null) {
request.addHeader(Headers.RETENTION_EXPIRATION_DATE,
DateUtils.formatRFC822Date(copyObjectRequest.getRetentionExpirationDate()));
}
if (copyObjectRequest.getRetentionLegalHoldId() != null) {
request.addHeader(Headers.RETENTION_LEGAL_HOLD_ID, copyObjectRequest.getRetentionLegalHoldId());
}
if (copyObjectRequest.getRetentionPeriod() != null) {
request.addHeader(Headers.RETENTION_PERIOD, copyObjectRequest.getRetentionPeriod().toString());
}
populateRequesterPaysHeader(request, copyObjectRequest.isRequesterPays());
ObjectMetadata newObjectMetadata = copyObjectRequest.getNewObjectMetadata();
if (copyObjectRequest.getMetadataDirective() != null) {
request.addHeader(Headers.METADATA_DIRECTIVE, copyObjectRequest.getMetadataDirective());
} else if (newObjectMetadata != null) {
request.addHeader(Headers.METADATA_DIRECTIVE, "REPLACE");
}
if (newObjectMetadata != null) {
populateRequestMetadata(request, newObjectMetadata);
}
ObjectTagging newObjectTagging = copyObjectRequest.getNewObjectTagging();
if (newObjectTagging != null) {
request.addHeader(Headers.TAGGING_DIRECTIVE, "REPLACE");
request.addHeader(Headers.S3_TAGGING, urlEncodeTags(newObjectTagging));
}
// Populate the SSE-C parameters for the destination object
populateSourceSSE_C(request, copyObjectRequest.getSourceSSECustomerKey());
populateSSE_C(request, copyObjectRequest.getDestinationSSECustomerKey());
}
/**
* Populates the specified request with the numerous options available in
* CopyPartRequest.
*
* @param request
* The request to populate with headers to represent all the
* options expressed in the CopyPartRequest object.
* @param copyPartRequest
* The object containing all the options for copying a part of an
* object in Amazon S3.
*/
private static void populateRequestWithCopyPartParameters(Request<?> request, CopyPartRequest copyPartRequest) {
String copySourceHeader =
"/" + SdkHttpUtils.urlEncode(copyPartRequest.getSourceBucketName(), true)
+ "/" + SdkHttpUtils.urlEncode(copyPartRequest.getSourceKey(), true);
if (copyPartRequest.getSourceVersionId() != null) {
copySourceHeader += "?versionId=" + copyPartRequest.getSourceVersionId();
}
request.addHeader("x-amz-copy-source", copySourceHeader);
addDateHeader(request, Headers.COPY_SOURCE_IF_MODIFIED_SINCE,
copyPartRequest.getModifiedSinceConstraint());
addDateHeader(request, Headers.COPY_SOURCE_IF_UNMODIFIED_SINCE,
copyPartRequest.getUnmodifiedSinceConstraint());
addStringListHeader(request, Headers.COPY_SOURCE_IF_MATCH,
copyPartRequest.getMatchingETagConstraints());
addStringListHeader(request, Headers.COPY_SOURCE_IF_NO_MATCH,
copyPartRequest.getNonmatchingETagConstraints());
if ( copyPartRequest.getFirstByte() != null && copyPartRequest.getLastByte() != null ) {
String range = "bytes=" + copyPartRequest.getFirstByte() + "-" + copyPartRequest.getLastByte();
request.addHeader(Headers.COPY_PART_RANGE, range);
}
// Populate the SSE-C parameters for the destination object
populateSourceSSE_C(request, copyPartRequest.getSourceSSECustomerKey());
populateSSE_C(request, copyPartRequest.getDestinationSSECustomerKey());
}
/**
* Populates the specified request with the numerous attributes available in
* SSEWithCustomerKeyRequest.
*
* @param request
* The request to populate with headers to represent all the
* options expressed in the ServerSideEncryptionWithCustomerKeyRequest
* object.
* @param sseKey
* The customer-provided server-side encryption key to apply to the
* request, or null if none was specified.
*/
private static void populateSSE_C(Request<?> request, SSECustomerKey sseKey) {
if (sseKey == null) return;
addHeaderIfNotNull(request, Headers.SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
sseKey.getAlgorithm());
addHeaderIfNotNull(request, Headers.SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
sseKey.getKey());
addHeaderIfNotNull(request, Headers.SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
sseKey.getMd5());
// Calculate the MD5 hash of the encryption key and fill it in the
// header, if the user didn't specify it in the metadata
if (sseKey.getKey() != null
&& sseKey.getMd5() == null) {
String encryptionKey_b64 = sseKey.getKey();
byte[] encryptionKey = Base64.decode(encryptionKey_b64);
request.addHeader(Headers.SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
Md5Utils.md5AsBase64(encryptionKey));
}
}
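/*
* Illustrative usage sketch (not part of the original source): callers supply
* the customer key on the individual operation request; for example, with an
* assumed 256-bit AES key already Base64-encoded in "base64Key":
*
*   SSECustomerKey sseKey = new SSECustomerKey(base64Key);
*   s3.putObject(new PutObjectRequest("my-bucket", "my-key", new File("/tmp/data.bin"))
*           .withSSECustomerKey(sseKey));
*   S3Object obj = s3.getObject(new GetObjectRequest("my-bucket", "my-key")
*           .withSSECustomerKey(sseKey));
*
* As the helper above shows, the key's MD5 header is filled in automatically
* when it is not supplied on the SSECustomerKey.
*/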
private static void populateSourceSSE_C(Request<?> request, SSECustomerKey sseKey) {
if (sseKey == null) return;
// Populate the SSE-C parameters for the source object
addHeaderIfNotNull(request, Headers.COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
sseKey.getAlgorithm());
addHeaderIfNotNull(request, Headers.COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
sseKey.getKey());
addHeaderIfNotNull(request, Headers.COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
sseKey.getMd5());
// Calculate the MD5 hash of the encryption key and fill it in the
// header, if the user didn't specify it in the metadata
if (sseKey.getKey() != null
&& sseKey.getMd5() == null) {
String encryptionKey_b64 = sseKey.getKey();
byte[] encryptionKey = Base64.decode(encryptionKey_b64);
request.addHeader(Headers.COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
Md5Utils.md5AsBase64(encryptionKey));
}
}
private static void populateSSE_KMS(Request<?> request,
SSEAwsKeyManagementParams sseParams) {
if (sseParams != null) {
addHeaderIfNotNull(request, Headers.SERVER_SIDE_ENCRYPTION,
sseParams.getEncryption());
addHeaderIfNotNull(request,
Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID,
sseParams.getAwsKmsKeyId());
}
}
/**
* Adds the part number to the specified request, if partNumber is not null.
*
* @param request
* The request to add the partNumber to.
* @param partNumber
* The part number to be added.
*/
private void addPartNumberIfNotNull(Request<?> request, Integer partNumber) {
if (partNumber != null) {
request.addParameter("partNumber", partNumber.toString());
}
}
/**
* Adds the specified header to the specified request, if the header value
* is not null.
*
* @param request
* The request to add the header to.
* @param header
* The header name.
* @param value
* The header value.
*/
private static void addHeaderIfNotNull(Request<?> request, String header, String value) {
if (value != null) {
request.addHeader(header, value);
}
}
/**
* Adds the specified header to the specified request, if the header value
* is not null and is not a trimmed empty string.
*
* @param request
* The request to add the header to.
* @param header
* The header name.
* @param value
* The header value.
*/
private static void addHeaderIfNotEmpty(Request<?> request, String header, String value) {
if (StringUtils.hasValue(value)) {
request.addHeader(header, value);
}
}
/**
* Adds the specified header to the specified request, if the header value
* is not null and is not a trimmed empty string.
*
* @param request
* The request to add the header to.
* @param header
* The header name.
* @param value
* The header value.
*/
private static void addHeaderIfNotEmptyForAwsRequest(AmazonWebServiceRequest request, String header, String value) {
if (StringUtils.hasValue(value)) {
request.putCustomRequestHeader(header, value);
}
}
/**
* Adds the specified parameter to the specified request, if the parameter
* value is not null.
*
* @param request
* The request to add the parameter to.
* @param paramName
* The parameter name.
* @param paramValue
* The parameter value.
*/
private static void addParameterIfNotNull(Request<?> request, String paramName, Integer paramValue) {
if (paramValue != null) {
addParameterIfNotNull(request, paramName, paramValue.toString());
}
}
/**
* Adds the specified parameter to the specified request, if the parameter
* value is not null.
*
* @param request
* The request to add the parameter to.
* @param paramName
* The parameter name.
* @param paramValue
* The parameter value.
*/
private static void addParameterIfNotNull(Request<?> request, String paramName, String paramValue) {
if (paramValue != null) {
request.addParameter(paramName, paramValue);
}
}
/**
* Adds the specified date header in RFC 822 date format to the specified
* request. This method will not add a date header if the specified date
* value is null.
*
* @param request
* The request to add the header to.
* @param header
* The header name.
* @param value
* The header value.
*/
private static void addDateHeader(Request<?> request, String header, Date value) {
if (value != null) {
request.addHeader(header, ServiceUtils.formatRfc822Date(value));
}
}
/**
* Adds the specified string list header, joined together separated with
* commas, to the specified request. This method will not add a string list
* header if the specified values are null or empty.
*
* @param request
* The request to add the header to.
* @param header
* The header name.
* @param values
* The list of strings to join together for the header value.
*/
private static void addStringListHeader(Request<?> request, String header, List<String> values) {
if (values != null && !values.isEmpty()) {
request.addHeader(header, ServiceUtils.join(values));
}
}
/**
* Adds response header parameters to the given request, if non-null.
*
* @param request
* The request to add the response header parameters to.
* @param responseHeaders
* The full set of response headers to add, or null for none.
*/
private static void addResponseHeaderParameters(Request<?> request, ResponseHeaderOverrides responseHeaders) {
if ( responseHeaders != null ) {
if ( responseHeaders.getCacheControl() != null ) {
request.addParameter(ResponseHeaderOverrides.RESPONSE_HEADER_CACHE_CONTROL, responseHeaders.getCacheControl());
}
if ( responseHeaders.getContentDisposition() != null ) {
request.addParameter(ResponseHeaderOverrides.RESPONSE_HEADER_CONTENT_DISPOSITION,
responseHeaders.getContentDisposition());
}
if ( responseHeaders.getContentEncoding() != null ) {
request.addParameter(ResponseHeaderOverrides.RESPONSE_HEADER_CONTENT_ENCODING,
responseHeaders.getContentEncoding());
}
if ( responseHeaders.getContentLanguage() != null ) {
request.addParameter(ResponseHeaderOverrides.RESPONSE_HEADER_CONTENT_LANGUAGE,
responseHeaders.getContentLanguage());
}
if ( responseHeaders.getContentType() != null ) {
request.addParameter(ResponseHeaderOverrides.RESPONSE_HEADER_CONTENT_TYPE, responseHeaders.getContentType());
}
if ( responseHeaders.getExpires() != null ) {
request.addParameter(ResponseHeaderOverrides.RESPONSE_HEADER_EXPIRES, responseHeaders.getExpires());
}
}
}
/**
* Returns the URL to the key in the bucket given, using the client's scheme
* and endpoint. Returns null if the given bucket and key cannot be
* converted to a URL.
*/
public String getResourceUrl(String bucketName, String key) {
try {
return getUrl(bucketName, key).toString();
} catch ( Exception e ) {
return null;
}
}
@Override
public URL getUrl(String bucketName, String key) {
Request<?> request = new DefaultRequest