// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.storage.blob.specialized;
import com.azure.core.annotation.ReturnType;
import com.azure.core.annotation.ServiceMethod;
import com.azure.core.http.HttpPipeline;
import com.azure.core.http.HttpResponse;
import com.azure.core.http.RequestConditions;
import com.azure.core.http.rest.Response;
import com.azure.core.http.rest.SimpleResponse;
import com.azure.core.http.rest.StreamResponse;
import com.azure.core.util.BinaryData;
import com.azure.core.util.Context;
import com.azure.core.util.CoreUtils;
import com.azure.core.util.FluxUtil;
import com.azure.core.util.ProgressListener;
import com.azure.core.util.ProgressReporter;
import com.azure.core.util.io.IOUtils;
import com.azure.core.util.logging.ClientLogger;
import com.azure.core.util.polling.LongRunningOperationStatus;
import com.azure.core.util.polling.PollResponse;
import com.azure.core.util.polling.PollerFlux;
import com.azure.storage.blob.BlobContainerAsyncClient;
import com.azure.storage.blob.BlobContainerClientBuilder;
import com.azure.storage.blob.BlobServiceAsyncClient;
import com.azure.storage.blob.BlobServiceVersion;
import com.azure.storage.blob.implementation.AzureBlobStorageImpl;
import com.azure.storage.blob.implementation.AzureBlobStorageImplBuilder;
import com.azure.storage.blob.implementation.accesshelpers.BlobDownloadAsyncResponseConstructorProxy;
import com.azure.storage.blob.implementation.accesshelpers.BlobPropertiesConstructorProxy;
import com.azure.storage.blob.implementation.models.BlobPropertiesInternalGetProperties;
import com.azure.storage.blob.implementation.models.BlobTag;
import com.azure.storage.blob.implementation.models.BlobTags;
import com.azure.storage.blob.implementation.models.BlobsDownloadHeaders;
import com.azure.storage.blob.implementation.models.BlobsGetAccountInfoHeaders;
import com.azure.storage.blob.implementation.models.BlobsSetImmutabilityPolicyHeaders;
import com.azure.storage.blob.implementation.models.BlobsStartCopyFromURLHeaders;
import com.azure.storage.blob.implementation.models.EncryptionScope;
import com.azure.storage.blob.implementation.models.InternalBlobLegalHoldResult;
import com.azure.storage.blob.implementation.models.QueryRequest;
import com.azure.storage.blob.implementation.models.QuerySerialization;
import com.azure.storage.blob.implementation.util.BlobQueryReader;
import com.azure.storage.blob.implementation.util.BlobRequestConditionProperty;
import com.azure.storage.blob.implementation.util.BlobSasImplUtil;
import com.azure.storage.blob.implementation.util.ChunkedDownloadUtils;
import com.azure.storage.blob.implementation.util.ModelHelper;
import com.azure.storage.blob.models.AccessTier;
import com.azure.storage.blob.models.BlobBeginCopySourceRequestConditions;
import com.azure.storage.blob.models.BlobCopyInfo;
import com.azure.storage.blob.models.BlobDownloadAsyncResponse;
import com.azure.storage.blob.models.BlobDownloadContentAsyncResponse;
import com.azure.storage.blob.models.BlobDownloadHeaders;
import com.azure.storage.blob.models.BlobErrorCode;
import com.azure.storage.blob.models.BlobHttpHeaders;
import com.azure.storage.blob.models.BlobImmutabilityPolicy;
import com.azure.storage.blob.models.BlobImmutabilityPolicyMode;
import com.azure.storage.blob.models.BlobLegalHoldResult;
import com.azure.storage.blob.models.BlobProperties;
import com.azure.storage.blob.models.BlobQueryAsyncResponse;
import com.azure.storage.blob.models.BlobRange;
import com.azure.storage.blob.models.BlobRequestConditions;
import com.azure.storage.blob.models.BlobStorageException;
import com.azure.storage.blob.models.CopyStatusType;
import com.azure.storage.blob.models.CpkInfo;
import com.azure.storage.blob.models.CustomerProvidedKey;
import com.azure.storage.blob.models.DeleteSnapshotsOptionType;
import com.azure.storage.blob.models.DownloadRetryOptions;
import com.azure.storage.blob.models.ParallelTransferOptions;
import com.azure.storage.blob.models.RehydratePriority;
import com.azure.storage.blob.models.StorageAccountInfo;
import com.azure.storage.blob.models.UserDelegationKey;
import com.azure.storage.blob.options.BlobBeginCopyOptions;
import com.azure.storage.blob.options.BlobCopyFromUrlOptions;
import com.azure.storage.blob.options.BlobDownloadToFileOptions;
import com.azure.storage.blob.options.BlobGetTagsOptions;
import com.azure.storage.blob.options.BlobQueryOptions;
import com.azure.storage.blob.options.BlobSetAccessTierOptions;
import com.azure.storage.blob.options.BlobSetTagsOptions;
import com.azure.storage.blob.sas.BlobServiceSasSignatureValues;
import com.azure.storage.common.StorageSharedKeyCredential;
import com.azure.storage.common.Utility;
import com.azure.storage.common.implementation.SasImplUtils;
import com.azure.storage.common.implementation.StorageImplUtils;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.publisher.SignalType;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URL;
import java.net.URLEncoder;
import java.nio.ByteBuffer;
import java.nio.channels.AsynchronousFileChannel;
import java.nio.charset.Charset;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.nio.file.OpenOption;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.time.Duration;
import java.time.OffsetDateTime;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeoutException;
import java.util.function.BiFunction;
import static com.azure.core.util.FluxUtil.fluxError;
import static com.azure.core.util.FluxUtil.monoError;
import static com.azure.core.util.FluxUtil.withContext;
import static com.azure.core.util.tracing.Tracer.AZ_TRACING_NAMESPACE_KEY;
import static com.azure.storage.common.Utility.STORAGE_TRACING_NAMESPACE_VALUE;
/**
* This class provides a client that contains all operations that apply to any blob type.
*
*
* This client offers the ability to download blobs. Note that uploading data is specific to each type of blob. Please
* refer to the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient} for upload options.
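*
* Code Samples
*
* A minimal, illustrative sketch of obtaining a client of this type through a builder; the connection string,
* container name, and blob name below are placeholder values:
*
* BlobAsyncClientBase client = new BlobClientBuilder()
*     .connectionString("<connection-string>")
*     .containerName("my-container")
*     .blobName("my-blob")
*     .buildAsyncClient();
*
* The builder's {@code buildAsyncClient()} returns a {@code BlobAsyncClient}, which extends this class.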
*/
public class BlobAsyncClientBase {
private static final ClientLogger LOGGER = new ClientLogger(BlobAsyncClientBase.class);
/**
* Backing REST client for the blob client.
*/
protected final AzureBlobStorageImpl azureBlobStorage;
private final String snapshot;
private final String versionId;
private final CpkInfo customerProvidedKey;
/**
* Encryption scope of the blob.
*/
protected final EncryptionScope encryptionScope;
/**
* Storage account name that contains the blob.
*/
protected final String accountName;
/**
* Container name that contains the blob.
*/
protected final String containerName;
/**
* Name of the blob.
*/
protected final String blobName;
/**
* Storage REST API version used in requests to the Storage service.
*/
protected final BlobServiceVersion serviceVersion;
/**
* Protected constructor for use by {@link SpecializedBlobClientBuilder}.
*
* @param pipeline The pipeline used to send and receive service requests.
* @param url The endpoint where to send service requests.
* @param serviceVersion The version of the service to receive requests.
* @param accountName The storage account name.
* @param containerName The container name.
* @param blobName The blob name.
* @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly.
* @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass
* {@code null} to allow the service to use its own encryption.
*/
protected BlobAsyncClientBase(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion,
String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey) {
this(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, null);
}
/**
* Protected constructor for use by {@link SpecializedBlobClientBuilder}.
*
* @param pipeline The pipeline used to send and receive service requests.
* @param url The endpoint where to send service requests.
* @param serviceVersion The version of the service to receive requests.
* @param accountName The storage account name.
* @param containerName The container name.
* @param blobName The blob name.
* @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly.
* @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass
* {@code null} to allow the service to use its own encryption.
* @param encryptionScope Encryption scope used during encryption of the blob's data on the server, pass
* {@code null} to allow the service to use its own encryption.
*/
protected BlobAsyncClientBase(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion,
String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey,
EncryptionScope encryptionScope) {
this(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey,
encryptionScope, null);
}
/**
* Protected constructor for use by {@link SpecializedBlobClientBuilder}.
*
* @param pipeline The pipeline used to send and receive service requests.
* @param url The endpoint where to send service requests.
* @param serviceVersion The version of the service to receive requests.
* @param accountName The storage account name.
* @param containerName The container name.
* @param blobName The blob name.
* @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly.
* @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass
* {@code null} to allow the service to use its own encryption.
* @param encryptionScope Encryption scope used during encryption of the blob's data on the server, pass
* {@code null} to allow the service to use its own encryption.
* @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version.
*/
protected BlobAsyncClientBase(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion,
String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey,
EncryptionScope encryptionScope, String versionId) {
if (snapshot != null && versionId != null) {
throw LOGGER.logExceptionAsError(
new IllegalArgumentException("'snapshot' and 'versionId' cannot be used at the same time."));
}
this.azureBlobStorage = new AzureBlobStorageImplBuilder()
.pipeline(pipeline)
.url(url)
.version(serviceVersion.getVersion())
.buildClient();
this.serviceVersion = serviceVersion;
this.accountName = accountName;
this.containerName = containerName;
this.blobName = Utility.urlDecode(blobName);
this.snapshot = snapshot;
this.customerProvidedKey = customerProvidedKey;
this.encryptionScope = encryptionScope;
this.versionId = versionId;
/* Check to make sure the uri is valid. We don't want the error to occur later in the generated layer
when the sas token has already been applied. */
try {
URI.create(getBlobUrl());
} catch (IllegalArgumentException ex) {
throw LOGGER.logExceptionAsError(ex);
}
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
protected String getEncryptionScope() {
if (encryptionScope == null) {
return null;
}
return encryptionScope.getEncryptionScope();
}
/**
* Creates a new {@link BlobAsyncClientBase} linked to the {@code snapshot} of this blob resource.
*
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobAsyncClientBase} used to interact with the specific snapshot.
*/
public BlobAsyncClientBase getSnapshotClient(String snapshot) {
return new BlobAsyncClientBase(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(),
getContainerName(), getBlobName(), snapshot, getCustomerProvidedKey(), encryptionScope, getVersionId());
}
/**
* Creates a new {@link BlobAsyncClientBase} linked to the {@code versionId} of this blob resource.
*
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobAsyncClientBase} used to interact with the specific version.
*/
public BlobAsyncClientBase getVersionClient(String versionId) {
return new BlobAsyncClientBase(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(),
getContainerName(), getBlobName(), getSnapshotId(), getCustomerProvidedKey(), encryptionScope, versionId);
}
/**
* Creates a new {@link BlobAsyncClientBase} with the specified {@code encryptionScope}.
*
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link BlobAsyncClientBase} with the specified {@code encryptionScope}.
*/
public BlobAsyncClientBase getEncryptionScopeAsyncClient(String encryptionScope) {
EncryptionScope finalEncryptionScope = null;
if (encryptionScope != null) {
finalEncryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope);
}
return new BlobAsyncClientBase(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(),
getContainerName(), getBlobName(), snapshot, getCustomerProvidedKey(), finalEncryptionScope,
getVersionId());
}
/**
* Creates a new {@link BlobAsyncClientBase} with the specified {@code customerProvidedKey}.
*
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link BlobAsyncClientBase} with the specified {@code customerProvidedKey}.
*/
public BlobAsyncClientBase getCustomerProvidedKeyAsyncClient(CustomerProvidedKey customerProvidedKey) {
CpkInfo finalCustomerProvidedKey = null;
if (customerProvidedKey != null) {
finalCustomerProvidedKey = new CpkInfo()
.setEncryptionKey(customerProvidedKey.getKey())
.setEncryptionKeySha256(customerProvidedKey.getKeySha256())
.setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
}
return new BlobAsyncClientBase(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(),
getContainerName(), getBlobName(), snapshot, finalCustomerProvidedKey, encryptionScope,
getVersionId());
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
return azureBlobStorage.getUrl();
}
/**
* Gets the URL of the blob represented by this client.
*
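* Code Samples
*
* A minimal usage sketch; the exact URL shape depends on the account endpoint and on any snapshot or version
* identifier associated with this client:
*
* String blobUrl = client.getBlobUrl();
* System.out.println("The blob URL is " + blobUrl);
*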
* @return the URL.
*/
public String getBlobUrl() {
String blobUrl = azureBlobStorage.getUrl() + "/" + containerName + "/" + Utility.urlEncode(blobName);
if (this.isSnapshot()) {
blobUrl = Utility.appendQueryParameter(blobUrl, "snapshot", getSnapshotId());
}
if (this.getVersionId() != null) {
blobUrl = Utility.appendQueryParameter(blobUrl, "versionid", getVersionId());
}
return blobUrl;
}
/**
* Get the container name.
*
*
* Code Samples
*
*
*
* String containerName = client.getContainerName();
* System.out.println("The name of the container is " + containerName);
*
*
*
* @return The name of the container.
*/
public final String getContainerName() {
return containerName;
}
/**
* Get an async client pointing to the parent container.
*
* Code Samples
*
*
*
* BlobContainerAsyncClient containerClient = client.getContainerAsyncClient();
* System.out.println("The name of the container is " + containerClient.getBlobContainerName());
*
*
*
* @return {@link BlobContainerAsyncClient}
*/
public BlobContainerAsyncClient getContainerAsyncClient() {
return getContainerClientBuilder().buildAsyncClient();
}
final BlobContainerClientBuilder getContainerClientBuilder() {
CustomerProvidedKey encryptionKey = this.customerProvidedKey == null ? null
: new CustomerProvidedKey(this.customerProvidedKey.getEncryptionKey());
return new BlobContainerClientBuilder()
.endpoint(this.getBlobUrl())
.pipeline(this.getHttpPipeline())
.serviceVersion(this.serviceVersion)
.customerProvidedKey(encryptionKey)
.encryptionScope(this.getEncryptionScope());
}
/**
* Decodes and gets the blob name.
*
* Code Samples
*
*
*
* String blobName = client.getBlobName();
* System.out.println("The name of the blob is " + blobName);
*
*
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
return blobName; // The blob name is decoded when the client is constructed
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return azureBlobStorage.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
return customerProvidedKey;
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
return accountName;
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
return serviceVersion;
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
return this.snapshot;
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
return this.snapshot != null;
}
/**
* Gets the versionId for a blob resource
*
* @return A string that represents the versionId of the blob
*/
public String getVersionId() {
return this.versionId;
}
/**
* Determines if the blob this client represents exists in the cloud.
*
* Code Samples
*
*
*
* client.exists().subscribe(response -> System.out.printf("Exists? %b%n", response));
*
*
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Boolean> exists() {
return existsWithResponse().flatMap(FluxUtil::toMono);
}
/**
* Determines if the blob this client represents exists in the cloud.
*
* Code Samples
*
*
*
* client.existsWithResponse().subscribe(response -> System.out.printf("Exists? %b%n", response.getValue()));
*
*
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Boolean>> existsWithResponse() {
try {
return withContext(this::existsWithResponse);
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
Mono<Response<Boolean>> existsWithResponse(Context context) {
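// Existence is derived from a getProperties call: a 404 from the service is mapped to a successful response
// whose value is false, and a "blob uses customer specified encryption" error is mapped to true, because the
// blob exists even though its properties cannot be read without the customer-provided key.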
return this.getPropertiesWithResponse(null, context)
.map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true))
.onErrorResume(t -> t instanceof BlobStorageException
&& BlobErrorCode.BLOB_USES_CUSTOMER_SPECIFIED_ENCRYPTION
.equals(((BlobStorageException) t).getErrorCode()),
t -> {
HttpResponse response = ((BlobStorageException) t).getResponse();
return Mono.just(new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
response.getHeaders(), true));
})
.onErrorResume(t -> t instanceof BlobStorageException && ((BlobStorageException) t).getStatusCode() == 404,
t -> {
HttpResponse response = ((BlobStorageException) t).getResponse();
return Mono.just(new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
response.getHeaders(), false));
});
}
/**
* Copies the data at the source URL to a blob.
*
* This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
*
* Code Samples
*
*
*
* client.beginCopy(url, Duration.ofSeconds(3))
* .subscribe(response -> System.out.printf("Copy identifier: %s%n", response));
*
*
*
* For more information, see the
* Azure Docs
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link PollerFlux} that polls the blob copy operation until it has completed, has failed, or has been
* cancelled.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PollerFlux<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
return beginCopy(sourceUrl, null, null, null, null, null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
*
* This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
*
* Starting a copy operation
* Starting a copy operation and polling on the responses.
*
*
*
* Map<String, String> metadata = Collections.singletonMap("metadata", "value");
* RequestConditions modifiedRequestConditions = new RequestConditions()
* .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(7));
* BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
*
* client.beginCopy(url, metadata, AccessTier.HOT, RehydratePriority.STANDARD,
* modifiedRequestConditions, blobRequestConditions, Duration.ofSeconds(2))
* .subscribe(response -> {
* BlobCopyInfo info = response.getValue();
* System.out.printf("CopyId: %s. Status: %s%n", info.getCopyId(), info.getCopyStatus());
* });
*
*
*
* For more information, see the
* Azure Docs
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link PollerFlux} that polls the blob copy operation until it has completed, has failed, or has been
* cancelled.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PollerFlux<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destRequestConditions, Duration pollInterval) {
try {
return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
.setRehydratePriority(priority).setSourceRequestConditions(
ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
.setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
} catch (RuntimeException ex) {
return PollerFlux.error(LOGGER.logExceptionAsError(ex));
}
}
/**
* Copies the data at the source URL to a blob.
*
* This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
*
* Starting a copy operation
* Starting a copy operation and polling on the responses.
*
*
*
* Map<String, String> metadata = Collections.singletonMap("metadata", "value");
* Map<String, String> tags = Collections.singletonMap("tag", "value");
* BlobBeginCopySourceRequestConditions modifiedRequestConditions = new BlobBeginCopySourceRequestConditions()
* .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(7));
* BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
*
* client.beginCopy(new BlobBeginCopyOptions(url).setMetadata(metadata).setTags(tags).setTier(AccessTier.HOT)
* .setRehydratePriority(RehydratePriority.STANDARD).setSourceRequestConditions(modifiedRequestConditions)
* .setDestinationRequestConditions(blobRequestConditions).setPollInterval(Duration.ofSeconds(2)))
* .subscribe(response -> {
* BlobCopyInfo info = response.getValue();
* System.out.printf("CopyId: %s. Status: %s%n", info.getCopyId(), info.getCopyStatus());
* });
*
*
*
* Cancelling a copy operation
*
*
*
* Map<String, String> metadata = Collections.singletonMap("metadata", "value");
* Map<String, String> tags = Collections.singletonMap("tag", "value");
* BlobBeginCopySourceRequestConditions modifiedRequestConditions = new BlobBeginCopySourceRequestConditions()
* .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(7));
* BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
*
* PollerFlux<BlobCopyInfo, Void> poller = client.beginCopy(new BlobBeginCopyOptions(url)
* .setMetadata(metadata).setTags(tags).setTier(AccessTier.HOT)
* .setRehydratePriority(RehydratePriority.STANDARD).setSourceRequestConditions(modifiedRequestConditions)
* .setDestinationRequestConditions(blobRequestConditions).setPollInterval(Duration.ofSeconds(2)));
*
* poller.take(Duration.ofMinutes(30))
* .last()
* .flatMap(asyncPollResponse -> {
* if (!asyncPollResponse.getStatus().isComplete()) {
* return asyncPollResponse
* .cancelOperation()
* .then(Mono.error(new RuntimeException("Blob copy taking long time, "
* + "operation is cancelled!")));
* }
* return Mono.just(asyncPollResponse);
* }).block();
*
*
*
* For more information, see the
* Azure Docs
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link PollerFlux} that polls the blob copy operation until it has completed, has failed, or has been
* cancelled.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PollerFlux<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
StorageImplUtils.assertNotNull("options", options);
final Duration interval = options.getPollInterval() != null
? options.getPollInterval() : Duration.ofSeconds(1);
final BlobBeginCopySourceRequestConditions sourceModifiedCondition =
options.getSourceRequestConditions() == null
? new BlobBeginCopySourceRequestConditions()
: options.getSourceRequestConditions();
final BlobRequestConditions destinationRequestConditions =
options.getDestinationRequestConditions() == null
? new BlobRequestConditions()
: options.getDestinationRequestConditions();
final BlobImmutabilityPolicy immutabilityPolicy = options.getImmutabilityPolicy() == null
? new BlobImmutabilityPolicy() : options.getImmutabilityPolicy();
return new PollerFlux<>(interval,
(pollingContext) -> {
try {
return onStart(options.getSourceUrl(), options.getMetadata(), options.getTags(),
options.getTier(), options.getRehydratePriority(), options.isSealDestination(),
sourceModifiedCondition, destinationRequestConditions, immutabilityPolicy,
options.isLegalHold());
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
},
(pollingContext) -> {
try {
return onPoll(pollingContext.getLatestResponse());
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
},
(pollingContext, firstResponse) -> {
if (firstResponse == null || firstResponse.getValue() == null) {
return Mono.error(LOGGER.logExceptionAsError(
new IllegalArgumentException("Cannot cancel a poll response that never started.")));
}
final String copyIdentifier = firstResponse.getValue().getCopyId();
if (!CoreUtils.isNullOrEmpty(copyIdentifier)) {
LOGGER.info("Cancelling copy operation for copy id: {}", copyIdentifier);
return abortCopyFromUrl(copyIdentifier).thenReturn(firstResponse.getValue());
}
return Mono.empty();
},
(pollingContext) -> Mono.empty());
}
private Mono<BlobCopyInfo> onStart(String sourceUrl, Map<String, String> metadata, Map<String, String> tags,
AccessTier tier, RehydratePriority priority, Boolean sealBlob,
BlobBeginCopySourceRequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destinationRequestConditions, BlobImmutabilityPolicy immutabilityPolicy,
Boolean legalHold) {
try {
new URL(sourceUrl);
} catch (MalformedURLException ex) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'sourceUrl' is not a valid url.", ex));
}
return withContext(context -> azureBlobStorage.getBlobs().startCopyFromURLWithResponseAsync(containerName,
blobName, sourceUrl, null, metadata, tier, priority, sourceModifiedRequestConditions.getIfModifiedSince(),
sourceModifiedRequestConditions.getIfUnmodifiedSince(), sourceModifiedRequestConditions.getIfMatch(),
sourceModifiedRequestConditions.getIfNoneMatch(), sourceModifiedRequestConditions.getTagsConditions(),
destinationRequestConditions.getIfModifiedSince(), destinationRequestConditions.getIfUnmodifiedSince(),
destinationRequestConditions.getIfMatch(), destinationRequestConditions.getIfNoneMatch(),
destinationRequestConditions.getTagsConditions(), destinationRequestConditions.getLeaseId(), null,
tagsToString(tags), sealBlob, immutabilityPolicy.getExpiryTime(), immutabilityPolicy.getPolicyMode(),
legalHold, context))
.map(response -> {
final BlobsStartCopyFromURLHeaders headers = response.getDeserializedHeaders();
return new BlobCopyInfo(sourceUrl, headers.getXMsCopyId(), headers.getXMsCopyStatus(),
headers.getETag(), headers.getLastModified(), ModelHelper.getErrorCode(response.getHeaders()),
headers.getXMsVersionId());
});
}
String tagsToString(Map<String, String> tags) {
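// Illustrative example (assumed values; pair order follows the map's iteration order): a map of
// {"year" -> "2021", "greeting" -> "hello world"} serializes to "year=2021&greeting=hello+world", i.e. each
// key and value is URL encoded and the pairs are joined with '&'.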
if (tags == null || tags.isEmpty()) {
return null;
}
StringBuilder sb = new StringBuilder();
for (Map.Entry<String, String> entry : tags.entrySet()) {
try {
sb.append(URLEncoder.encode(entry.getKey(), Charset.defaultCharset().toString()));
sb.append("=");
sb.append(URLEncoder.encode(entry.getValue(), Charset.defaultCharset().toString()));
sb.append("&");
} catch (UnsupportedEncodingException e) {
throw LOGGER.logExceptionAsError(new IllegalStateException(e));
}
}
sb.deleteCharAt(sb.length() - 1); // Remove the last '&'
return sb.toString();
}
private Mono<PollResponse<BlobCopyInfo>> onPoll(PollResponse<BlobCopyInfo> pollResponse) {
if (pollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED
|| pollResponse.getStatus() == LongRunningOperationStatus.FAILED) {
return Mono.just(pollResponse);
}
final BlobCopyInfo lastInfo = pollResponse.getValue();
if (lastInfo == null) {
LOGGER.warning("BlobCopyInfo does not exist. Activation operation failed.");
return Mono.just(new PollResponse<>(
LongRunningOperationStatus.fromString("COPY_START_FAILED", true), null));
}
return getProperties().map(response -> {
final CopyStatusType status = response.getCopyStatus();
final BlobCopyInfo result = new BlobCopyInfo(response.getCopySource(), response.getCopyId(), status,
response.getETag(), response.getCopyCompletionTime(), response.getCopyStatusDescription(),
response.getVersionId());
LongRunningOperationStatus operationStatus;
switch (status) {
case SUCCESS:
operationStatus = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED;
break;
case FAILED:
operationStatus = LongRunningOperationStatus.FAILED;
break;
case ABORTED:
operationStatus = LongRunningOperationStatus.USER_CANCELLED;
break;
case PENDING:
operationStatus = LongRunningOperationStatus.IN_PROGRESS;
break;
default:
throw LOGGER.logExceptionAsError(new IllegalArgumentException(
"CopyStatusType is not supported. Status: " + status));
}
return new PollResponse<>(operationStatus, result);
}).onErrorReturn(
new PollResponse<>(LongRunningOperationStatus.fromString("POLLING_FAILED", true), lastInfo));
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* Code Samples
*
*
*
* client.abortCopyFromUrl(copyId).doOnSuccess(response -> System.out.println("Aborted copy from URL"));
*
*
*
* For more information, see the
* Azure Docs
*
* @param copyId The id of the copy operation to abort.
* @return A reactive response signalling completion.
* @see #copyFromUrl(String)
* @see #beginCopy(String, Duration)
* @see #beginCopy(String, Map, AccessTier, RehydratePriority, RequestConditions, BlobRequestConditions, Duration)
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> abortCopyFromUrl(String copyId) {
return abortCopyFromUrlWithResponse(copyId, null).flatMap(FluxUtil::toMono);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* Code Samples
*
*
*
* client.abortCopyFromUrlWithResponse(copyId, leaseId)
* .subscribe(response -> System.out.printf("Aborted copy completed with status %d%n", response.getStatusCode()));
*
*
*
* For more information, see the
* Azure Docs
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @return A reactive response signalling completion.
* @see #copyFromUrl(String)
* @see #beginCopy(String, Duration)
* @see #beginCopy(String, Map, AccessTier, RehydratePriority, RequestConditions, BlobRequestConditions, Duration)
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> abortCopyFromUrlWithResponse(String copyId, String leaseId) {
try {
return withContext(context -> abortCopyFromUrlWithResponse(copyId, leaseId, context));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
Mono<Response<Void>> abortCopyFromUrlWithResponse(String copyId, String leaseId, Context context) {
return this.azureBlobStorage.getBlobs().abortCopyFromURLWithResponseAsync(
containerName, blobName, copyId, null, leaseId, null, context)
.map(response -> new SimpleResponse<>(response, null));
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
*
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
*
* Code Samples
*
*
*
* client.copyFromUrl(url).subscribe(response -> System.out.printf("Copy identifier: %s%n", response));
*
*
*
* For more information, see the
* Azure Docs
*
* @param copySource The source URL to copy from.
* @return A reactive response containing the copy ID for the long-running operation.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<String> copyFromUrl(String copySource) {
return copyFromUrlWithResponse(copySource, null, null, null, null).flatMap(FluxUtil::toMono);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
*
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
*
* Code Samples
*
*
*
* Map<String, String> metadata = Collections.singletonMap("metadata", "value");
* RequestConditions modifiedRequestConditions = new RequestConditions()
* .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(7));
* BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
*
* client.copyFromUrlWithResponse(url, metadata, AccessTier.HOT, modifiedRequestConditions, blobRequestConditions)
* .subscribe(response -> System.out.printf("Copy identifier: %s%n", response));
*
*
*
* For more information, see the
* Azure Docs
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @return A reactive response containing the copy ID for the long-running operation.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<String>> copyFromUrlWithResponse(String copySource, Map<String, String> metadata,
AccessTier tier, RequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destRequestConditions) {
try {
return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
.setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
.setDestinationRequestConditions(destRequestConditions));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
*
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
*
* Code Samples
*
*
*
* Map<String, String> metadata = Collections.singletonMap("metadata", "value");
* Map<String, String> tags = Collections.singletonMap("tag", "value");
* RequestConditions modifiedRequestConditions = new RequestConditions()
* .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(7));
* BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
*
* client.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(url).setMetadata(metadata).setTags(tags)
* .setTier(AccessTier.HOT).setSourceRequestConditions(modifiedRequestConditions)
* .setDestinationRequestConditions(blobRequestConditions))
* .subscribe(response -> System.out.printf("Copy identifier: %s%n", response));
*
*
*
* For more information, see the
* Azure Docs
*
* @param options {@link BlobCopyFromUrlOptions}
* @return A reactive response containing the copy ID for the long-running operation.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<String>> copyFromUrlWithResponse(BlobCopyFromUrlOptions options) {
try {
return withContext(context -> copyFromUrlWithResponse(options, context));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
Mono<Response<String>> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Context context) {
StorageImplUtils.assertNotNull("options", options);
RequestConditions sourceModifiedRequestConditions = options.getSourceRequestConditions() == null
? new RequestConditions() : options.getSourceRequestConditions();
BlobRequestConditions destRequestConditions = options.getDestinationRequestConditions() == null
? new BlobRequestConditions() : options.getDestinationRequestConditions();
BlobImmutabilityPolicy immutabilityPolicy = options.getImmutabilityPolicy() == null
? new BlobImmutabilityPolicy() : options.getImmutabilityPolicy();
try {
new URL(options.getCopySource());
} catch (MalformedURLException ex) {
throw LOGGER.logExceptionAsError(new IllegalArgumentException("'copySource' is not a valid url.", ex));
}
String sourceAuth = options.getSourceAuthorization() == null
? null : options.getSourceAuthorization().toString();
return this.azureBlobStorage.getBlobs().copyFromURLWithResponseAsync(
containerName, blobName, options.getCopySource(), null, options.getMetadata(), options.getTier(),
sourceModifiedRequestConditions.getIfModifiedSince(),
sourceModifiedRequestConditions.getIfUnmodifiedSince(), sourceModifiedRequestConditions.getIfMatch(),
sourceModifiedRequestConditions.getIfNoneMatch(), destRequestConditions.getIfModifiedSince(),
destRequestConditions.getIfUnmodifiedSince(), destRequestConditions.getIfMatch(),
destRequestConditions.getIfNoneMatch(), destRequestConditions.getTagsConditions(),
destRequestConditions.getLeaseId(), null, null,
tagsToString(options.getTags()), immutabilityPolicy.getExpiryTime(), immutabilityPolicy.getPolicyMode(),
options.hasLegalHold(), sourceAuth, options.getCopySourceTagsMode(), this.encryptionScope, context)
.map(rb -> new SimpleResponse<>(rb, rb.getDeserializedHeaders().getXMsCopyId()));
}
/**
* Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or
* {@link AppendBlobClient}.
*
* Code Samples
*
*
*
* ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
* client.download().subscribe(piece -> {
* try {
* downloadData.write(piece.array());
* } catch (IOException ex) {
* throw new UncheckedIOException(ex);
* }
* });
*
*
*
* For more information, see the
* Azure Docs
*
* This method will be deprecated in the future. Use {@link #downloadStream()} instead.
*
* @return A reactive response containing the blob data.
* @deprecated use {@link #downloadStream()} instead.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
@Deprecated
public Flux<ByteBuffer> download() {
return downloadStream();
}
/**
* Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or
* {@link AppendBlobClient}.
*
* Code Samples
*
*
*
* ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
* client.downloadStream().subscribe(piece -> {
* try {
* downloadData.write(piece.array());
* } catch (IOException ex) {
* throw new UncheckedIOException(ex);
* }
* });
*
*
*
* For more information, see the
* Azure Docs
*
* @return A reactive response containing the blob data.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public Flux<ByteBuffer> downloadStream() {
return downloadWithResponse(null, null, null, false).flatMapMany(BlobDownloadAsyncResponse::getValue);
}
/**
* Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or
* {@link AppendBlobClient}.
*
* Code Samples
*
*
*
* client.downloadContent().subscribe(data -> {
* System.out.printf("Downloaded %s", data.toString());
* });
*
*
*
* For more information, see the
* Azure Docs
*
* This method supports downloads up to 2GB of data.
* Use {@link #downloadStream()} to download larger blobs.
*
* @return A reactive response containing the blob data.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<BinaryData> downloadContent() {
return downloadWithResponse(null, null, null, false)
.flatMap(response -> BinaryData.fromFlux(response.getValue()));
}
/**
* Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient}, {@link
* PageBlobClient}, or {@link AppendBlobClient}.
*
* Code Samples
*
*
*
* BlobRange range = new BlobRange(1024, (long) 2048);
* DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
*
* client.downloadWithResponse(range, options, null, false).subscribe(response -> {
* ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
* response.getValue().subscribe(piece -> {
* try {
* downloadData.write(piece.array());
* } catch (IOException ex) {
* throw new UncheckedIOException(ex);
* }
* });
* });
*
*
*
* For more information, see the
* Azure Docs
*
* This method will be deprecated in the future.
* Use {@link #downloadStreamWithResponse(BlobRange, DownloadRetryOptions, BlobRequestConditions, boolean)} instead.
*
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @return A reactive response containing the blob data.
* @deprecated use {@link #downloadStreamWithResponse(BlobRange, DownloadRetryOptions, BlobRequestConditions, boolean)} instead.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
@Deprecated
public Mono<BlobDownloadAsyncResponse> downloadWithResponse(BlobRange range, DownloadRetryOptions options,
BlobRequestConditions requestConditions, boolean getRangeContentMd5) {
return downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5);
}
/**
* Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient}, {@link
* PageBlobClient}, or {@link AppendBlobClient}.
*
* Code Samples
*
*
*
* BlobRange range = new BlobRange(1024, (long) 2048);
* DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
*
* client.downloadStreamWithResponse(range, options, null, false).subscribe(response -> {
* ByteArrayOutputStream downloadData = new ByteArrayOutputStream();
* response.getValue().subscribe(piece -> {
* try {
* downloadData.write(piece.array());
* } catch (IOException ex) {
* throw new UncheckedIOException(ex);
* }
* });
* });
*
*
*
* For more information, see the
* Azure Docs
*
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @return A reactive response containing the blob data.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<BlobDownloadAsyncResponse> downloadStreamWithResponse(BlobRange range, DownloadRetryOptions options,
BlobRequestConditions requestConditions, boolean getRangeContentMd5) {
try {
return withContext(context ->
downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient}, {@link
* PageBlobClient}, or {@link AppendBlobClient}.
*
* Code Samples
*
*
*
* DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
*
* client.downloadContentWithResponse(options, null).subscribe(response -> {
* BinaryData content = response.getValue();
* System.out.println(content.toString());
* });
*
*
*
* For more information, see the
* Azure Docs
*
* This method supports downloads up to 2GB of data.
* Use {@link #downloadStreamWithResponse(BlobRange, DownloadRetryOptions, BlobRequestConditions, boolean)}
* to download larger blobs.
*
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @return A reactive response containing the blob data.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<BlobDownloadContentAsyncResponse> downloadContentWithResponse(
DownloadRetryOptions options,
BlobRequestConditions requestConditions) {
try {
return withContext(context -> downloadStreamWithResponse(null, options, requestConditions, false, context)
.flatMap(r -> BinaryData.fromFlux(r.getValue())
.map(data -> new BlobDownloadContentAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(),
data, r.getDeserializedHeaders()))));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
Mono<BlobDownloadAsyncResponse> downloadStreamWithResponse(BlobRange range, DownloadRetryOptions options,
BlobRequestConditions requestConditions, boolean getRangeContentMd5, Context context) {
BlobRange finalRange = range == null ? new BlobRange(0) : range;
Boolean getMD5 = getRangeContentMd5 ? getRangeContentMd5 : null;
BlobRequestConditions finalRequestConditions =
requestConditions == null ? new BlobRequestConditions() : requestConditions;
DownloadRetryOptions finalOptions = (options == null) ? new DownloadRetryOptions() : options;
// The first range should eagerly convert headers as they'll be used to create response types.
Context firstRangeContext = context == null ? new Context("azure-eagerly-convert-headers", true)
: context.addData("azure-eagerly-convert-headers", true);
return downloadRange(finalRange, finalRequestConditions, finalRequestConditions.getIfMatch(), getMD5,
firstRangeContext)
.map(response -> {
BlobsDownloadHeaders blobsDownloadHeaders = new BlobsDownloadHeaders(response.getHeaders());
String eTag = blobsDownloadHeaders.getETag();
BlobDownloadHeaders blobDownloadHeaders = ModelHelper.populateBlobDownloadHeaders(
blobsDownloadHeaders, ModelHelper.getErrorCode(response.getHeaders()));
/*
* If the customer did not specify a count, they are reading to the end of the blob. Extract this value
* from the response for better book-keeping towards the end.
*/
long finalCount;
long initialOffset = finalRange.getOffset();
if (finalRange.getCount() == null) {
long blobLength = ModelHelper.getBlobLength(blobDownloadHeaders);
finalCount = blobLength - initialOffset;
} else {
finalCount = finalRange.getCount();
}
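// Worked example with hypothetical numbers: a request at offset 1024 with no count against a 10240-byte blob
// yields finalCount = 10240 - 1024 = 9216. If a retried download later resumes after 4096 bytes have been
// emitted, the range requested below becomes BlobRange(1024 + 4096, 9216 - 4096), i.e. offset 5120, count 5120.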
// The resume function takes throwable and offset at the destination.
// I.e. offset is relative to the starting point.
BiFunction<Throwable, Long, Mono<StreamResponse>> onDownloadErrorResume = (throwable, offset) -> {
if (!(throwable instanceof IOException || throwable instanceof TimeoutException)) {
return Mono.error(throwable);
}
long newCount = finalCount - offset;
/*
* It's possible that the network stream will throw an error after emitting all data but before
* completing. Issuing a retry at this stage would leave the download in a bad state with
* incorrect count and offset values. Because we have read the intended amount of data, we can
* ignore the error at the end of the stream.
*/
if (newCount == 0) {
LOGGER.warning("Exception encountered in ReliableDownload after all data read from the network "
+ "but before stream signaled completion. Returning success as all data was downloaded. "
+ "Exception message: " + throwable.getMessage());
return Mono.empty();
}
try {
return downloadRange(
new BlobRange(initialOffset + offset, newCount), finalRequestConditions, eTag, getMD5, context);
} catch (Exception e) {
return Mono.error(e);
}
};
return BlobDownloadAsyncResponseConstructorProxy.create(response, onDownloadErrorResume, finalOptions);
});
}
private Mono<StreamResponse> downloadRange(BlobRange range, BlobRequestConditions requestConditions, String eTag,
Boolean getMD5, Context context) {
return azureBlobStorage.getBlobs().downloadNoCustomHeadersWithResponseAsync(containerName, blobName, snapshot,
versionId, null, range.toHeaderValue(), requestConditions.getLeaseId(), getMD5, null,
requestConditions.getIfModifiedSince(), requestConditions.getIfUnmodifiedSince(), eTag,
requestConditions.getIfNoneMatch(), requestConditions.getTagsConditions(), null,
customerProvidedKey, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* The file will be created and must not exist; if the file already exists, a {@link FileAlreadyExistsException}
* will be thrown.
*
* Code Samples
*
*
*
* client.downloadToFile(file).subscribe(response -> System.out.println("Completed download to file"));
*
*
*
* For more information, see the
* Azure Docs
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return A reactive response containing the blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<BlobProperties> downloadToFile(String filePath) {
return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* If overwrite is set to false, the file will be created and must not exist; if the file already exists, a
* {@link FileAlreadyExistsException} will be thrown.
*
* Code Samples
*
*
*
* boolean overwrite = false; // Default value
* client.downloadToFile(file, overwrite).subscribe(response -> System.out.println("Completed download to file"));
*
*
*
* For more information, see the
* Azure Docs
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether to overwrite the file, should the file exist.
* @return A reactive response containing the blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<BlobProperties> downloadToFile(String filePath, boolean overwrite) {
Set<OpenOption> openOptions = null;
if (overwrite) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE);
openOptions.add(StandardOpenOption.TRUNCATE_EXISTING); // If the file already exists and it is opened
// for WRITE access, then its length is truncated to 0.
openOptions.add(StandardOpenOption.READ);
openOptions.add(StandardOpenOption.WRITE);
}
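// When overwrite is false, openOptions stays null, so downloadToFileWithResponse falls back to its defaults
// (CREATE_NEW, WRITE, READ), which fail with a FileAlreadyExistsException if the file already exists.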
return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions)
.flatMap(FluxUtil::toMono);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* The file will be created and must not exist; if the file already exists, a {@link FileAlreadyExistsException}
* will be thrown.
*
* Code Samples
*
*
*
* BlobRange range = new BlobRange(1024, 2048L);
* DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
*
* client.downloadToFileWithResponse(file, range, null, options, null, false)
* .subscribe(response -> System.out.println("Completed download to file"));
*
*
*
* For more information, see the
* Azure Docs
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @return A reactive response containing the blob properties and metadata.
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 4000MB.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<BlobProperties>> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions options,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5) {
return downloadToFileWithResponse(filePath, range, parallelTransferOptions, options, requestConditions,
rangeGetContentMd5, null);
}
/**
* Downloads the entire blob into a file specified by the path.
*
* By default, the file will be created and must not exist; if the file already exists, a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions}.
*
* Code Samples
*
*
*
* BlobRange blobRange = new BlobRange(1024, 2048L);
* DownloadRetryOptions downloadRetryOptions = new DownloadRetryOptions().setMaxRetryRequests(5);
* Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW,
* StandardOpenOption.WRITE, StandardOpenOption.READ)); // Default options
*
* client.downloadToFileWithResponse(file, blobRange, null, downloadRetryOptions, null, false, openOptions)
* .subscribe(response -> System.out.println("Completed download to file"));
*
*
*
* For more information, see the
* Azure Docs
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @return A reactive response containing the blob properties and metadata.
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 4000MB.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<BlobProperties>> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions options,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions) {
try {
final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
return withContext(context ->
downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range)
.setParallelTransferOptions(finalParallelTransferOptions)
.setDownloadRetryOptions(options).setRequestConditions(requestConditions)
.setRetrieveContentRangeMd5(rangeGetContentMd5).setOpenOptions(openOptions), context));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
/**
* Downloads the entire blob into a file specified by the path.
*
* By default the file will be created and must not exist; if the file already exists, a
* {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
* {@link OpenOption OpenOptions}.
*
* Code Samples
*
*
*
* client.downloadToFileWithResponse(new BlobDownloadToFileOptions(file)
* .setRange(new BlobRange(1024, 2018L))
* .setDownloadRetryOptions(new DownloadRetryOptions().setMaxRetryRequests(5))
* .setOpenOptions(new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE,
* StandardOpenOption.READ))))
* .subscribe(response -> System.out.println("Completed download to file"));
*
*
*
* For more information, see the
* Azure Docs
*
* @param options {@link BlobDownloadToFileOptions}
* @return A reactive response containing the blob properties and metadata.
* @throws IllegalArgumentException If {@code blockSize} is less than 0 or greater than 4000MB.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<BlobProperties>> downloadToFileWithResponse(BlobDownloadToFileOptions options) {
try {
return withContext(context -> downloadToFileWithResponse(options, context));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
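// Illustrative sketch (not part of the original source): callers that want to overwrite an existing file
// can replace the default CREATE_NEW option with CREATE + TRUNCATE_EXISTING, for example:
//
//   Set<OpenOption> overwriteOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE,
//       StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.WRITE, StandardOpenOption.READ));
//   client.downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath)
//       .setOpenOptions(overwriteOptions))
//       .subscribe(response -> System.out.println("Completed download, overwriting any existing file"));
//
// Here 'client' and 'filePath' are placeholder names, mirroring the Javadoc samples above.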
Mono<Response<BlobProperties>> downloadToFileWithResponse(BlobDownloadToFileOptions options, Context context) {
StorageImplUtils.assertNotNull("options", options);
BlobRange finalRange = options.getRange() == null ? new BlobRange(0) : options.getRange();
final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
ModelHelper.populateAndApplyDefaults(options.getParallelTransferOptions());
BlobRequestConditions finalConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
// Default behavior is not to overwrite
Set<OpenOption> openOptions = options.getOpenOptions();
if (openOptions == null) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE_NEW);
openOptions.add(StandardOpenOption.WRITE);
openOptions.add(StandardOpenOption.READ);
}
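// Open the destination file before any service calls so that failures (for example, the file already
// existing when CREATE_NEW is used) surface immediately; the channel is closed in downloadToFileCleanup.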
AsynchronousFileChannel channel = downloadToFileResourceSupplier(options.getFilePath(), openOptions);
return Mono.just(channel)
.flatMap(c -> this.downloadToFileImpl(c, finalRange, finalParallelTransferOptions,
options.getDownloadRetryOptions(), finalConditions, options.isRetrieveContentRangeMd5(), context))
.doFinally(signalType -> this.downloadToFileCleanup(channel, options.getFilePath(), signalType));
}
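// Opens the AsynchronousFileChannel for the destination path, wrapping the checked IOException in the
// UncheckedIOException documented on the public overloads.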
private AsynchronousFileChannel downloadToFileResourceSupplier(String filePath, Set<OpenOption> openOptions) {
try {
return AsynchronousFileChannel.open(Paths.get(filePath), openOptions, null);
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
}
}
private Mono<Response<BlobProperties>> downloadToFileImpl(AsynchronousFileChannel file, BlobRange finalRange,
com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions,
DownloadRetryOptions downloadRetryOptions, BlobRequestConditions requestConditions, boolean rangeGetContentMd5,
Context context) {
// See ProgressReporter for an explanation on why this lock is necessary and why we use AtomicLong.
ProgressListener progressReceiver = finalParallelTransferOptions.getProgressListener();
ProgressReporter progressReporter = progressReceiver == null ? null : ProgressReporter.withProgressListener(
progressReceiver);
/*
* Downloads the first chunk and gets the size of the data and etag if not specified by the user.
*/
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(range, conditions) -> this.downloadStreamWithResponse(range, downloadRetryOptions, conditions,
rangeGetContentMd5, context);
return ChunkedDownloadUtils.downloadFirstChunk(finalRange, finalParallelTransferOptions, requestConditions,
downloadFunc, true)
.flatMap(setupTuple3 -> {
long newCount = setupTuple3.getT1();
BlobRequestConditions finalConditions = setupTuple3.getT2();
int numChunks = ChunkedDownloadUtils.calculateNumBlocks(newCount,
finalParallelTransferOptions.getBlockSizeLong());
// In case it is an empty blob, this ensures we still actually perform a download operation.
numChunks = numChunks == 0 ? 1 : numChunks;
BlobDownloadAsyncResponse initialResponse = setupTuple3.getT3();
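// Download the chunks with bounded concurrency (getMaxConcurrency()) and write each one at its offset
// in the destination file.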
return Flux.range(0, numChunks)
.flatMap(chunkNum -> ChunkedDownloadUtils.downloadChunk(chunkNum, initialResponse,
finalRange, finalParallelTransferOptions, finalConditions, newCount, downloadFunc,
response -> writeBodyToFile(response, file, chunkNum, finalParallelTransferOptions,
progressReporter == null ? null : progressReporter.createChild()
).flux()), finalParallelTransferOptions.getMaxConcurrency())
// Only the first download call returns a value.
.then(Mono.just(ModelHelper.buildBlobPropertiesResponse(initialResponse)));
});
}
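// Writes a single downloaded chunk at its absolute offset (chunk number * block size), so chunks may
// complete and be written out of order.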
private static Mono<Void> writeBodyToFile(BlobDownloadAsyncResponse response, AsynchronousFileChannel file,
long chunkNum, com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions,
ProgressReporter progressReporter) {
long position = chunkNum * finalParallelTransferOptions.getBlockSizeLong();
return response.writeValueToAsync(IOUtils.toAsynchronousByteChannel(file, position), progressReporter);
}
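// Closes the file channel and, if the download terminated with anything other than ON_COMPLETE,
// deletes the partially written file.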
private void downloadToFileCleanup(AsynchronousFileChannel channel, String filePath, SignalType signalType) {
try {
channel.close();
if (!signalType.equals(SignalType.ON_COMPLETE)) {
Files.deleteIfExists(Paths.get(filePath));
LOGGER.verbose("Downloading to file failed. Cleaning up resources.");
}
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
}
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link #deleteWithResponse(DeleteSnapshotsOptionType, BlobRequestConditions)} and set
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* Code Samples
*
*
*
* client.delete().doOnSuccess(response -> System.out.println("Completed delete"));
*
*
*
* For more information, see the
* Azure Docs
*
* @return A reactive response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> delete() {
return deleteWithResponse(null, null).flatMap(FluxUtil::toMono);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots, set
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* Code Samples
*
*
*
* client.deleteWithResponse(DeleteSnapshotsOptionType.INCLUDE, null)
* .subscribe(response -> System.out.printf("Delete completed with status %d%n", response.getStatusCode()));
*
*
*
* For more information, see the
* Azure Docs
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @return A reactive response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
BlobRequestConditions requestConditions) {
try {
return withContext(context -> deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
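// A null requestConditions is replaced with an empty BlobRequestConditions so each individual condition
// can be passed through as null.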
Mono<Response<Void>> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
BlobRequestConditions requestConditions, Context context) {
requestConditions = requestConditions == null ? new BlobRequestConditions() : requestConditions;
return this.azureBlobStorage.getBlobs().deleteWithResponseAsync(containerName, blobName, snapshot, versionId,
null, requestConditions.getLeaseId(), deleteBlobSnapshotOptions, requestConditions.getIfModifiedSince(),
requestConditions.getIfUnmodifiedSince(), requestConditions.getIfMatch(),
requestConditions.getIfNoneMatch(), requestConditions.getTagsConditions(), null, null, context)
.map(response -> new SimpleResponse<>(response, null));
}
/**
* Deletes the specified blob or snapshot if it exists. To delete a blob with its snapshots use
* {@link #deleteIfExistsWithResponse(DeleteSnapshotsOptionType, BlobRequestConditions)} and set
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* Code Samples
*
*
*
* client.deleteIfExists().subscribe(deleted -> {
* if (deleted) {
* System.out.println("Successfully deleted.");
* } else {
* System.out.println("Does not exist.");
* }
* });
*
*
*
* For more information, see the
* Azure Docs
*
* @return A reactive response signaling completion. {@code true} indicates that the blob was deleted.
* {@code false} indicates the blob does not exist at this location.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Boolean> deleteIfExists() {
return deleteIfExistsWithResponse(null, null).flatMap(FluxUtil::toMono);
}
/**
* Deletes the specified blob or snapshot if it exists. To delete a blob with its snapshots, set
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* Code Samples
*
*
*
* client.deleteIfExistsWithResponse(DeleteSnapshotsOptionType.INCLUDE, null).subscribe(response -> {
* if (response.getStatusCode() == 404) {
* System.out.println("Does not exist.");
* } else {
* System.out.println("successfully deleted.");
* }
* });
*
*
*
* For more information, see the
* Azure Docs
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @return A reactive response signaling completion. If {@link Response}'s status code is 202, the base blob was
* successfully deleted. If status code is 404, the base blob does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Boolean>> deleteIfExistsWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
BlobRequestConditions requestConditions) {
try {
return withContext(context -> deleteIfExistsWithResponse(deleteBlobSnapshotOptions,
requestConditions, context));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
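// Delegates to deleteWithResponse and maps a 404 (blob not found) BlobStorageException to a successful
// response carrying false instead of propagating the error.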
Mono<Response<Boolean>> deleteIfExistsWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
BlobRequestConditions requestConditions, Context context) {
requestConditions = requestConditions == null ? new BlobRequestConditions() : requestConditions;
return deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context)
.map(response -> (Response<Boolean>) new SimpleResponse<>(response, true))
.onErrorResume(t -> t instanceof BlobStorageException && ((BlobStorageException) t).getStatusCode() == 404,
t -> {
HttpResponse response = ((BlobStorageException) t).getResponse();
return Mono.just(new SimpleResponse<>(response.getRequest(), response.getStatusCode(),
response.getHeaders(), false));
});
}
/**
* Returns the blob's metadata and properties.
*
* Code Samples
*
*
*
* client.getProperties().subscribe(response ->
* System.out.printf("Type: %s, Size: %d%n", response.getBlobType(), response.getBlobSize()));
*
*
*
* For more information, see the
* Azure Docs
*
* @return A reactive response containing the blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<BlobProperties> getProperties() {
return getPropertiesWithResponse(null).flatMap(FluxUtil::toMono);
}
/**
* Returns the blob's metadata and properties.
*
* Code Samples
*
*
*
* BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
*
* client.getPropertiesWithResponse(requestConditions).subscribe(
* response -> System.out.printf("Type: %s, Size: %d%n", response.getValue().getBlobType(),
* response.getValue().getBlobSize()));
*
*
*
* For more information, see the
* Azure Docs
*
* @param requestConditions {@link BlobRequestConditions}
* @return A reactive response containing the blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<BlobProperties>> getPropertiesWithResponse(BlobRequestConditions requestConditions) {
try {
return withContext(context -> getPropertiesWithResponse(requestConditions, context));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
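// Maps the deserialized Get Properties headers from the service into the public BlobProperties model.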
Mono<Response<BlobProperties>> getPropertiesWithResponse(BlobRequestConditions requestConditions, Context context) {
requestConditions = requestConditions == null ? new BlobRequestConditions() : requestConditions;
context = context == null ? Context.NONE : context;
return this.azureBlobStorage.getBlobs().getPropertiesWithResponseAsync(
containerName, blobName, snapshot, versionId, null, requestConditions.getLeaseId(),
requestConditions.getIfModifiedSince(),
requestConditions.getIfUnmodifiedSince(), requestConditions.getIfMatch(),
requestConditions.getIfNoneMatch(), requestConditions.getTagsConditions(), null, customerProvidedKey,
context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
.map(rb -> new SimpleResponse<>(rb, BlobPropertiesConstructorProxy
.create(new BlobPropertiesInternalGetProperties(rb.getDeserializedHeaders()))));
}
/**
* Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* Code Samples
*
*
*
* client.setHttpHeaders(new BlobHttpHeaders()
* .setContentLanguage("en-US")
* .setContentType("binary"));
*
*
*
* For more information, see the
* Azure Docs
*
* @param headers {@link BlobHttpHeaders}
* @return A reactive response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> setHttpHeaders(BlobHttpHeaders headers) {
return setHttpHeadersWithResponse(headers, null).flatMap(FluxUtil::toMono);
}
/**
* Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* Code Samples
*
*
*
* BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
*
* client.setHttpHeadersWithResponse(new BlobHttpHeaders()
* .setContentLanguage("en-US")
* .setContentType("binary"), requestConditions).subscribe(
* response ->
* System.out.printf("Set HTTP headers completed with status %d%n",
* response.getStatusCode()));
*
*
*
* For more information, see the
* Azure Docs
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @return A reactive response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> setHttpHeadersWithResponse(BlobHttpHeaders headers,
BlobRequestConditions requestConditions) {
try {
return withContext(context -> setHttpHeadersWithResponse(headers, requestConditions, context));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
Mono<Response<Void>> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
Context context) {
requestConditions = requestConditions == null ? new BlobRequestConditions() : requestConditions;
return this.azureBlobStorage.getBlobs().setHttpHeadersWithResponseAsync(
containerName, blobName, null, requestConditions.getLeaseId(), requestConditions.getIfModifiedSince(),
requestConditions.getIfUnmodifiedSince(), requestConditions.getIfMatch(),
requestConditions.getIfNoneMatch(), requestConditions.getTagsConditions(), null, headers, context)
.map(response -> new SimpleResponse<>(response, null));
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* Code Samples
*
*
*
* client.setMetadata(Collections.singletonMap("metadata", "value"));
*
*
*
* For more information, see the
* Azure Docs
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @return A reactive response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> setMetadata(Map<String, String> metadata) {
return setMetadataWithResponse(metadata, null).flatMap(FluxUtil::toMono);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* Code Samples
*
*
*
* BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
*
* client.setMetadataWithResponse(Collections.singletonMap("metadata", "value"), requestConditions)
* .subscribe(response -> System.out.printf("Set metadata completed with status %d%n", response.getStatusCode()));
*
*
*
* For more information, see the
* Azure Docs
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @return A reactive response signalling completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata,
BlobRequestConditions requestConditions) {
try {
return withContext(context -> setMetadataWithResponse(metadata, requestConditions, context));
} catch (RuntimeException ex) {
return monoError(LOGGER, ex);
}
}
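// Unlike the HTTP header setter, metadata updates also pass the customer-provided key and encryption
// scope through to the service call.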
Mono<Response<Void>> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
Context context) {
requestConditions = requestConditions == null ? new BlobRequestConditions() : requestConditions;
context = context == null ? Context.NONE : context;
return this.azureBlobStorage.getBlobs().setMetadataWithResponseAsync(
containerName, blobName, null, metadata, requestConditions.getLeaseId(), requestConditions.getIfModifiedSince(),
requestConditions.getIfUnmodifiedSince(), requestConditions.getIfMatch(),
requestConditions.getIfNoneMatch(), requestConditions.getTagsConditions(), null, customerProvidedKey,
encryptionScope, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE))
.map(response -> new SimpleResponse<>(response, null));
}
/**
* Returns the blob's tags.
*
* Code Samples
*
*
*
* client.getTags().subscribe(response ->
* System.out.printf("Num tags: %d%n", response.size()));
*
*
*
* For more information, see the
* Azure Docs
*
* @return A reactive response containing the blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono