// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.storage.blob.specialized;
import com.azure.core.annotation.ReturnType;
import com.azure.core.annotation.ServiceMethod;
import com.azure.core.http.HttpPipeline;
import com.azure.core.http.RequestConditions;
import com.azure.core.http.rest.Response;
import com.azure.core.http.rest.ResponseBase;
import com.azure.core.http.rest.SimpleResponse;
import com.azure.core.util.BinaryData;
import com.azure.core.util.Context;
import com.azure.core.util.FluxUtil;
import com.azure.core.util.logging.ClientLogger;
import com.azure.core.util.polling.SyncPoller;
import com.azure.storage.blob.BlobContainerClient;
import com.azure.storage.blob.BlobServiceClient;
import com.azure.storage.blob.BlobServiceVersion;
import com.azure.storage.blob.implementation.util.ChunkedDownloadUtils;
import com.azure.storage.blob.implementation.util.ModelHelper;
import com.azure.storage.blob.models.AccessTier;
import com.azure.storage.blob.models.BlobCopyInfo;
import com.azure.storage.blob.models.BlobDownloadAsyncResponse;
import com.azure.storage.blob.models.BlobDownloadContentAsyncResponse;
import com.azure.storage.blob.models.BlobDownloadContentResponse;
import com.azure.storage.blob.models.BlobDownloadResponse;
import com.azure.storage.blob.models.BlobHttpHeaders;
import com.azure.storage.blob.models.BlobImmutabilityPolicy;
import com.azure.storage.blob.models.BlobLegalHoldResult;
import com.azure.storage.blob.models.BlobProperties;
import com.azure.storage.blob.models.BlobQueryAsyncResponse;
import com.azure.storage.blob.models.BlobQueryResponse;
import com.azure.storage.blob.models.BlobRange;
import com.azure.storage.blob.models.BlobRequestConditions;
import com.azure.storage.blob.models.BlobSeekableByteChannelReadResult;
import com.azure.storage.blob.models.BlobStorageException;
import com.azure.storage.blob.models.ConsistentReadControl;
import com.azure.storage.blob.models.CpkInfo;
import com.azure.storage.blob.models.CustomerProvidedKey;
import com.azure.storage.blob.models.DeleteSnapshotsOptionType;
import com.azure.storage.blob.models.DownloadRetryOptions;
import com.azure.storage.blob.models.ParallelTransferOptions;
import com.azure.storage.blob.models.RehydratePriority;
import com.azure.storage.blob.models.StorageAccountInfo;
import com.azure.storage.blob.models.UserDelegationKey;
import com.azure.storage.blob.options.BlobBeginCopyOptions;
import com.azure.storage.blob.options.BlobCopyFromUrlOptions;
import com.azure.storage.blob.options.BlobDownloadToFileOptions;
import com.azure.storage.blob.options.BlobGetTagsOptions;
import com.azure.storage.blob.options.BlobInputStreamOptions;
import com.azure.storage.blob.options.BlobQueryOptions;
import com.azure.storage.blob.options.BlobSeekableByteChannelReadOptions;
import com.azure.storage.blob.options.BlobSetAccessTierOptions;
import com.azure.storage.blob.options.BlobSetTagsOptions;
import com.azure.storage.blob.sas.BlobServiceSasSignatureValues;
import com.azure.storage.common.implementation.StorageSeekableByteChannel;
import com.azure.storage.common.StorageSharedKeyCredential;
import com.azure.storage.common.implementation.Constants;
import com.azure.storage.common.implementation.FluxInputStream;
import com.azure.storage.common.implementation.StorageImplUtils;
import com.fasterxml.jackson.databind.util.ByteBufferBackedOutputStream;
import reactor.core.publisher.Mono;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.UncheckedIOException;
import java.net.URL;
import java.nio.ByteBuffer;
import java.nio.channels.SeekableByteChannel;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.OpenOption;
import java.nio.file.StandardOpenOption;
import java.time.Duration;
import java.time.OffsetDateTime;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.function.BiFunction;
import static com.azure.storage.common.implementation.StorageImplUtils.blockWithOptionalTimeout;
/**
* This class provides a client that contains all operations that apply to any blob type.
*
*
* This client offers the ability to download blobs. Note that uploading data is specific to each type of blob. Please
* refer to the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient} for upload options.
*/
public class BlobClientBase {
private static final ClientLogger LOGGER = new ClientLogger(BlobClientBase.class);
private final BlobAsyncClientBase client;
/**
* Constructor used by {@link SpecializedBlobClientBuilder}.
*
* @param client the async blob client
*/
protected BlobClientBase(BlobAsyncClientBase client) {
this.client = client;
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code snapshot} of this blob resource.
*
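 * A hedged usage sketch (illustrative only; assumes {@code snapshot} holds an identifier returned by a prior
 * snapshot operation):
 *
 * BlobClientBase snapshotClient = client.getSnapshotClient(snapshot);
 * System.out.println("Snapshot blob URL: " + snapshotClient.getBlobUrl());
 *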
* @param snapshot the identifier for a specific snapshot of this blob
* @return a {@link BlobClientBase} used to interact with the specific snapshot.
*/
public BlobClientBase getSnapshotClient(String snapshot) {
return new BlobClientBase(client.getSnapshotClient(snapshot));
}
/**
* Creates a new {@link BlobClientBase} linked to the {@code version} of this blob resource.
*
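 * A hedged usage sketch (illustrative only; assumes {@code versionId} was captured from an earlier operation on a
 * versioning-enabled account):
 *
 * BlobClientBase versionClient = client.getVersionClient(versionId);
 * System.out.println("Version blob URL: " + versionClient.getBlobUrl());
 *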
* @param versionId the identifier for a specific version of this blob,
* pass {@code null} to interact with the latest blob version.
* @return a {@link BlobClientBase} used to interact with the specific version.
*/
public BlobClientBase getVersionClient(String versionId) {
return new BlobClientBase(client.getVersionClient(versionId));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code encryptionScope}.
*
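 * A hedged usage sketch (the scope name "my-encryption-scope" is illustrative only):
 *
 * BlobClientBase scopedClient = client.getEncryptionScopeClient("my-encryption-scope");
 * System.out.println("Encryption scope in use: " + scopedClient.getEncryptionScope());
 *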
* @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
* @return a {@link BlobClientBase} with the specified {@code encryptionScope}.
*/
public BlobClientBase getEncryptionScopeClient(String encryptionScope) {
return new BlobClientBase(client.getEncryptionScopeAsyncClient(encryptionScope));
}
/**
* Creates a new {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*
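 * A hedged usage sketch (assumes {@code base64Key} holds a Base64-encoded AES-256 key supplied by the caller):
 *
 * CustomerProvidedKey cpk = new CustomerProvidedKey(base64Key);
 * BlobClientBase cpkClient = client.getCustomerProvidedKeyClient(cpk);
 * System.out.println("Customer provided key set: " + (cpkClient.getCustomerProvidedKey() != null));
 *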
* @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
* pass {@code null} to use no customer provided key.
* @return a {@link BlobClientBase} with the specified {@code customerProvidedKey}.
*/
public BlobClientBase getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
return new BlobClientBase(client.getCustomerProvidedKeyAsyncClient(customerProvidedKey));
}
/**
* Get the url of the storage account.
*
* @return the URL of the storage account
*/
public String getAccountUrl() {
return client.getAccountUrl();
}
/**
* Gets the URL of the blob represented by this client.
*
* @return the URL.
*/
public String getBlobUrl() {
return client.getBlobUrl();
}
/**
* Get associated account name.
*
* @return account name associated with this storage resource.
*/
public String getAccountName() {
return client.getAccountName();
}
/**
* Get the container name.
*
*
 * Code Samples
*
*
*
* String containerName = client.getContainerName();
* System.out.println("The name of the container is " + containerName);
*
*
*
* @return The name of the container.
*/
public final String getContainerName() {
return client.getContainerName();
}
/**
* Gets a client pointing to the parent container.
*
* Code Samples
*
*
*
* BlobContainerClient containerClient = client.getContainerClient();
* System.out.println("The name of the container is " + containerClient.getBlobContainerName());
*
*
*
* @return {@link BlobContainerClient}
*/
public BlobContainerClient getContainerClient() {
return client.getContainerClientBuilder().buildClient();
}
/**
* Decodes and gets the blob name.
*
* Code Samples
*
*
*
* String blobName = client.getBlobName();
* System.out.println("The name of the blob is " + blobName);
*
*
*
* @return The decoded name of the blob.
*/
public final String getBlobName() {
return client.getBlobName();
}
/**
* Gets the {@link HttpPipeline} powering this client.
*
* @return The pipeline.
*/
public HttpPipeline getHttpPipeline() {
return client.getHttpPipeline();
}
/**
* Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
*
* @return the customer provided key used for encryption.
*/
public CpkInfo getCustomerProvidedKey() {
return client.getCustomerProvidedKey();
}
/**
* Gets the {@code encryption scope} used to encrypt this blob's content on the server.
*
* @return the encryption scope used for encryption.
*/
public String getEncryptionScope() {
return client.getEncryptionScope();
}
/**
* Gets the service version the client is using.
*
* @return the service version the client is using.
*/
public BlobServiceVersion getServiceVersion() {
return client.getServiceVersion();
}
/**
* Gets the snapshotId for a blob resource
*
* @return A string that represents the snapshotId of the snapshot blob
*/
public String getSnapshotId() {
return client.getSnapshotId();
}
/**
* Gets the versionId for a blob resource
*
 * @return A string that represents the versionId of the blob
*/
public String getVersionId() {
return client.getVersionId();
}
/**
* Determines if a blob is a snapshot
*
* @return A boolean that indicates if a blob is a snapshot
*/
public boolean isSnapshot() {
return client.isSnapshot();
}
/**
* Opens a blob input stream to download the blob.
*
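 * A hedged usage sketch, reading the entire blob through the stream (the buffer size is illustrative):
 *
 * try (BlobInputStream blobStream = client.openInputStream()) {
 *     byte[] buffer = new byte[4096];
 *     while (blobStream.read(buffer) != -1) {
 *         // process the downloaded bytes
 *     }
 * } catch (IOException e) {
 *     throw new UncheckedIOException(e);
 * }
 *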
 * @return An InputStream object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public BlobInputStream openInputStream() {
return openInputStream((BlobRange) null, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
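 * A hedged usage sketch (the range values and {@code leaseId} are illustrative):
 *
 * BlobRange blobRange = new BlobRange(1024, 2048L);
 * BlobRequestConditions conditions = new BlobRequestConditions().setLeaseId(leaseId);
 * try (BlobInputStream blobStream = client.openInputStream(blobRange, conditions)) {
 *     System.out.printf("Opened stream at offset %d%n", blobRange.getOffset());
 * } catch (IOException e) {
 *     throw new UncheckedIOException(e);
 * }
 *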
* @param range {@link BlobRange}
* @param requestConditions An {@link BlobRequestConditions} object that represents the access conditions for the
* blob.
 * @return An InputStream object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public BlobInputStream openInputStream(BlobRange range, BlobRequestConditions requestConditions) {
return openInputStream(new BlobInputStreamOptions().setRange(range).setRequestConditions(requestConditions));
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
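 * A hedged usage sketch (the 1 MB block size is illustrative):
 *
 * BlobInputStreamOptions streamOptions = new BlobInputStreamOptions()
 *     .setRange(new BlobRange(0))
 *     .setBlockSize(1024 * 1024);
 * try (BlobInputStream blobStream = client.openInputStream(streamOptions)) {
 *     System.out.println("Opened stream with a 1 MB block size.");
 * } catch (IOException e) {
 *     throw new UncheckedIOException(e);
 * }
 *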
* @param options {@link BlobInputStreamOptions}
 * @return An InputStream object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public BlobInputStream openInputStream(BlobInputStreamOptions options) {
return openInputStream(options, null);
}
/**
* Opens a blob input stream to download the specified range of the blob.
*
* @param options {@link BlobInputStreamOptions}
* @param context {@link Context}
 * @return An InputStream object that represents the stream to use for reading from the blob.
* @throws BlobStorageException If a storage service error occurred.
*/
public BlobInputStream openInputStream(BlobInputStreamOptions options, Context context) {
Context contextFinal = context == null ? Context.NONE : context;
options = options == null ? new BlobInputStreamOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
BlobRequestConditions requestConditions = options.getRequestConditions() == null
? new BlobRequestConditions() : options.getRequestConditions();
BlobRange range = options.getRange() == null ? new BlobRange(0) : options.getRange();
int chunkSize = options.getBlockSize() == null ? 4 * Constants.MB : options.getBlockSize();
com.azure.storage.common.ParallelTransferOptions parallelTransferOptions =
new com.azure.storage.common.ParallelTransferOptions().setBlockSizeLong((long) chunkSize);
BiFunction<BlobRange, BlobRequestConditions, Mono<BlobDownloadAsyncResponse>> downloadFunc =
(chunkRange, conditions) -> client.downloadStreamWithResponse(chunkRange, null, conditions, false, contextFinal);
return ChunkedDownloadUtils.downloadFirstChunk(range, parallelTransferOptions, requestConditions, downloadFunc, true)
.flatMap(tuple3 -> {
BlobDownloadAsyncResponse downloadResponse = tuple3.getT3();
return FluxUtil.collectBytesInByteBufferStream(downloadResponse.getValue())
.map(ByteBuffer::wrap)
.zipWith(Mono.just(downloadResponse));
})
.flatMap(tuple2 -> {
ByteBuffer initialBuffer = tuple2.getT1();
BlobDownloadAsyncResponse downloadResponse = tuple2.getT2();
BlobProperties properties = ModelHelper.buildBlobPropertiesResponse(downloadResponse).getValue();
String eTag = properties.getETag();
String versionId = properties.getVersionId();
BlobAsyncClientBase client = this.client;
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
// Target the user specified eTag by default. If not provided, target the latest eTag.
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(eTag);
}
break;
case VERSION_ID:
if (versionId == null) {
return FluxUtil.monoError(LOGGER,
new UnsupportedOperationException("Versioning is not supported on this account."));
} else {
// Target the user specified version by default. If not provided, target the latest version.
if (this.client.getVersionId() == null) {
client = this.client.getVersionClient(versionId);
}
}
break;
default:
return FluxUtil.monoError(LOGGER, new IllegalArgumentException("Concurrency control type not "
+ "supported."));
}
return Mono.just(new BlobInputStream(client, range.getOffset(), range.getCount(), chunkSize, initialBuffer,
requestConditions, properties, contextFinal));
}).block();
}
/**
* Opens a seekable byte channel in read-only mode to download the blob.
*
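 * A hedged usage sketch (the read position and buffer size are illustrative, and the result is assumed to expose
 * the channel via {@code getChannel()}):
 *
 * BlobSeekableByteChannelReadResult result = client.openSeekableByteChannelRead(
 *     new BlobSeekableByteChannelReadOptions(), null);
 * try (SeekableByteChannel channel = result.getChannel()) {
 *     ByteBuffer readBuffer = ByteBuffer.allocate(4096);
 *     channel.position(1024);
 *     channel.read(readBuffer);
 * } catch (IOException e) {
 *     throw new UncheckedIOException(e);
 * }
 *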
* @param options {@link BlobSeekableByteChannelReadOptions}
* @param context {@link Context}
 * @return A BlobSeekableByteChannelReadResult containing the SeekableByteChannel to use for reading from the blob
 * and the blob's properties.
* @throws BlobStorageException If a storage service error occurred.
*/
public BlobSeekableByteChannelReadResult openSeekableByteChannelRead(
BlobSeekableByteChannelReadOptions options, Context context) {
context = context == null ? Context.NONE : context;
options = options == null ? new BlobSeekableByteChannelReadOptions() : options;
ConsistentReadControl consistentReadControl = options.getConsistentReadControl() == null
? ConsistentReadControl.ETAG : options.getConsistentReadControl();
int chunkSize = options.getReadSizeInBytes() == null ? 4 * Constants.MB : options.getReadSizeInBytes();
long initialPosition = options.getInitialPosition() == null ? 0 : options.getInitialPosition();
ByteBuffer initialRange = ByteBuffer.allocate(chunkSize);
BlobProperties properties;
BlobDownloadResponse response;
try (ByteBufferBackedOutputStream dstStream = new ByteBufferBackedOutputStream(initialRange)) {
response = this.downloadStreamWithResponse(dstStream,
new BlobRange(initialPosition, (long) initialRange.remaining()), null /*downloadRetryOptions*/,
options.getRequestConditions(), false, null, context);
properties = ModelHelper.buildBlobPropertiesResponse(response).getValue();
} catch (IOException e) {
throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
}
initialRange.limit(initialRange.position());
initialRange.rewind();
BlobClientBase behaviorClient = this;
BlobRequestConditions requestConditions = options.getRequestConditions();
switch (consistentReadControl) {
case NONE:
break;
case ETAG:
requestConditions = requestConditions != null ? requestConditions : new BlobRequestConditions();
// If etag locking but no explicitly specified etag, use the etag from prefetch
if (requestConditions.getIfMatch() == null) {
requestConditions.setIfMatch(properties.getETag());
}
break;
case VERSION_ID:
if (properties.getVersionId() == null) {
throw LOGGER.logExceptionAsError(
new UnsupportedOperationException(
"Version ID locking unsupported. Versioning is not supported on this account."));
} else {
// If version locking but no explicitly specified version, use the latest version from prefetch
if (getVersionId() == null) {
behaviorClient = this.getVersionClient(properties.getVersionId());
}
}
break;
default:
throw LOGGER.logExceptionAsError(new IllegalArgumentException(
"Concurrency control type " + consistentReadControl + " not supported."));
}
StorageSeekableByteChannelBlobReadBehavior behavior = new StorageSeekableByteChannelBlobReadBehavior(
behaviorClient, initialRange, initialPosition, properties.getBlobSize(), requestConditions);
SeekableByteChannel channel = new StorageSeekableByteChannel(chunkSize, behavior, initialPosition);
return new BlobSeekableByteChannelReadResult(channel, properties);
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* Code Samples
*
*
*
* System.out.printf("Exists? %b%n", client.exists());
*
*
*
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Boolean exists() {
return existsWithResponse(null, Context.NONE).getValue();
}
/**
* Gets if the blob this client represents exists in the cloud.
*
* Code Samples
*
*
*
* System.out.printf("Exists? %b%n", client.existsWithResponse(timeout, new Context(key2, value2)).getValue());
*
*
*
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return true if the blob exists, false if it doesn't
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> existsWithResponse(Duration timeout, Context context) {
Mono<Response<Boolean>> response = client.existsWithResponse(context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Copies the data at the source URL to a blob.
*
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
*
 * Code Samples
*
*
*
* final SyncPoller<BlobCopyInfo, Void> poller = client.beginCopy(url, Duration.ofSeconds(2));
* PollResponse<BlobCopyInfo> pollResponse = poller.poll();
* System.out.printf("Copy identifier: %s%n", pollResponse.getValue().getCopyId());
*
*
*
* For more information, see the
* Azure Docs
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Duration pollInterval) {
return beginCopy(sourceUrl,
null,
null,
null,
null,
null, pollInterval);
}
/**
* Copies the data at the source URL to a blob.
*
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
*
 * Code Samples
*
*
*
* Map<String, String> metadata = Collections.singletonMap("metadata", "value");
* RequestConditions modifiedRequestConditions = new RequestConditions()
* .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(7));
* BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
* SyncPoller<BlobCopyInfo, Void> poller = client.beginCopy(url, metadata, AccessTier.HOT,
* RehydratePriority.STANDARD, modifiedRequestConditions, blobRequestConditions, Duration.ofSeconds(2));
*
* PollResponse<BlobCopyInfo> response = poller.waitUntil(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
* System.out.printf("Copy identifier: %s%n", response.getValue().getCopyId());
*
*
*
* For more information, see the
* Azure Docs
*
* @param sourceUrl The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param priority {@link RehydratePriority} for rehydrating the blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second
* is used.
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, AccessTier tier,
RehydratePriority priority, RequestConditions sourceModifiedRequestConditions,
BlobRequestConditions destRequestConditions, Duration pollInterval) {
return this.beginCopy(new BlobBeginCopyOptions(sourceUrl).setMetadata(metadata).setTier(tier)
.setRehydratePriority(priority).setSourceRequestConditions(
ModelHelper.populateBlobSourceRequestConditions(sourceModifiedRequestConditions))
.setDestinationRequestConditions(destRequestConditions).setPollInterval(pollInterval));
}
/**
* Copies the data at the source URL to a blob.
*
 * This method triggers a long-running, asynchronous operation. The source may be another blob or an Azure File. If
* the source is in another account, the source must either be public or authenticated with a SAS token. If the
* source is in the same account, the Shared Key authorization on the destination will also be applied to the
* source. The source URL must be URL encoded.
*
*
 * Code Samples
*
*
*
* Map<String, String> metadata = Collections.singletonMap("metadata", "value");
* Map<String, String> tags = Collections.singletonMap("tag", "value");
* BlobBeginCopySourceRequestConditions modifiedRequestConditions = new BlobBeginCopySourceRequestConditions()
* .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(7));
* BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
* SyncPoller<BlobCopyInfo, Void> poller = client.beginCopy(new BlobBeginCopyOptions(url).setMetadata(metadata)
* .setTags(tags).setTier(AccessTier.HOT).setRehydratePriority(RehydratePriority.STANDARD)
* .setSourceRequestConditions(modifiedRequestConditions)
* .setDestinationRequestConditions(blobRequestConditions).setPollInterval(Duration.ofSeconds(2)));
*
* PollResponse<BlobCopyInfo> response = poller.waitUntil(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
* System.out.printf("Copy identifier: %s%n", response.getValue().getCopyId());
*
*
*
* For more information, see the
* Azure Docs
*
* @param options {@link BlobBeginCopyOptions}
* @return A {@link SyncPoller} to poll the progress of blob copy operation.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public SyncPoller<BlobCopyInfo, Void> beginCopy(BlobBeginCopyOptions options) {
return client.beginCopy(options).getSyncPoller();
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* Code Samples
*
*
*
* client.abortCopyFromUrl(copyId);
* System.out.println("Aborted copy completed.");
*
*
*
* For more information, see the
* Azure Docs
*
* @param copyId The id of the copy operation to abort.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void abortCopyFromUrl(String copyId) {
abortCopyFromUrlWithResponse(copyId, null, null, Context.NONE);
}
/**
* Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
*
* Code Samples
*
*
*
* System.out.printf("Aborted copy completed with status %d%n",
* client.abortCopyFromUrlWithResponse(copyId, leaseId, timeout,
* new Context(key2, value2)).getStatusCode());
*
*
*
* For more information, see the
* Azure Docs
*
* @param copyId The id of the copy operation to abort.
* @param leaseId The lease ID the active lease on the blob must match.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> abortCopyFromUrlWithResponse(String copyId, String leaseId, Duration timeout,
Context context) {
return blockWithOptionalTimeout(client.abortCopyFromUrlWithResponse(copyId, leaseId, context), timeout);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
*
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
*
 * Code Samples
*
*
*
* System.out.printf("Copy identifier: %s%n", client.copyFromUrl(url));
*
*
*
* For more information, see the
* Azure Docs
*
* @param copySource The source URL to copy from.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public String copyFromUrl(String copySource) {
return copyFromUrlWithResponse(copySource, null, null, null, null, null, Context.NONE).getValue();
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
*
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
*
 * Code Samples
*
*
*
* Map<String, String> metadata = Collections.singletonMap("metadata", "value");
* RequestConditions modifiedRequestConditions = new RequestConditions()
* .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(7));
* BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
*
* System.out.printf("Copy identifier: %s%n",
* client.copyFromUrlWithResponse(url, metadata, AccessTier.HOT, modifiedRequestConditions,
* blobRequestConditions, timeout,
* new Context(key1, value1)).getValue());
*
*
*
* For more information, see the
* Azure Docs
*
* @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
* @param metadata Metadata to associate with the destination blob. If there is leading or trailing whitespace in
* any metadata key or value, it must be removed or encoded.
* @param tier {@link AccessTier} for the destination blob.
* @param sourceModifiedRequestConditions {@link RequestConditions} against the source. Standard HTTP Access
* conditions related to the modification of data. ETag and LastModifiedTime are used to construct conditions
* related to when the blob was changed relative to the given request. The request will fail if the specified
* condition is not satisfied.
* @param destRequestConditions {@link BlobRequestConditions} against the destination.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(String copySource, Map<String, String> metadata, AccessTier tier,
RequestConditions sourceModifiedRequestConditions, BlobRequestConditions destRequestConditions,
Duration timeout, Context context) {
return this.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(copySource).setMetadata(metadata)
.setTier(tier).setSourceRequestConditions(sourceModifiedRequestConditions)
.setDestinationRequestConditions(destRequestConditions), timeout, context);
}
/**
* Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
*
* The source must be a block blob no larger than 256MB. The source must also be either public or have a sas token
* attached. The URL must be URL encoded.
*
*
 * Code Samples
*
*
*
* Map<String, String> metadata = Collections.singletonMap("metadata", "value");
* Map<String, String> tags = Collections.singletonMap("tag", "value");
* RequestConditions modifiedRequestConditions = new RequestConditions()
* .setIfUnmodifiedSince(OffsetDateTime.now().minusDays(7));
* BlobRequestConditions blobRequestConditions = new BlobRequestConditions().setLeaseId(leaseId);
*
* System.out.printf("Copy identifier: %s%n",
* client.copyFromUrlWithResponse(new BlobCopyFromUrlOptions(url).setMetadata(metadata).setTags(tags)
* .setTier(AccessTier.HOT).setSourceRequestConditions(modifiedRequestConditions)
* .setDestinationRequestConditions(blobRequestConditions), timeout,
* new Context(key1, value1)).getValue());
*
*
*
* For more information, see the
* Azure Docs
*
* @param options {@link BlobCopyFromUrlOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The copy ID for the long running operation.
* @throws IllegalArgumentException If {@code copySource} is a malformed {@link URL}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<String> copyFromUrlWithResponse(BlobCopyFromUrlOptions options, Duration timeout,
Context context) {
Mono<Response<String>> response = client
.copyFromUrlWithResponse(options, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* Code Samples
*
*
*
* client.download(new ByteArrayOutputStream());
* System.out.println("Download completed.");
*
*
*
* For more information, see the
* Azure Docs
*
* This method will be deprecated in the future. Use {@link #downloadStream(OutputStream)} instead.
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
* @deprecated use {@link #downloadStream(OutputStream)} instead.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
@Deprecated
public void download(OutputStream stream) {
downloadStream(stream);
}
/**
* Downloads the entire blob into an output stream. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
*
 * Code Samples
*
*
*
* client.downloadStream(new ByteArrayOutputStream());
* System.out.println("Download completed.");
*
*
*
* For more information, see the
* Azure Docs
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void downloadStream(OutputStream stream) {
downloadWithResponse(stream, null, null, null, false, null, Context.NONE);
}
/**
* Downloads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
* {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* Code Samples
*
*
*
* BinaryData data = client.downloadContent();
* System.out.printf("Downloaded %s", data.toString());
*
*
*
* For more information, see the
* Azure Docs
*
* This method supports downloads up to 2GB of data.
* Use {@link #downloadStream(OutputStream)} to download larger blobs.
*
* @return The content of the blob.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BinaryData downloadContent() {
return blockWithOptionalTimeout(client.downloadContent(), null);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* Code Samples
*
*
*
* BlobRange range = new BlobRange(1024, 2048L);
* DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
*
* System.out.printf("Download completed with status %d%n",
* client.downloadWithResponse(new ByteArrayOutputStream(), range, options, null, false,
* timeout, new Context(key2, value2)).getStatusCode());
*
*
*
* For more information, see the
* Azure Docs
*
* This method will be deprecated in the future.
* Use {@link #downloadStreamWithResponse(OutputStream, BlobRange, DownloadRetryOptions,
* BlobRequestConditions, boolean, Duration, Context)} instead.
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
* @deprecated use {@link #downloadStreamWithResponse(OutputStream, BlobRange, DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} instead.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
@Deprecated
public BlobDownloadResponse downloadWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
return downloadStreamWithResponse(stream, range,
options, requestConditions, getRangeContentMd5, timeout, context);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
*
 * Code Samples
*
*
*
* BlobRange range = new BlobRange(1024, 2048L);
* DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
*
* System.out.printf("Download completed with status %d%n",
* client.downloadStreamWithResponse(new ByteArrayOutputStream(), range, options, null, false,
* timeout, new Context(key2, value2)).getStatusCode());
*
*
*
* For more information, see the
* Azure Docs
*
* @param stream A non-null {@link OutputStream} instance where the downloaded data will be written.
* @param range {@link BlobRange}
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param getRangeContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
* @throws UncheckedIOException If an I/O error occurs.
* @throws NullPointerException if {@code stream} is null
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadResponse downloadStreamWithResponse(OutputStream stream, BlobRange range,
DownloadRetryOptions options, BlobRequestConditions requestConditions, boolean getRangeContentMd5,
Duration timeout, Context context) {
StorageImplUtils.assertNotNull("stream", stream);
Mono<BlobDownloadResponse> download = client
.downloadStreamWithResponse(range, options, requestConditions, getRangeContentMd5, context)
.flatMap(response -> FluxUtil.writeToOutputStream(response.getValue(), stream)
.thenReturn(new BlobDownloadResponse(response)));
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads a range of bytes from a blob into an output stream. Uploading data must be done from the {@link
* BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
*
* Code Samples
*
*
*
* DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
*
* BlobDownloadContentResponse contentResponse = client.downloadContentWithResponse(options, null,
* timeout, new Context(key2, value2));
* BinaryData content = contentResponse.getValue();
* System.out.printf("Download completed with status %d and content%s%n",
* contentResponse.getStatusCode(), content.toString());
*
*
*
* For more information, see the
* Azure Docs
*
* This method supports downloads up to 2GB of data.
* Use {@link #downloadStreamWithResponse(OutputStream, BlobRange,
* DownloadRetryOptions, BlobRequestConditions, boolean, Duration, Context)} to download larger blobs.
*
* @param options {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobDownloadContentResponse downloadContentWithResponse(
DownloadRetryOptions options, BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<BlobDownloadContentResponse> download = client
.downloadStreamWithResponse(null, options, requestConditions, false, context)
.flatMap(r ->
BinaryData.fromFlux(r.getValue())
.map(data ->
new BlobDownloadContentAsyncResponse(
r.getRequest(), r.getStatusCode(),
r.getHeaders(), data,
r.getDeserializedHeaders())
))
.map(BlobDownloadContentResponse::new);
return blockWithOptionalTimeout(download, timeout);
}
/**
* Downloads the entire blob into a file specified by the path.
*
 * The file will be created and must not exist; if the file already exists, a {@link FileAlreadyExistsException}
 * will be thrown.
*
* Code Samples
*
*
*
* client.downloadToFile(file);
* System.out.println("Completed download to file");
*
*
*
* For more information, see the
* Azure Docs
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath) {
return downloadToFile(filePath, false);
}
/**
* Downloads the entire blob into a file specified by the path.
*
 * If overwrite is set to false, the file will be created and must not exist; if the file already exists, a
 * {@link FileAlreadyExistsException} will be thrown.
*
* Code Samples
*
*
*
* boolean overwrite = false; // Default value
* client.downloadToFile(file, overwrite);
* System.out.println("Completed download to file");
*
*
*
* For more information, see the
* Azure Docs
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param overwrite Whether to overwrite the file, should the file exist.
* @return The blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties downloadToFile(String filePath, boolean overwrite) {
Set<OpenOption> openOptions = null;
if (overwrite) {
openOptions = new HashSet<>();
openOptions.add(StandardOpenOption.CREATE);
openOptions.add(StandardOpenOption.TRUNCATE_EXISTING); // If the file already exists and it is opened
// for WRITE access, then its length is truncated to 0.
openOptions.add(StandardOpenOption.READ);
openOptions.add(StandardOpenOption.WRITE);
}
return downloadToFileWithResponse(filePath, null, null, null, null, false, openOptions, null, Context.NONE)
.getValue();
}
/**
* Downloads the entire blob into a file specified by the path.
*
 * The file will be created and must not exist; if the file already exists, a {@link FileAlreadyExistsException}
 * will be thrown.
*
* Code Samples
*
*
*
* BlobRange range = new BlobRange(1024, 2048L);
* DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(5);
*
* client.downloadToFileWithResponse(file, range, new ParallelTransferOptions().setBlockSizeLong(4L * Constants.MB),
* options, null, false, timeout, new Context(key2, value2));
* System.out.println("Completed download to file");
*
*
*
* For more information, see the
* Azure Docs
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Duration timeout, Context context) {
return downloadToFileWithResponse(filePath, range, parallelTransferOptions, downloadRetryOptions,
requestConditions, rangeGetContentMd5, null, timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
 * By default the file will be created and must not exist; if the file already exists, a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.
*
* Code Samples
*
*
*
* BlobRange blobRange = new BlobRange(1024, 2048L);
* DownloadRetryOptions downloadRetryOptions = new DownloadRetryOptions().setMaxRetryRequests(5);
* Set<OpenOption> openOptions = new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW,
* StandardOpenOption.WRITE, StandardOpenOption.READ)); // Default options
*
* client.downloadToFileWithResponse(file, blobRange, new ParallelTransferOptions().setBlockSizeLong(4L * Constants.MB),
* downloadRetryOptions, null, false, openOptions, timeout, new Context(key2, value2));
* System.out.println("Completed download to file");
*
*
*
* For more information, see the
* Azure Docs
*
* @param filePath A {@link String} representing the filePath where the downloaded data will be written.
* @param range {@link BlobRange}
* @param parallelTransferOptions {@link ParallelTransferOptions} to use to download to file. Number of parallel
* transfers parameter is ignored.
* @param downloadRetryOptions {@link DownloadRetryOptions}
* @param requestConditions {@link BlobRequestConditions}
* @param rangeGetContentMd5 Whether the contentMD5 for the specified blob range should be returned.
* @param openOptions {@link OpenOption OpenOptions} to use to configure how to open or create the file.
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(String filePath, BlobRange range,
ParallelTransferOptions parallelTransferOptions, DownloadRetryOptions downloadRetryOptions,
BlobRequestConditions requestConditions, boolean rangeGetContentMd5, Set<OpenOption> openOptions,
Duration timeout, Context context) {
final com.azure.storage.common.ParallelTransferOptions finalParallelTransferOptions =
ModelHelper.wrapBlobOptions(ModelHelper.populateAndApplyDefaults(parallelTransferOptions));
return downloadToFileWithResponse(new BlobDownloadToFileOptions(filePath).setRange(range)
.setParallelTransferOptions(finalParallelTransferOptions)
.setDownloadRetryOptions(downloadRetryOptions).setRequestConditions(requestConditions)
.setRetrieveContentRangeMd5(rangeGetContentMd5).setOpenOptions(openOptions), timeout, context);
}
/**
* Downloads the entire blob into a file specified by the path.
*
 * By default the file will be created and must not exist; if the file already exists, a
 * {@link FileAlreadyExistsException} will be thrown. To override this behavior, provide appropriate
 * {@link OpenOption OpenOptions}.
*
* Code Samples
*
*
*
* client.downloadToFileWithResponse(new BlobDownloadToFileOptions(file)
* .setRange(new BlobRange(1024, 2018L))
* .setDownloadRetryOptions(new DownloadRetryOptions().setMaxRetryRequests(5))
* .setOpenOptions(new HashSet<>(Arrays.asList(StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE,
* StandardOpenOption.READ))), timeout, new Context(key2, value2));
* System.out.println("Completed download to file");
*
*
*
* For more information, see the
* Azure Docs
*
* @param options {@link BlobDownloadToFileOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing the blob properties and metadata.
* @throws UncheckedIOException If an I/O error occurs.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> downloadToFileWithResponse(BlobDownloadToFileOptions options, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> download = client.downloadToFileWithResponse(options, context);
return blockWithOptionalTimeout(download, timeout);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link #deleteWithResponse(DeleteSnapshotsOptionType, BlobRequestConditions, Duration, Context)} and set
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* Code Samples
*
*
*
* client.delete();
* System.out.println("Delete completed.");
*
*
*
* For more information, see the
* Azure Docs
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete() {
deleteWithResponse(null, null, null, Context.NONE);
}
/**
* Deletes the specified blob or snapshot. To delete a blob with its snapshots use
* {@link #deleteWithResponse(DeleteSnapshotsOptionType, BlobRequestConditions, Duration, Context)} and set
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* Code Samples
*
*
*
* System.out.printf("Delete completed with status %d%n",
* client.deleteWithResponse(DeleteSnapshotsOptionType.INCLUDE, null, timeout,
* new Context(key1, value1)).getStatusCode());
*
*
*
* For more information, see the
* Azure Docs
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> deleteWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
Mono<Response<Void>> response = client
.deleteWithResponse(deleteBlobSnapshotOptions, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Deletes the specified blob or snapshot if it exists. To delete a blob with its snapshots use
* {@link #deleteIfExistsWithResponse(DeleteSnapshotsOptionType, BlobRequestConditions, Duration, Context)} and set
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* Code Samples
*
*
*
* boolean result = client.deleteIfExists();
* System.out.println("Delete completed: " + result);
*
*
*
* For more information, see the
* Azure Docs
* @return {@code true} if delete succeeds, or {@code false} if blob does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public boolean deleteIfExists() {
return deleteIfExistsWithResponse(null, null, null, Context.NONE).getValue();
}
/**
* Deletes the specified blob or snapshot if it exists. To delete a blob with its snapshots use
* {@link #deleteIfExistsWithResponse(DeleteSnapshotsOptionType, BlobRequestConditions, Duration, Context)} and set
* {@code DeleteSnapshotsOptionType} to INCLUDE.
*
* Code Samples
*
*
*
* Response<Boolean> response = client.deleteIfExistsWithResponse(DeleteSnapshotsOptionType.INCLUDE, null, timeout,
* new Context(key1, value1));
* if (response.getStatusCode() == 404) {
* System.out.println("Does not exist.");
* } else {
* System.out.printf("Delete completed with status %d%n", response.getStatusCode());
* }
*
*
*
* For more information, see the
* Azure Docs
*
* @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this blob. {@code Include}
* will delete the base blob and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being
* deleted, you must pass null.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers. If {@link Response}'s status code is 202, the base
* blob was successfully deleted. If status code is 404, the base blob does not exist.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Boolean> deleteIfExistsWithResponse(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
BlobRequestConditions requestConditions, Duration timeout, Context context) {
return blockWithOptionalTimeout(client.deleteIfExistsWithResponse(deleteBlobSnapshotOptions,
requestConditions, context), timeout);
}
/**
* Returns the blob's metadata and properties.
*
* Code Samples
*
*
*
* BlobProperties properties = client.getProperties();
* System.out.printf("Type: %s, Size: %d%n", properties.getBlobType(), properties.getBlobSize());
*
*
*
* For more information, see the
* Azure Docs
*
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlobProperties getProperties() {
return getPropertiesWithResponse(null, null, Context.NONE).getValue();
}
/**
* Returns the blob's metadata and properties.
*
* Code Samples
*
*
*
* BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
*
* BlobProperties properties = client.getPropertiesWithResponse(requestConditions, timeout,
* new Context(key2, value2)).getValue();
* System.out.printf("Type: %s, Size: %d%n", properties.getBlobType(), properties.getBlobSize());
*
*
*
* For more information, see the
* Azure Docs
*
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob properties and metadata.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout,
Context context) {
Mono<Response<BlobProperties>> response = client.getPropertiesWithResponse(requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* Code Samples
*
*
*
* client.setHttpHeaders(new BlobHttpHeaders()
* .setContentLanguage("en-US")
* .setContentType("binary"));
* System.out.println("Set HTTP headers completed");
*
*
*
* For more information, see the
* Azure Docs
*
* @param headers {@link BlobHttpHeaders}
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setHttpHeaders(BlobHttpHeaders headers) {
setHttpHeadersWithResponse(headers, null, null, Context.NONE);
}
/**
 * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased. In
* order to preserve existing values, they must be passed alongside the header being changed.
*
* Code Samples
*
*
*
* BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
*
* System.out.printf("Set HTTP headers completed with status %d%n",
* client.setHttpHeadersWithResponse(new BlobHttpHeaders()
* .setContentLanguage("en-US")
* .setContentType("binary"), requestConditions, timeout, new Context(key1, value1))
* .getStatusCode());
*
*
*
* For more information, see the
* Azure Docs
*
* @param headers {@link BlobHttpHeaders}
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setHttpHeadersWithResponse(BlobHttpHeaders headers, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client
.setHttpHeadersWithResponse(headers, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* Code Samples
*
*
*
* client.setMetadata(Collections.singletonMap("metadata", "value"));
* System.out.println("Set metadata completed");
*
*
*
* For more information, see the
* Azure Docs
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void setMetadata(Map<String, String> metadata) {
setMetadataWithResponse(metadata, null, null, Context.NONE);
}
/**
* Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old values
* must be preserved, they must be downloaded and included in the call to this method.
*
* Code Samples
*
*
*
* BlobRequestConditions requestConditions = new BlobRequestConditions().setLeaseId(leaseId);
*
* System.out.printf("Set metadata completed with status %d%n",
* client.setMetadataWithResponse(Collections.singletonMap("metadata", "value"), requestConditions, timeout,
* new Context(key1, value1)).getStatusCode());
*
*
*
* For more information, see the
* Azure Docs
*
* @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
* metadata key or value, it must be removed or encoded.
* @param requestConditions {@link BlobRequestConditions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return A response containing status code and HTTP headers.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> setMetadataWithResponse(Map<String, String> metadata, BlobRequestConditions requestConditions,
Duration timeout, Context context) {
Mono<Response<Void>> response = client.setMetadataWithResponse(metadata, requestConditions, context);
return blockWithOptionalTimeout(response, timeout);
}
/**
* Returns the blob's tags.
*
* Code Samples
*
*
*
* Map<String, String> tags = client.getTags();
* System.out.printf("Number of tags: %d%n", tags.size());
*
*
*
* For more information, see the
* Azure Docs
*
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Map<String, String> getTags() {
return this.getTagsWithResponse(new BlobGetTagsOptions(), null, Context.NONE).getValue();
}
/**
* Returns the blob's tags.
*
* Code Samples
*
*
*
* Map<String, String> tags = client.getTagsWithResponse(new BlobGetTagsOptions(), timeout,
* new Context(key1, value1)).getValue();
* System.out.printf("Number of tags: %d%n", tags.size());
*
*
*
* For more information, see the
* Azure Docs
*
* @param options {@link BlobGetTagsOptions}
* @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
* @param context Additional context that is passed through the Http pipeline during the service call.
* @return The blob's tags.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Map<String, String>>