/*
* Copyright Microsoft Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.microsoft.azure.storage.blob;
import com.microsoft.azure.storage.blob.models.StorageErrorException;
import com.microsoft.rest.v2.http.*;
import com.microsoft.rest.v2.policy.RequestPolicy;
import com.microsoft.rest.v2.policy.RequestPolicyFactory;
import com.microsoft.rest.v2.policy.RequestPolicyOptions;
import io.netty.channel.ChannelException;
import io.reactivex.Flowable;
import io.reactivex.Single;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.SocketException;
import java.net.SocketTimeoutException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
* This is a factory which creates policies in an {@link HttpPipeline} for retrying a given HTTP request. The request
* that is retried will be identical each time it is reissued. In most cases, it is sufficient to configure a {@link
RequestRetryOptions} object and set it as a field on a {@link PipelineOptions} object to configure a default
* pipeline. Retries will try against a secondary if one is specified and the type of operation/error indicates that the
* secondary can handle the request. Exponential and fixed backoff are supported. The factory and policy must only be
* used directly when creating a custom pipeline.
*/
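/*
Illustrative usage (a sketch, not part of the original source): configuring retries by setting a
RequestRetryOptions on PipelineOptions and building a default pipeline. The RequestRetryOptions
constructor arguments, the PipelineOptions member, and StorageURL.createPipeline shown here are
assumptions; exact names and signatures may differ between SDK versions.

    RequestRetryOptions retryOptions = new RequestRetryOptions(
            RetryPolicyType.EXPONENTIAL,    // assumed backoff strategy enum
            4,                              // maxTries
            30,                             // tryTimeout, in seconds
            null,                           // retryDelayInMs (null -> library default)
            null,                           // maxRetryDelayInMs (null -> library default)
            "myaccount-secondary.blob.core.windows.net"); // optional secondary host

    PipelineOptions pipelineOptions = new PipelineOptions();
    pipelineOptions.requestRetryOptions = retryOptions;    // assumed field; may be a with* setter
    HttpPipeline pipeline = StorageURL.createPipeline(credentials, pipelineOptions);
*/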
public final class RequestRetryFactory implements RequestPolicyFactory {
private final RequestRetryOptions requestRetryOptions;
/**
* Creates a factory capable of generating RequestRetry policies for the {@link HttpPipeline}.
*
* @param requestRetryOptions
* {@link RequestRetryOptions}
*/
public RequestRetryFactory(RequestRetryOptions requestRetryOptions) {
this.requestRetryOptions = requestRetryOptions == null ? RequestRetryOptions.DEFAULT : requestRetryOptions;
}
private final class RequestRetryPolicy implements RequestPolicy {
private final RequestPolicy nextPolicy;
private final RequestRetryOptions requestRetryOptions;
// TODO: It looked like there was some stuff in here to log how long the operation took. Do we want that?
private RequestRetryPolicy(RequestPolicy nextPolicy, RequestRetryOptions requestRetryOptions) {
this.nextPolicy = nextPolicy;
this.requestRetryOptions = requestRetryOptions;
}
@Override
public Single<HttpResponse> sendAsync(HttpRequest httpRequest) {
boolean considerSecondary = (httpRequest.httpMethod().equals(HttpMethod.GET) ||
httpRequest.httpMethod().equals(HttpMethod.HEAD))
&& (this.requestRetryOptions.getSecondaryHost() != null);
return this.attemptAsync(httpRequest, 1, considerSecondary, 1);
}
// This is to log for debugging purposes only. Comment/uncomment as necessary for releasing/debugging.
private void logf(String s, Object... args) {
//System.out.println(String.format(s, args));
}
/**
* This method actually attempts to send the request and determines if we should attempt again and, if so, how
* long to wait before sending out the next request.
*
* <ul>
* <li>Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2)</li>
* <li>When to retry: connection failure, or an HTTP status code of 500 or greater, except 501 and 505</li>
* <li>If using a secondary: odd tries go against the primary; even tries go against the secondary</li>
* <li>For a primary try, wait ((2 ^ primaryTries) - 1) * delay * random(0.8, 1.2)</li>
* <li>If the secondary returns a 404, don't fail; retry, but all future retries go against the primary only</li>
* <li>When retrying against the secondary, ignore the retry count and wait roughly one second with jitter</li>
* </ul>
*
* @param httpRequest
* The request to try.
* @param primaryTry
This indicates how many tries we've attempted against the primary DC.
* @param considerSecondary
* Before each try, we'll select either the primary or secondary URL if appropriate.
* @param attempt
* This indicates the total number of attempts to send the request.
* @return A single containing either the successful response or an error that was not retryable because either
* the maxTries was exceeded or retries will not mitigate the issue.
*/
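/*
Worked example of the exponential growth described above (illustrative numbers only; the exact
computation, including how the try count maps into the exponent and any maximum-delay cap, lives in
RequestRetryOptions.calculateDelayInMs, and per the code below the first primary attempt is sent
with no delay). Assuming a base delay of 4 seconds and a jitter factor of random(0.8, 1.2):
    (2^1 - 1) * 4s = 4s   -> roughly 3.2s - 4.8s after jitter
    (2^2 - 1) * 4s = 12s  -> roughly 9.6s - 14.4s after jitter
    (2^3 - 1) * 4s = 28s  -> roughly 22.4s - 33.6s after jitter
*/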
private Single<HttpResponse> attemptAsync(final HttpRequest httpRequest, final int primaryTry,
final boolean considerSecondary,
final int attempt) {
logf("\n=====> Try=%d\n", attempt);
// Determine which endpoint to try. It's primary if there is no secondary or if it is an odd number attempt.
final boolean tryingPrimary = !considerSecondary || (attempt % 2 == 1);
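/*
Illustrative alternation when a secondary host is configured (derived from the modulo check above
and the primaryTry bookkeeping below; not part of the original source):
    attempt 1 -> primary   (primaryTry 1)
    attempt 2 -> secondary
    attempt 3 -> primary   (primaryTry 2)
    attempt 4 -> secondary
Without a secondary (considerSecondary == false), every attempt goes to the primary and
primaryTry simply tracks attempt.
*/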
// Select the correct host and delay.
long delayMs;
if (tryingPrimary) {
// The first attempt returns 0 delay.
delayMs = this.requestRetryOptions.calculateDelayInMs(primaryTry);
logf("Primary try=%d, Delay=%d\n", primaryTry, delayMs);
} else {
// Delay with some jitter before trying the secondary.
delayMs = (long) ((ThreadLocalRandom.current().nextFloat() / 2 + 0.8) * 1000); // Random delay of roughly 0.8-1.3 seconds
logf("Secondary try=%d, Delay=%d\n", attempt - primaryTry, delayMs);
}
/*
Clone the original request to ensure that each try starts with the original (unmutated) request. We cannot
simply call httpRequest.buffer() because although the body will start emitting from the beginning of the
stream, the buffers that were emitted will have already been consumed (their position set to their limit),
so it is not a true reset. By adding the map function, we ensure that anything which consumes the
ByteBuffers downstream will only actually consume a duplicate so the original is preserved. This only
duplicates the ByteBuffer object, not the underlying data.
*/
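/*
Quick illustration of the duplicate() behavior relied on here (standard java.nio semantics; the
buffer names are only for this example):
    ByteBuffer original = ByteBuffer.wrap(new byte[] {1, 2, 3});
    ByteBuffer copy = original.duplicate();
    copy.get();                          // advances copy's position only
    assert original.position() == 0;     // original's position and limit are untouched
Both buffers share the same backing data, so no bytes are copied.
*/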
HttpHeaders bufferedHeaders = new HttpHeaders(httpRequest.headers());
Flowable<ByteBuffer> bufferedBody = httpRequest.body() == null ?
null : httpRequest.body().map(ByteBuffer::duplicate);
final HttpRequest requestCopy = new HttpRequest(httpRequest.callerMethod(), httpRequest.httpMethod(),
httpRequest.url(), bufferedHeaders, bufferedBody, httpRequest.responseDecoder());
if (!tryingPrimary) {
UrlBuilder builder = UrlBuilder.parse(requestCopy.url());
builder.withHost(this.requestRetryOptions.getSecondaryHost());
try {
requestCopy.withUrl(builder.toURL());
} catch (MalformedURLException e) {
return Single.error(e);
}
}
// Apply the per-try timeout and the retry backoff delay.
/*
We want to send the request with a given timeout, but we don't want to kick off that timeout-bound operation
until after the retry backoff delay, so we call delaySubscription.
*/
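/*
For example (illustrative numbers only): with a tryTimeout of 30 seconds and a computed backoff of
4000 ms, the timeout-wrapped request is subscribed to roughly 4 seconds from now, and the 30-second
per-try timer only starts at that point, so the backoff never eats into the try's time budget.
*/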
return this.nextPolicy.sendAsync(requestCopy)
.timeout(this.requestRetryOptions.getTryTimeout(), TimeUnit.SECONDS)
.delaySubscription(delayMs, TimeUnit.MILLISECONDS)
.flatMap(response -> {
boolean newConsiderSecondary = considerSecondary;
String action;
int statusCode = response.statusCode();
/*
If attempt was against the secondary & it returned a StatusNotFound (404), then the
resource was not found. This may be due to replication delay. So, in this case,
we'll never try the secondary again for this operation.
*/
if (!tryingPrimary && statusCode == 404) {
newConsiderSecondary = false;
action = "Retry: Secondary URL returned 404";
} else if (statusCode == 503 || statusCode == 500) {
action = "Retry: Temporary error or server timeout";
} else {
action = "NoRetry: Successful HTTP request";
}
logf("Action=%s\n", action);
if (action.charAt(0) == 'R' && attempt < requestRetryOptions.getMaxTries()) {
/*
We increment primaryTry if we are about to try the primary again (which is when we
consider the secondary and tried the secondary this time (tryingPrimary==false) or
we do not consider the secondary at all (considerSecondary==false)). This will
ensure primaryTry is correct when passed to calculate the delay.
*/
int newPrimaryTry = !tryingPrimary || !considerSecondary ?
primaryTry + 1 : primaryTry;
return attemptAsync(httpRequest, newPrimaryTry, newConsiderSecondary,
attempt + 1);
}
return Single.just(response);
})
.onErrorResumeNext(throwable -> {
String action;
/*
ChannelException: A RuntimeException thrown when an I/O operation fails.
ClosedChannelException: Thrown when an attempt is made to invoke or complete an I/O operation
upon a channel that is closed.
SocketException: Thrown to indicate that there is an error creating or accessing a Socket.
SocketTimeoutException: Signals that a timeout has occurred on a socket read or accept.
TimeoutException: A client-side timeout emitted by the Rx timeout operator applied above.
*/
if (throwable instanceof ChannelException ||
throwable instanceof ClosedChannelException ||
throwable instanceof SocketException ||
throwable instanceof SocketTimeoutException) {
action = "Retry: Network error";
} else if (throwable instanceof TimeoutException) {
action = "Retry: Client timeout";
} else {
action = "NoRetry: Unknown error";
}
logf("Action=%s\n", action);
if (action.charAt(0) == 'R' && attempt < requestRetryOptions.getMaxTries()) {
/*
We increment primaryTry if we are about to try the primary again (which is when we
consider the secondary and tried the secondary this time (tryingPrimary==false) or
we do not consider the secondary at all (considerSecondary==false)). This will
ensure primaryTry is correct when passed to calculate the delay.
*/
int newPrimaryTry = !tryingPrimary || !considerSecondary ?
primaryTry + 1 : primaryTry;
return attemptAsync(httpRequest, newPrimaryTry, considerSecondary,
attempt + 1);
}
return Single.error(throwable);
});
}
}
@Override
public RequestPolicy create(RequestPolicy next, RequestPolicyOptions options) {
return new RequestRetryPolicy(next, this.requestRetryOptions);
}
}
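/*
Direct use in a custom pipeline (a sketch, not part of the original source): the factory is handed
to the pipeline builder alongside other RequestPolicyFactory instances. HttpPipeline.build and the
companion factories shown are assumptions; the exact builder API and factory set may differ between
client-runtime and SDK versions.

    HttpPipeline customPipeline = HttpPipeline.build(
            new TelemetryFactory(new TelemetryOptions()),       // assumed companion factory
            new RequestRetryFactory(null),                      // null falls back to RequestRetryOptions.DEFAULT
            new SharedKeyCredentials(accountName, accountKey)); // credentials assumed to act as a factory
*/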