org.apache.hadoop.hdfs.qjournal.client.AsyncLoggerSet Maven / Gradle / Ivy
Shaded version of Apache Hadoop for Presto
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.qjournal.client;
import java.io.IOException;
import java.net.URL;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeoutException;
import io.prestosql.hadoop.$internal.org.apache.commons.logging.Log;
import io.prestosql.hadoop.$internal.org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import io.prestosql.hadoop.$internal.com.google.common.annotations.VisibleForTesting;
import io.prestosql.hadoop.$internal.com.google.common.base.Joiner;
import io.prestosql.hadoop.$internal.com.google.common.base.Preconditions;
import io.prestosql.hadoop.$internal.com.google.common.collect.ImmutableList;
import io.prestosql.hadoop.$internal.com.google.common.collect.Maps;
import io.prestosql.hadoop.$internal.com.google.common.util.concurrent.ListenableFuture;
/**
* Wrapper around a set of Loggers, taking care of fanning out
* calls to the underlying loggers and constructing corresponding
* {@link QuorumCall} instances.
*/
class AsyncLoggerSet {
static final Log LOG = LogFactory.getLog(AsyncLoggerSet.class);
private final List<AsyncLogger> loggers;
private static final long INVALID_EPOCH = -1;
private long myEpoch = INVALID_EPOCH;
public AsyncLoggerSet(List<AsyncLogger> loggers) {
this.loggers = ImmutableList.copyOf(loggers);
}
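// Construction sketch: in HDFS this set is normally built by
// QuorumJournalManager, with one AsyncLogger (typically an IPCLoggerChannel)
// per JournalNode in the quorum. Roughly:
//
//   List<AsyncLogger> loggers = ...;  // one logger per JournalNode
//   AsyncLoggerSet loggerSet = new AsyncLoggerSet(loggers);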
void setEpoch(long e) {
Preconditions.checkState(!isEpochEstablished(),
"Epoch already established: epoch=%s", myEpoch);
myEpoch = e;
for (AsyncLogger l : loggers) {
l.setEpoch(e);
}
}
/**
* Set the highest successfully committed txid seen by the writer.
* This should be called after a successful write to a quorum, and is used
* for extra sanity checks against the protocol. See HDFS-3863.
*/
public void setCommittedTxId(long txid) {
for (AsyncLogger logger : loggers) {
logger.setCommittedTxId(txid);
}
}
/**
* @return true if an epoch has been established.
*/
boolean isEpochEstablished() {
return myEpoch != INVALID_EPOCH;
}
/**
* @return the epoch number for this writer. This may only be called after
* a successful call to {@link #createNewUniqueEpoch(NamespaceInfo)}.
*/
long getEpoch() {
Preconditions.checkState(myEpoch != INVALID_EPOCH,
"No epoch created yet");
return myEpoch;
}
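// Epoch establishment sketch, roughly as driven by QuorumJournalManager
// (identifiers outside this class are assumptions):
//
//   Map<AsyncLogger, GetJournalStateResponseProto> states =
//       waitForWriteQuorum(getJournalState(), timeoutMs, "getJournalState()");
//   long epoch = 1 + maxLastPromisedEpoch(states.values());
//   waitForWriteQuorum(newEpoch(nsInfo, epoch), timeoutMs, "newEpoch()");
//   setEpoch(epoch);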
/**
* Close all of the underlying loggers.
*/
void close() {
for (AsyncLogger logger : loggers) {
logger.close();
}
}
void purgeLogsOlderThan(long minTxIdToKeep) {
for (AsyncLogger logger : loggers) {
logger.purgeLogsOlderThan(minTxIdToKeep);
}
}
/**
* Wait for a quorum of loggers to respond to the given call. If a quorum
* can't be achieved, throws a QuorumException.
* @param q the quorum call
* @param timeoutMs the number of millis to wait
* @param operationName textual description of the operation, for logging
* @return a map of successful results
* @throws QuorumException if a quorum doesn't respond with success
* @throws IOException if the thread is interrupted or times out
*/
<V> Map<AsyncLogger, V> waitForWriteQuorum(QuorumCall<AsyncLogger, V> q,
int timeoutMs, String operationName) throws IOException {
int majority = getMajoritySize();
try {
q.waitFor(
loggers.size(), // either all respond
majority, // or we get a majority of successes
majority, // or we get a majority of failures
timeoutMs, operationName);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new IOException("Interrupted waiting " + timeoutMs + "ms for a " +
"quorum of nodes to respond.");
} catch (TimeoutException e) {
throw new IOException("Timed out waiting " + timeoutMs + "ms for a " +
"quorum of nodes to respond.");
}
if (q.countSuccesses() < majority) {
q.rethrowException("Got too many exceptions to achieve quorum size " +
getMajorityString());
}
return q.getResults();
}
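// Caller-side sketch (QuorumJournalManager-style; the timeout name is an
// assumption): fan a call out to every logger, then block until a write
// quorum succeeds or the call fails:
//
//   QuorumCall<AsyncLogger, Void> q =
//       sendEdits(segmentTxId, firstTxnId, numTxns, data);
//   waitForWriteQuorum(q, writeTimeoutMs, "sendEdits");  // throws if no quorum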
/**
* @return the number of nodes which are required to obtain a quorum.
*/
int getMajoritySize() {
return loggers.size() / 2 + 1;
}
/**
* @return a textual description of the majority size (e.g. "2/3" or "3/5")
*/
String getMajorityString() {
return getMajoritySize() + "/" + loggers.size();
}
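// Example: with 3 loggers the majority is 3 / 2 + 1 = 2 (reported as "2/3");
// with 5 loggers it is 5 / 2 + 1 = 3 ("3/5"). A call must succeed on at least
// that many JournalNodes to count as a quorum write.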
/**
* @return the number of loggers behind this set
*/
int size() {
return loggers.size();
}
@Override
public String toString() {
return "[" + Joiner.on(", ").join(loggers) + "]";
}
/**
* Append an HTML-formatted status readout on the current
* state of the underlying loggers.
* @param sb the StringBuilder to append to
*/
void appendReport(StringBuilder sb) {
for (int i = 0, len = loggers.size(); i < len; ++i) {
AsyncLogger l = loggers.get(i);
if (i != 0) {
sb.append(", ");
}
sb.append(l).append(" (");
l.appendReport(sb);
sb.append(")");
}
}
/**
* @return the (mutable) list of loggers, for use in tests to
* set up spies
*/
@VisibleForTesting
List<AsyncLogger> getLoggersForTests() {
return loggers;
}
///////////////////////////////////////////////////////////////////////////
// The rest of this file is simply boilerplate wrappers which fan-out the
// various IPC calls to the underlying AsyncLoggers and wrap the result
// in a QuorumCall.
///////////////////////////////////////////////////////////////////////////
public QuorumCall<AsyncLogger, GetJournalStateResponseProto> getJournalState() {
Map<AsyncLogger, ListenableFuture<GetJournalStateResponseProto>> calls =
Maps.newHashMap();
for (AsyncLogger logger : loggers) {
calls.put(logger, logger.getJournalState());
}
return QuorumCall.create(calls);
}
public QuorumCall<AsyncLogger, Boolean> isFormatted() {
Map<AsyncLogger, ListenableFuture<Boolean>> calls = Maps.newHashMap();
for (AsyncLogger logger : loggers) {
calls.put(logger, logger.isFormatted());
}
return QuorumCall.create(calls);
}
public QuorumCall<AsyncLogger, NewEpochResponseProto> newEpoch(
NamespaceInfo nsInfo,
long epoch) {
Map<AsyncLogger, ListenableFuture<NewEpochResponseProto>> calls =
Maps.newHashMap();
for (AsyncLogger logger : loggers) {
calls.put(logger, logger.newEpoch(epoch));
}
return QuorumCall.create(calls);
}
public QuorumCall<AsyncLogger, Void> startLogSegment(
long txid, int layoutVersion) {
Map<AsyncLogger, ListenableFuture<Void>> calls = Maps.newHashMap();
for (AsyncLogger logger : loggers) {
calls.put(logger, logger.startLogSegment(txid, layoutVersion));
}
return QuorumCall.create(calls);
}
public QuorumCall<AsyncLogger, Void> finalizeLogSegment(long firstTxId,
long lastTxId) {
Map<AsyncLogger, ListenableFuture<Void>> calls = Maps.newHashMap();
for (AsyncLogger logger : loggers) {
calls.put(logger, logger.finalizeLogSegment(firstTxId, lastTxId));
}
return QuorumCall.create(calls);
}
public QuorumCall<AsyncLogger, Void> sendEdits(
long segmentTxId, long firstTxnId, int numTxns, byte[] data) {
Map<AsyncLogger, ListenableFuture<Void>> calls = Maps.newHashMap();
for (AsyncLogger logger : loggers) {
ListenableFuture<Void> future =
logger.sendEdits(segmentTxId, firstTxnId, numTxns, data);
calls.put(logger, future);
}
return QuorumCall.create(calls);
}
public QuorumCall<AsyncLogger, RemoteEditLogManifest> getEditLogManifest(
long fromTxnId, boolean inProgressOk) {
Map<AsyncLogger, ListenableFuture<RemoteEditLogManifest>> calls
= Maps.newHashMap();
for (AsyncLogger logger : loggers) {
ListenableFuture<RemoteEditLogManifest> future =
logger.getEditLogManifest(fromTxnId, inProgressOk);
calls.put(logger, future);
}
return QuorumCall.create(calls);
}
QuorumCall<AsyncLogger, PrepareRecoveryResponseProto>
prepareRecovery(long segmentTxId) {
Map<AsyncLogger, ListenableFuture<PrepareRecoveryResponseProto>> calls
= Maps.newHashMap();
for (AsyncLogger logger : loggers) {
ListenableFuture<PrepareRecoveryResponseProto> future =
logger.prepareRecovery(segmentTxId);
calls.put(logger, future);
}
return QuorumCall.create(calls);
}
QuorumCall<AsyncLogger, Void>
acceptRecovery(SegmentStateProto log, URL fromURL) {
Map<AsyncLogger, ListenableFuture<Void>> calls
= Maps.newHashMap();
for (AsyncLogger logger : loggers) {
ListenableFuture<Void> future =
logger.acceptRecovery(log, fromURL);
calls.put(logger, future);
}
return QuorumCall.create(calls);
}
QuorumCall<AsyncLogger, Void> format(NamespaceInfo nsInfo) {
Map<AsyncLogger, ListenableFuture<Void>> calls =
Maps.newHashMap();
for (AsyncLogger logger : loggers) {
ListenableFuture<Void> future =
logger.format(nsInfo);
calls.put(logger, future);
}
return QuorumCall.create(calls);
}
public QuorumCall<AsyncLogger, Void> discardSegments(long startTxId) {
Map<AsyncLogger, ListenableFuture<Void>> calls = Maps.newHashMap();
for (AsyncLogger logger : loggers) {
ListenableFuture<Void> future = logger.discardSegments(startTxId);
calls.put(logger, future);
}
return QuorumCall.create(calls);
}
QuorumCall<AsyncLogger, Void> doPreUpgrade() {
Map<AsyncLogger, ListenableFuture<Void>> calls =
Maps.newHashMap();
for (AsyncLogger logger : loggers) {
ListenableFuture<Void> future =
logger.doPreUpgrade();
calls.put(logger, future);
}
return QuorumCall.create(calls);
}
public QuorumCall<AsyncLogger, Void> doUpgrade(StorageInfo sInfo) {
Map<AsyncLogger, ListenableFuture<Void>> calls =
Maps.newHashMap();
for (AsyncLogger logger : loggers) {
ListenableFuture<Void> future =
logger.doUpgrade(sInfo);
calls.put(logger, future);
}
return QuorumCall.create(calls);
}
public QuorumCall<AsyncLogger, Void> doFinalize() {
Map<AsyncLogger, ListenableFuture<Void>> calls =
Maps.newHashMap();
for (AsyncLogger logger : loggers) {
ListenableFuture<Void> future =
logger.doFinalize();
calls.put(logger, future);
}
return QuorumCall.create(calls);
}
public QuorumCall<AsyncLogger, Boolean> canRollBack(StorageInfo storage,
StorageInfo prevStorage, int targetLayoutVersion) {
Map<AsyncLogger, ListenableFuture<Boolean>> calls =
Maps.newHashMap();
for (AsyncLogger logger : loggers) {
ListenableFuture<Boolean> future =
logger.canRollBack(storage, prevStorage, targetLayoutVersion);
calls.put(logger, future);
}
return QuorumCall.create(calls);
}
public QuorumCall<AsyncLogger, Void> doRollback() {
Map<AsyncLogger, ListenableFuture<Void>> calls =
Maps.newHashMap();
for (AsyncLogger logger : loggers) {
ListenableFuture<Void> future =
logger.doRollback();
calls.put(logger, future);
}
return QuorumCall.create(calls);
}
public QuorumCall<AsyncLogger, Long> getJournalCTime() {
Map<AsyncLogger, ListenableFuture<Long>> calls =
Maps.newHashMap();
for (AsyncLogger logger : loggers) {
ListenableFuture<Long> future = logger.getJournalCTime();
calls.put(logger, future);
}
return QuorumCall.create(calls);
}
}