/* This file is part of VoltDB.
* Copyright (C) 2008-2018 VoltDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with VoltDB.  If not, see <http://www.gnu.org/licenses/>.
*/
package org.voltdb.iv2;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Queue;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;
import org.voltcore.logging.VoltLogger;
import org.voltcore.messaging.HostMessenger;
import org.voltcore.messaging.Mailbox;
import org.voltcore.messaging.TransactionInfoBaseMessage;
import org.voltcore.messaging.VoltMessage;
import org.voltcore.utils.CoreUtils;
import org.voltdb.ClientResponseImpl;
import org.voltdb.CommandLog;
import org.voltdb.CommandLog.DurabilityListener;
import org.voltdb.RealVoltDB;
import org.voltdb.SnapshotCompletionInterest;
import org.voltdb.SnapshotCompletionMonitor;
import org.voltdb.SystemProcedureCatalog;
import org.voltdb.VoltDB;
import org.voltdb.VoltDBInterface;
import org.voltdb.VoltTable;
import org.voltdb.client.ClientResponse;
import org.voltdb.dtxn.TransactionState;
import org.voltdb.exceptions.SerializableException;
import org.voltdb.exceptions.TransactionRestartException;
import org.voltdb.iv2.SiteTasker.SiteTaskerRunnable;
import org.voltdb.messaging.BorrowTaskMessage;
import org.voltdb.messaging.CompleteTransactionMessage;
import org.voltdb.messaging.CompleteTransactionResponseMessage;
import org.voltdb.messaging.DummyTransactionResponseMessage;
import org.voltdb.messaging.DummyTransactionTaskMessage;
import org.voltdb.messaging.DumpMessage;
import org.voltdb.messaging.DumpPlanThenExitMessage;
import org.voltdb.messaging.FragmentResponseMessage;
import org.voltdb.messaging.FragmentTaskMessage;
import org.voltdb.messaging.InitiateResponseMessage;
import org.voltdb.messaging.Iv2InitiateTaskMessage;
import org.voltdb.messaging.Iv2LogFaultMessage;
import org.voltdb.messaging.MPBacklogFlushMessage;
import org.voltdb.messaging.MultiPartitionParticipantMessage;
import org.voltdb.messaging.RepairLogTruncationMessage;
import org.voltdb.utils.MiscUtils;
import org.voltdb.utils.VoltTrace;
import com.google_voltpatches.common.collect.Sets;
import com.google_voltpatches.common.primitives.Ints;
import com.google_voltpatches.common.primitives.Longs;
import com.google_voltpatches.common.util.concurrent.ListenableFuture;
import com.google_voltpatches.common.util.concurrent.SettableFuture;
public class SpScheduler extends Scheduler implements SnapshotCompletionInterest
{
// Transaction-management and host-wide loggers used throughout the scheduler.
static final VoltLogger tmLog = new VoltLogger("TM");
static final VoltLogger hostLog = new VoltLogger("HOST");
/**
 * Key identifying a duplicate counter by (txnId, spHandle).  Sorted by txnId
 * first and spHandle second, which is the order updateReplicas() relies on
 * when releasing completed counters.
 *
 * Note: the generic bound on Comparable is required -- a raw Comparable would
 * make the @Override on compareTo(DuplicateCounterKey) a compile error.
 */
static class DuplicateCounterKey implements Comparable<DuplicateCounterKey> {
    private final long m_txnId;
    private final long m_spHandle;

    DuplicateCounterKey(long txnId, long spHandle) {
        m_txnId = txnId;
        m_spHandle = spHandle;
    }

    @Override
    public boolean equals(Object o) {
        // instanceof handles both null and foreign types; the original
        // achieved the same result by catching the resulting exceptions.
        if (!(o instanceof DuplicateCounterKey)) {
            return false;
        }
        DuplicateCounterKey other = (DuplicateCounterKey) o;
        return m_txnId == other.m_txnId && m_spHandle == other.m_spHandle;
    }

    // Only care about comparing TXN ID part for sorting in updateReplicas
    @Override
    public int compareTo(DuplicateCounterKey o) {
        if (m_txnId != o.m_txnId) {
            return m_txnId < o.m_txnId ? -1 : 1;
        }
        if (m_spHandle != o.m_spHandle) {
            return m_spHandle < o.m_spHandle ? -1 : 1;
        }
        return 0;
    }

    // Deliberately unusable: instances live only in a TreeMap, which orders
    // by compareTo() and never hashes its keys.
    @Override
    public int hashCode() {
        assert(false) : "Hashing this is unsafe as it can't promise no collisions.";
        throw new UnsupportedOperationException(
                "Hashing this is unsafe as it can't promise no collisions.");
    }

    @Override
    public String toString() {
        return "[txn:" + TxnEgo.txnIdToString(m_txnId) + "(" + m_txnId + "), spHandle:" +
                TxnEgo.txnIdToString(m_spHandle) + "(" + m_spHandle + ")]";
    }

    // True when the txnId was minted by an SP initiator (MP txns carry the
    // MP initiator's partition id).
    public boolean isSpTransaction() {
        return (TxnEgo.getPartitionId(m_txnId) != MpInitiator.MP_INIT_PID);
    }
}
public interface DurableUniqueIdListener {
/**
 * Notify listener of last durable Single-Part and Multi-Part uniqueIds
 *
 * @param spUniqueId last single-partition uniqueId made durable
 * @param mpUniqueId last multi-partition uniqueId made durable
 */
public void lastUniqueIdsMadeDurable(long spUniqueId, long mpUniqueId);
}
// Current replica set for this partition, including the local site's HSId.
// (Generic parameters restored; they were stripped in this copy of the file.)
private List<Long> m_replicaHSIds = new ArrayList<>();
// Remote replicas we forward work to: m_replicaHSIds minus the local HSId.
long m_sendToHSIds[] = new long[0];
private final TransactionTaskQueue m_pendingTasks;
// Transactions seen by this site that have not yet completed, keyed by txnId.
private final Map<Long, TransactionState> m_outstandingTxns =
        new HashMap<Long, TransactionState>();
// Duplicate counters awaiting replica responses, ordered by (txnId, spHandle).
private final TreeMap<DuplicateCounterKey, DuplicateCounter> m_duplicateCounters =
        new TreeMap<DuplicateCounterKey, DuplicateCounter>();
// MP fragment tasks or completion tasks pending durability
private final Map<Long, Queue<TransactionTask>> m_mpsPendingDurability =
        new HashMap<Long, Queue<TransactionTask>>();
private CommandLog m_cl;
private final SnapshotCompletionMonitor m_snapMonitor;
private BufferedReadLog m_bufferedReadLog = null;
// Need to track when command log replay is complete (even if not performed) so that
// we know when we can start writing viable replay sets to the fault log.
boolean m_replayComplete = false;
// The DurabilityListener is not thread-safe. Access it only on the Site thread.
private final DurabilityListener m_durabilityListener;
// Generator of pre-IV2ish timestamp based unique IDs
private final UniqueIdGenerator m_uniqueIdGenerator;
// the current not-needed-any-more point of the repair log.
long m_repairLogTruncationHandle = Long.MIN_VALUE;
// the truncation handle last sent to the replicas
long m_lastSentTruncationHandle = Long.MIN_VALUE;
// the max schedule transaction sphandle, multi-fragments mp txn counts one
long m_maxScheduledTxnSpHandle = Long.MIN_VALUE;
// the checkpoint transaction sphandle upon MigratePartitionLeader is initiated
long m_migratePartitionLeaderCheckPoint = Long.MIN_VALUE;
//The RepairLog is the same instance as the one initialized in InitiatorMailbox.
//Iv2IniatiateTaskMessage, FragmentTaskMessage and CompleteTransactionMessage
//are to be added to the repair log when these messages get updated transaction ids.
protected RepairLog m_repairLog;
// True when the cluster was started with K > 0 (replicas may exist).
private final boolean IS_KSAFE_CLUSTER;
/**
 * Build the scheduler for one partition.
 *
 * @param partitionId       partition this scheduler serves
 * @param taskQueue         site task queue shared with the execution site
 * @param snapMonitor       monitor used to observe snapshot completion
 * @param scoreboardEnabled whether the TransactionTaskQueue scoreboard is on
 */
SpScheduler(int partitionId, SiteTaskerQueue taskQueue, SnapshotCompletionMonitor snapMonitor, boolean scoreboardEnabled)
{
super(partitionId, taskQueue);
m_pendingTasks = new TransactionTaskQueue(m_tasks, scoreboardEnabled);
m_snapMonitor = snapMonitor;
m_durabilityListener = new SpDurabilityListener(this, m_pendingTasks);
m_uniqueIdGenerator = new UniqueIdGenerator(partitionId, 0);
m_bufferedReadLog = new BufferedReadLog();
// Start the truncation handle at the current txn id so initial reads
// are released into the system immediately.
m_repairLogTruncationHandle = getCurrentTxnId();
// initialized as current txn id in order to release the initial reads into the system
m_maxScheduledTxnSpHandle = getCurrentTxnId();
IS_KSAFE_CLUSTER = VoltDB.instance().getKFactor() > 0;
}
// Delegates scoreboard initialization to the pending-task queue for this site.
public void initializeScoreboard(int siteId, Mailbox mailBox) {
m_pendingTasks.initializeScoreboard(siteId, mailBox);
}
@Override
public void setLeaderState(boolean isLeader)
{
super.setLeaderState(isLeader);
// Registered on every leader-state change; presumably addInterest is
// idempotent -- TODO confirm against SnapshotCompletionMonitor.
m_snapMonitor.addInterest(this);
VoltDBInterface db = VoltDB.instance();
// Guard on RealVoltDB: unit tests may supply a mock VoltDBInterface.
if (isLeader && db instanceof RealVoltDB ) {
SpInitiator init = (SpInitiator)((RealVoltDB)db).getInitiator(m_partitionId);
if (init.m_term != null) {
// Leader promotion is complete; clear the term's promoting flag.
((SpTerm)init.m_term).setPromoting(false);
}
}
}
@Override
public void setMaxSeenTxnId(long maxSeenTxnId)
{
super.setMaxSeenTxnId(maxSeenTxnId);
// Record a viable replay set now that the seen-txn watermark moved.
writeIv2ViableReplayEntry();
}
@Override
public void configureDurableUniqueIdListener(final DurableUniqueIdListener listener, final boolean install) {
// The DurabilityListener must only be touched on the Site thread (see field
// comment), so the (un)install is queued as a site task rather than done here.
m_tasks.offer(new SiteTaskerRunnable() {
@Override
void run()
{
m_durabilityListener.configureUniqueIdListener(listener, install);
}
// init() tags the task with a readable name before it is offered.
private SiteTaskerRunnable init(DurableUniqueIdListener listener){
taskInfo = listener.getClass().getSimpleName();
return this;
}
}.init(listener));
}
@Override
public void shutdown()
{
// Offer the null task so the site thread wakes up and can observe shutdown.
m_tasks.offer(m_nullTask);
}
// This is going to run in the BabySitter's thread. This and deliver are synchronized by
// virtue of both being called on InitiatorMailbox and not directly called.
// (That is, InitiatorMailbox's API, used by BabySitter, is synchronized on the same
// lock deliver() is synchronized on.)
/**
 * Install a new replica set for this partition and reconcile all in-flight
 * duplicate counters against it.  Counters satisfied by the shrunken set are
 * released in txnId order, their responses forwarded, and the repair-log
 * truncation handle advanced.  Finally a viable replay entry is written and
 * this call blocks until it is durable, so no new transactions start before
 * the fault log reflects the new set.
 *
 * Generic parameters restored throughout; they were stripped in this copy
 * (the raw-typed loops below would not compile).
 *
 * @param replicas         new replica HSIds (includes the local site)
 * @param partitionMasters current partition-master map (unused here)
 * @param snapshotSaveTxnId txnId of an in-progress stream snapshot save to
 *                          forward to newly joined replicas, or -1 if none
 * @return HSIds of replicas added by this update (empty if none)
 */
@Override
public long[] updateReplicas(List<Long> replicas, Map<Integer, Long> partitionMasters, long snapshotSaveTxnId)
{
    if (tmLog.isDebugEnabled()) {
        tmLog.debug("[SpScheduler.updateReplicas] replicas to " + CoreUtils.hsIdCollectionToString(replicas) +
                " on " + CoreUtils.hsIdToString(m_mailbox.getHSId())
                + " from " + CoreUtils.hsIdCollectionToString(m_replicaHSIds));
    }
    long[] replicasAdded = new long[0];
    if (m_replicaHSIds.size() > 0 && replicas.size() > m_replicaHSIds.size()) {
        // Remember the rejoin sites before update replicas set
        Set<Long> rejoinHSIds = Sets.difference(new HashSet<Long>(replicas),
                new HashSet<Long>(m_replicaHSIds));
        replicasAdded = Longs.toArray(rejoinHSIds);
    }
    // First - correct the official replica set.
    m_replicaHSIds = replicas;
    // Update the list of remote replicas that we'll need to send to
    List<Long> sendToHSIds = new ArrayList<Long>(m_replicaHSIds);
    sendToHSIds.remove(m_mailbox.getHSId());
    m_sendToHSIds = Longs.toArray(sendToHSIds);
    // A new site joins in, forward the current txn (stream snapshot save) message to new site
    if (m_isLeader && snapshotSaveTxnId != -1) {
        // HACKY HACKY HACKY, we know at this time there will be only one fragment with this txnId, so it's safe to use
        // Long.MAX_VALUE to match the duplicate counter key with the given txn id (there is only one!)
        Entry<DuplicateCounterKey, DuplicateCounter> snapshotFragment =
                m_duplicateCounters.floorEntry(new DuplicateCounterKey(snapshotSaveTxnId, Long.MAX_VALUE));
        assert(snapshotFragment != null);
        snapshotFragment.getValue().addReplicas(replicasAdded);
        // Forward fragment message to new replica
        m_mailbox.send(replicasAdded, snapshotFragment.getValue().getOpenMessage());
    }
    // Cleanup duplicate counters and collect DONE counters
    // in this list for further processing.
    List<DuplicateCounterKey> doneCounters = new LinkedList<DuplicateCounterKey>();
    for (Entry<DuplicateCounterKey, DuplicateCounter> entry : m_duplicateCounters.entrySet()) {
        DuplicateCounter counter = entry.getValue();
        int result = counter.updateReplicas(m_replicaHSIds);
        if (result == DuplicateCounter.DONE) {
            doneCounters.add(entry.getKey());
        }
    }
    //notify the new partition leader that the old leader has completed the Txns if needed
    //after duplicate counters are cleaned. m_mailbox can be MockMailBox which is used for
    //unit test
    if (!m_isLeader && m_mailbox instanceof InitiatorMailbox) {
        ((InitiatorMailbox)m_mailbox).notifyNewLeaderOfTxnDoneIfNeeded();
    }
    // Maintain the CI invariant that responses arrive in txnid order.
    Collections.sort(doneCounters);
    for (DuplicateCounterKey key : doneCounters) {
        DuplicateCounter counter = m_duplicateCounters.remove(key);
        final TransactionState txn = m_outstandingTxns.get(key.m_txnId);
        if (txn == null || txn.isDone()) {
            m_outstandingTxns.remove(key.m_txnId);
            // for MP write txns, we should use it's first SpHandle in the TransactionState
            // for SP write txns, we can just use the SpHandle from the DuplicateCounterKey
            long safeSpHandle = txn == null ? key.m_spHandle : txn.m_spHandle;
            setRepairLogTruncationHandle(safeSpHandle, false);
        }
        VoltMessage resp = counter.getLastResponse();
        if (resp != null) {
            // MPI is tracking deps per partition HSID. We need to make
            // sure we write ours into the message getting sent to the MPI
            if (resp instanceof FragmentResponseMessage) {
                FragmentResponseMessage fresp = (FragmentResponseMessage)resp;
                fresp.setExecutorSiteId(m_mailbox.getHSId());
            }
            m_mailbox.send(counter.m_destinationId, resp);
        }
        else {
            hostLog.warn("TXN " + counter.getTxnId() + " lost all replicas and " +
                    "had no responses. This should be impossible?");
        }
    }
    SettableFuture<Boolean> written = writeIv2ViableReplayEntry();
    // Get the fault log status here to ensure the leader has written it to disk
    // before initiating transactions again.
    blockFaultLogWriteStatus(written);
    return replicasAdded;
}
/**
 * Pull everything the replay sequencer has made available and process it.
 * Sequenced messages are delivered normally; drained messages that are SP
 * initiations are answered with an IGNORED response instead of executing.
 */
private void deliverReadyTxns() {
    // Deliver every already-sequenced message, in arrival order.
    for (VoltMessage msg = m_replaySequencer.poll(); msg != null; msg = m_replaySequencer.poll()) {
        deliver(msg);
    }
    // Drain whatever remains after sequencing is done.
    for (VoltMessage msg = m_replaySequencer.drain(); msg != null; msg = m_replaySequencer.drain()) {
        if (msg instanceof Iv2InitiateTaskMessage) {
            // Send IGNORED response for all SPs
            Iv2InitiateTaskMessage task = (Iv2InitiateTaskMessage) msg;
            InitiateResponseMessage response = new InitiateResponseMessage(task);
            response.setResults(new ClientResponseImpl(ClientResponse.UNEXPECTED_FAILURE,
                    new VoltTable[0],
                    ClientResponseImpl.IGNORED_TRANSACTION));
            m_mailbox.send(response.getInitiatorHSId(), response);
        }
    }
}
/**
* Sequence the message for replay if it's for CL or DR.
*
* @param message
* @return true if the message can be delivered directly to the scheduler,
* false if the message is queued
*/
@Override
public boolean sequenceForReplay(VoltMessage message)
{
boolean canDeliver = false;
long sequenceWithUniqueId = Long.MIN_VALUE;
// CL replay messages are TransactionInfoBaseMessages flagged for replay.
boolean commandLog = (message instanceof TransactionInfoBaseMessage &&
(((TransactionInfoBaseMessage)message).isForReplay()));
// DR sentinels arrive as MultiPartitionParticipantMessages.
boolean sentinel = message instanceof MultiPartitionParticipantMessage;
boolean replay = commandLog || sentinel;
// Only the partition leader sequences; replicas just track uniqueIds below.
boolean sequenceForReplay = m_isLeader && replay;
if (replay) {
sequenceWithUniqueId = ((TransactionInfoBaseMessage)message).getUniqueId();
}
if (sequenceForReplay) {
InitiateResponseMessage dupe = m_replaySequencer.dedupe(sequenceWithUniqueId,
(TransactionInfoBaseMessage) message);
if (dupe != null) {
// Duplicate initiate task message, send response
m_mailbox.send(dupe.getInitiatorHSId(), dupe);
}
// offer() returning false means the sequencer declined to queue it,
// so the message may be delivered directly.
else if (!m_replaySequencer.offer(sequenceWithUniqueId, (TransactionInfoBaseMessage) message)) {
canDeliver = true;
}
else {
// Queued: see if the offer unblocked any ready transactions.
deliverReadyTxns();
}
// If it's a DR sentinel, send an acknowledgement
if (sentinel && !commandLog) {
MultiPartitionParticipantMessage mppm = (MultiPartitionParticipantMessage) message;
final InitiateResponseMessage response = new InitiateResponseMessage(mppm);
ClientResponseImpl clientResponse =
new ClientResponseImpl(ClientResponseImpl.UNEXPECTED_FAILURE,
new VoltTable[0], ClientResponseImpl.IGNORED_TRANSACTION);
response.setResults(clientResponse);
m_mailbox.send(response.getInitiatorHSId(), response);
}
}
else {
if (replay) {
// Update last seen and last polled uniqueId for replicas
m_replaySequencer.updateLastSeenUniqueId(sequenceWithUniqueId,
(TransactionInfoBaseMessage) message);
m_replaySequencer.updateLastPolledUniqueId(sequenceWithUniqueId,
(TransactionInfoBaseMessage) message);
}
canDeliver = true;
}
return canDeliver;
}
// SpInitiators will see every message type. The Responses currently come
// from local work, but will come from replicas when replication is
// implemented
// Single dispatch point for every message type this scheduler handles.
// The instanceof chain is evaluated top to bottom; its order is preserved.
@Override
public void deliver(VoltMessage message)
{
    if (message instanceof Iv2InitiateTaskMessage) {
        handleIv2InitiateTaskMessage((Iv2InitiateTaskMessage) message);
    } else if (message instanceof InitiateResponseMessage) {
        handleInitiateResponseMessage((InitiateResponseMessage) message);
    } else if (message instanceof FragmentTaskMessage) {
        handleFragmentTaskMessage((FragmentTaskMessage) message);
    } else if (message instanceof FragmentResponseMessage) {
        handleFragmentResponseMessage((FragmentResponseMessage) message);
    } else if (message instanceof CompleteTransactionMessage) {
        handleCompleteTransactionMessage((CompleteTransactionMessage) message);
    } else if (message instanceof CompleteTransactionResponseMessage) {
        handleCompleteTransactionResponseMessage((CompleteTransactionResponseMessage) message);
    } else if (message instanceof BorrowTaskMessage) {
        handleBorrowTaskMessage((BorrowTaskMessage) message);
    } else if (message instanceof Iv2LogFaultMessage) {
        handleIv2LogFaultMessage((Iv2LogFaultMessage) message);
    } else if (message instanceof DumpMessage) {
        handleDumpMessage();
    } else if (message instanceof DumpPlanThenExitMessage) {
        handleDumpPlanMessage((DumpPlanThenExitMessage) message);
    } else if (message instanceof DummyTransactionTaskMessage) {
        handleDummyTransactionTaskMessage((DummyTransactionTaskMessage) message);
    } else if (message instanceof DummyTransactionResponseMessage) {
        handleDummyTransactionResponseMessage((DummyTransactionResponseMessage) message);
    } else if (message instanceof MPBacklogFlushMessage) {
        cleanupTransactionBacklogOnRepair();
    } else {
        throw new RuntimeException("UNKNOWN MESSAGE TYPE, BOOM!");
    }
}
// SpScheduler expects to see InitiateTaskMessages corresponding to single-partition
// procedures only.
/**
 * Accept a single-partition initiation.  On the leader (or for reads) this
 * assigns a new SpHandle/uniqueId, logs the message for repair, replicates
 * writes to the replica set with a DuplicateCounter, and finally offers the
 * (possibly copied) message to the local site for execution.  On replicas it
 * adopts the SpHandle chosen by the leader.
 *
 * @param message incoming SP initiation; must be single-partition
 */
private void handleIv2InitiateTaskMessage(Iv2InitiateTaskMessage message)
{
if (!message.isSinglePartition()) {
throw new RuntimeException("SpScheduler.handleIv2InitiateTaskMessage " +
"should never receive multi-partition initiations.");
}
final String procedureName = message.getStoredProcedureName();
long newSpHandle;
long uniqueId = Long.MIN_VALUE;
Iv2InitiateTaskMessage msg = message;
if (m_isLeader || message.isReadOnly()) {
/*
* If this is for CL replay or DR, update the unique ID generator
*/
if (message.isForReplay()) {
uniqueId = message.getUniqueId();
try {
m_uniqueIdGenerator.updateMostRecentlyGeneratedUniqueId(uniqueId);
}
catch (Exception e) {
// A replayed uniqueId that can't be accepted is unrecoverable.
hostLog.fatal(e.getMessage());
hostLog.fatal("Invocation: " + message);
VoltDB.crashLocalVoltDB(e.getMessage(), true, e);
}
}
/*
* If this is CL replay use the txnid from the CL and also
* update the txnid to match the one from the CL
*/
if (message.isForReplay()) {
TxnEgo ego = advanceTxnEgo();
newSpHandle = ego.getTxnId();
updateMaxScheduledTransactionSpHandle(newSpHandle);
} else if (m_isLeader && !message.isReadOnly()) {
// Leader write: mint a fresh SpHandle and uniqueId.
TxnEgo ego = advanceTxnEgo();
newSpHandle = ego.getTxnId();
updateMaxScheduledTransactionSpHandle(newSpHandle);
uniqueId = m_uniqueIdGenerator.getNextUniqueId();
} else {
/*
* The SPI read or the short circuit read case. Since we are read only,
* do not create new transaction IDs but reuse the last seen
* txnid. For a timestamp, might as well give a reasonable one
* for a read heavy workload so time isn't bursty.
*/
uniqueId = UniqueIdGenerator.makeIdFromComponents(
Math.max(System.currentTimeMillis(), m_uniqueIdGenerator.lastUsedTime),
0,
m_uniqueIdGenerator.partitionId);
newSpHandle = getMaxScheduledTxnSpHandle();
}
// Need to set the SP handle on the received message
// Need to copy this or the other local sites handling
// the same initiate task message will overwrite each
// other's memory -- the message isn't copied on delivery
// to other local mailboxes.
msg = new Iv2InitiateTaskMessage(
message.getInitiatorHSId(),
message.getCoordinatorHSId(),
getRepairLogTruncationHandleForReplicas(),
message.getTxnId(),
message.getUniqueId(),
message.isReadOnly(),
message.isSinglePartition(),
null,
message.getStoredProcedureInvocation(),
message.getClientInterfaceHandle(),
message.getConnectionId(),
message.isForReplay());
msg.setSpHandle(newSpHandle);
logRepair(msg);
// Also, if this is a vanilla single-part procedure, make the TXNID
// be the SpHandle (for now)
// Only system procedures are every-site, so we'll check through the SystemProcedureCatalog
if (SystemProcedureCatalog.listing.get(procedureName) == null ||
!SystemProcedureCatalog.listing.get(procedureName).getEverysite())
{
msg.setTxnId(newSpHandle);
msg.setUniqueId(uniqueId);
}
// The leader will be responsible to replicate messages to replicas.
// Don't replicate reads, no matter FAST or SAFE.
if (m_isLeader && (!msg.isReadOnly()) && IS_KSAFE_CLUSTER ) {
for (long hsId : m_sendToHSIds) {
Iv2InitiateTaskMessage finalMsg = msg;
final VoltTrace.TraceEventBatch traceLog = VoltTrace.log(VoltTrace.Category.SPI);
if (traceLog != null) {
traceLog.add(() -> VoltTrace.beginAsync("replicateSP",
MiscUtils.hsIdPairTxnIdToString(m_mailbox.getHSId(), hsId, finalMsg.getSpHandle(), finalMsg.getClientInterfaceHandle()),
"txnId", TxnEgo.txnIdToString(finalMsg.getTxnId()),
"dest", CoreUtils.hsIdToString(hsId)));
}
}
// Replica copy: self-addressed, flagged as a replicated message.
Iv2InitiateTaskMessage replmsg =
new Iv2InitiateTaskMessage(m_mailbox.getHSId(),
m_mailbox.getHSId(),
getRepairLogTruncationHandleForReplicas(),
msg.getTxnId(),
msg.getUniqueId(),
msg.isReadOnly(),
msg.isSinglePartition(),
msg.getStoredProcedureInvocation(),
msg.getClientInterfaceHandle(),
msg.getConnectionId(),
msg.isForReplay(),
true);
// Update the handle in the copy since the constructor doesn't set it
replmsg.setSpHandle(newSpHandle);
// K-safety cluster doesn't always mean partition has replicas,
// node failure may reduce the number of replicas for each partition
if (m_sendToHSIds.length > 0) {
m_mailbox.send(m_sendToHSIds, replmsg);
}
// Track replica responses so we answer the initiator exactly once.
DuplicateCounter counter = new DuplicateCounter(
msg.getInitiatorHSId(),
msg.getTxnId(),
m_replicaHSIds,
replmsg);
safeAddToDuplicateCounterMap(new DuplicateCounterKey(msg.getTxnId(), newSpHandle), counter);
}
}
else {
// Replica path: adopt the leader-assigned SpHandle.
setMaxSeenTxnId(msg.getSpHandle());
newSpHandle = msg.getSpHandle();
logRepair(msg);
// Don't update the uniqueID if this is a run-everywhere txn, because it has an MPI unique ID.
if (UniqueIdGenerator.getPartitionIdFromUniqueId(msg.getUniqueId()) == m_partitionId) {
m_uniqueIdGenerator.updateMostRecentlyGeneratedUniqueId(msg.getUniqueId());
}
}
Iv2Trace.logIv2InitiateTaskMessage(message, m_mailbox.getHSId(), msg.getTxnId(), newSpHandle);
doLocalInitiateOffer(msg);
}
/**
* Do the work necessary to turn the Iv2InitiateTaskMessage into a
* TransactionTask which can be queued to the TransactionTaskQueue.
* This is reused by both the normal message handling path and the repair
* path, and assumes that the caller has dealt with or ensured that the
* necessary ID, SpHandles, and replication issues are resolved.
*/
private void doLocalInitiateOffer(Iv2InitiateTaskMessage msg)
{
final VoltTrace.TraceEventBatch traceLog = VoltTrace.log(VoltTrace.Category.SPI);
if (traceLog != null) {
final String threadName = Thread.currentThread().getName(); // Thread name has to be materialized here
traceLog.add(() -> VoltTrace.meta("process_name", "name", CoreUtils.getHostnameOrAddress()))
.add(() -> VoltTrace.meta("thread_name", "name", threadName))
.add(() -> VoltTrace.meta("thread_sort_index", "sort_index", Integer.toString(10000)))
.add(() -> VoltTrace.beginAsync("initsp",
MiscUtils.hsIdPairTxnIdToString(m_mailbox.getHSId(), m_mailbox.getHSId(), msg.getSpHandle(), msg.getClientInterfaceHandle()),
"ciHandle", msg.getClientInterfaceHandle(),
"txnId", TxnEgo.txnIdToString(msg.getTxnId()),
"partition", m_partitionId,
"read", msg.isReadOnly(),
"name", msg.getStoredProcedureName(),
"hsId", CoreUtils.hsIdToString(m_mailbox.getHSId())));
}
final String procedureName = msg.getStoredProcedureName();
final SpProcedureTask task =
new SpProcedureTask(m_mailbox, procedureName, m_pendingTasks, msg);
ListenableFuture