/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.service;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.File;
import java.io.IOError;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.UUID;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.FutureTask;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import javax.management.JMX;
import javax.management.NotificationBroadcasterSupport;
import javax.management.ObjectName;
import javax.management.openmbean.TabularData;
import javax.management.openmbean.TabularDataSupport;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Collections2;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Multimap;
import com.google.common.collect.SetMultimap;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.Uninterruptibles;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.jmx.JMXConfiguratorMBean;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.Appender;
import ch.qos.logback.core.hook.DelayingShutdownHook;
import org.apache.cassandra.auth.AuthKeyspace;
import org.apache.cassandra.auth.AuthMigrationListener;
import org.apache.cassandra.batchlog.BatchRemoveVerbHandler;
import org.apache.cassandra.batchlog.BatchStoreVerbHandler;
import org.apache.cassandra.batchlog.BatchlogManager;
import org.apache.cassandra.concurrent.NamedThreadFactory;
import org.apache.cassandra.concurrent.ScheduledExecutors;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.Config;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.config.Schema;
import org.apache.cassandra.config.ViewDefinition;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.CounterMutationVerbHandler;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.DefinitionsUpdateVerbHandler;
import org.apache.cassandra.db.Keyspace;
import org.apache.cassandra.db.MigrationRequestVerbHandler;
import org.apache.cassandra.db.Mutation;
import org.apache.cassandra.db.MutationVerbHandler;
import org.apache.cassandra.db.RangeSliceVerbHandler;
import org.apache.cassandra.db.ReadCommandVerbHandler;
import org.apache.cassandra.db.ReadRepairVerbHandler;
import org.apache.cassandra.db.SchemaCheckVerbHandler;
import org.apache.cassandra.db.SizeEstimatesRecorder;
import org.apache.cassandra.db.SnapshotDetailsTabularData;
import org.apache.cassandra.db.SystemKeyspace;
import org.apache.cassandra.db.TruncateVerbHandler;
import org.apache.cassandra.db.commitlog.CommitLog;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
import org.apache.cassandra.dht.BootStrapper;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.RangeStreamer;
import org.apache.cassandra.dht.RingPosition;
import org.apache.cassandra.dht.StreamStateStore;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.dht.Token.TokenFactory;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.exceptions.InvalidRequestException;
import org.apache.cassandra.exceptions.UnavailableException;
import org.apache.cassandra.gms.ApplicationState;
import org.apache.cassandra.gms.EndpointState;
import org.apache.cassandra.gms.FailureDetector;
import org.apache.cassandra.gms.GossipDigestAck2VerbHandler;
import org.apache.cassandra.gms.GossipDigestAckVerbHandler;
import org.apache.cassandra.gms.GossipDigestSynVerbHandler;
import org.apache.cassandra.gms.GossipShutdownVerbHandler;
import org.apache.cassandra.gms.Gossiper;
import org.apache.cassandra.gms.IEndpointStateChangeSubscriber;
import org.apache.cassandra.gms.IFailureDetector;
import org.apache.cassandra.gms.TokenSerializer;
import org.apache.cassandra.gms.VersionedValue;
import org.apache.cassandra.hints.HintVerbHandler;
import org.apache.cassandra.hints.HintsService;
import org.apache.cassandra.io.sstable.SSTableLoader;
import org.apache.cassandra.io.sstable.format.VersionAndType;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.locator.AbstractReplicationStrategy;
import org.apache.cassandra.locator.DynamicEndpointSnitch;
import org.apache.cassandra.locator.IEndpointSnitch;
import org.apache.cassandra.locator.LocalStrategy;
import org.apache.cassandra.locator.TokenMetadata;
import org.apache.cassandra.metrics.StorageMetrics;
import org.apache.cassandra.net.AsyncOneResponse;
import org.apache.cassandra.net.MessageOut;
import org.apache.cassandra.net.MessagingService;
import org.apache.cassandra.net.ResponseVerbHandler;
import org.apache.cassandra.repair.RepairMessageVerbHandler;
import org.apache.cassandra.repair.RepairParallelism;
import org.apache.cassandra.repair.RepairRunnable;
import org.apache.cassandra.repair.SystemDistributedKeyspace;
import org.apache.cassandra.repair.messages.RepairOption;
import org.apache.cassandra.schema.KeyspaceMetadata;
import org.apache.cassandra.schema.SchemaKeyspace;
import org.apache.cassandra.service.paxos.CommitVerbHandler;
import org.apache.cassandra.service.paxos.PrepareVerbHandler;
import org.apache.cassandra.service.paxos.ProposeVerbHandler;
import org.apache.cassandra.streaming.ReplicationFinishedVerbHandler;
import org.apache.cassandra.streaming.StreamManager;
import org.apache.cassandra.streaming.StreamPlan;
import org.apache.cassandra.streaming.StreamResultFuture;
import org.apache.cassandra.streaming.StreamState;
import org.apache.cassandra.thrift.EndpointDetails;
import org.apache.cassandra.thrift.TokenRange;
import org.apache.cassandra.thrift.cassandraConstants;
import org.apache.cassandra.tracing.TraceKeyspace;
import org.apache.cassandra.utils.BackgroundActivityMonitor;
import org.apache.cassandra.utils.FBUtilities;
import org.apache.cassandra.utils.JVMStabilityInspector;
import org.apache.cassandra.utils.MBeanWrapper;
import org.apache.cassandra.utils.OutputHandler;
import org.apache.cassandra.utils.Pair;
import org.apache.cassandra.utils.WindowsTimer;
import org.apache.cassandra.utils.WrappedRunnable;
import org.apache.cassandra.utils.progress.ProgressEvent;
import org.apache.cassandra.utils.progress.ProgressEventType;
import org.apache.cassandra.utils.progress.jmx.JMXProgressSupport;
import org.apache.cassandra.utils.progress.jmx.LegacyJMXProgressSupport;
import static java.util.Arrays.asList;
import static java.util.concurrent.TimeUnit.MINUTES;
import static java.util.stream.Collectors.toList;
import static org.apache.cassandra.index.SecondaryIndexManager.getIndexName;
import static org.apache.cassandra.index.SecondaryIndexManager.isIndexColumnFamily;
import static org.apache.cassandra.service.MigrationManager.evolveSystemKeyspace;
/**
* This abstraction contains the token/identifier of this node
* on the identifier space. This token gets gossiped around.
* This class will also maintain histograms of the load information
* of other nodes in the cluster.
*/
public class StorageService extends NotificationBroadcasterSupport implements IEndpointStateChangeSubscriber, StorageServiceMBean
{
private static final Logger logger = LoggerFactory.getLogger(StorageService.class);
public static final int RING_DELAY = getRingDelay(); // delay after which we assume ring has stabilized
public static final int SCHEMA_DELAY_MILLIS = getSchemaDelay();
private static final boolean REQUIRE_SCHEMAS = !Boolean.getBoolean("cassandra.skip_schema_check");
private final JMXProgressSupport progressSupport = new JMXProgressSupport(this);
/**
* @deprecated backward support for the previous notification interface.
* Will be removed in 4.0.
*/
@Deprecated
private final LegacyJMXProgressSupport legacyProgressSupport;
private static int getRingDelay()
{
String newdelay = System.getProperty("cassandra.ring_delay_ms");
if (newdelay != null)
{
logger.info("Overriding RING_DELAY to {}ms", newdelay);
return Integer.parseInt(newdelay);
}
else
return 30 * 1000;
}
private static int getSchemaDelay()
{
String newdelay = System.getProperty("cassandra.schema_delay_ms");
if (newdelay != null)
{
logger.info("Overriding SCHEMA_DELAY to {}ms", newdelay);
return Integer.parseInt(newdelay);
}
else
{
return 30 * 1000;
}
}
/* This abstraction maintains the token/endpoint metadata information */
private TokenMetadata tokenMetadata = new TokenMetadata();
public volatile VersionedValue.VersionedValueFactory valueFactory = new VersionedValue.VersionedValueFactory(tokenMetadata.partitioner);
private Thread drainOnShutdown = null;
private volatile boolean isShutdown = false;
public static final StorageService instance = new StorageService();
@Deprecated
public boolean isInShutdownHook()
{
return isShutdown();
}
public boolean isShutdown()
{
return isShutdown;
}
/**
* For in-jvm dtest use - forces isShutdown to whatever value is passed in.
*/
@VisibleForTesting
public void setIsShutdownUnsafeForTests(boolean isShutdown)
{
this.isShutdown = isShutdown;
}
public Collection<Range<Token>> getLocalRanges(String keyspaceName)
{
return getRangesForEndpoint(keyspaceName, FBUtilities.getBroadcastAddress());
}
public Collection<Range<Token>> getPrimaryRanges(String keyspace)
{
return getPrimaryRangesForEndpoint(keyspace, FBUtilities.getBroadcastAddress());
}
public Collection<Range<Token>> getPrimaryRangesWithinDC(String keyspace)
{
return getPrimaryRangeForEndpointWithinDC(keyspace, FBUtilities.getBroadcastAddress());
}
private final Set<InetAddress> replicatingNodes = Collections.synchronizedSet(new HashSet<InetAddress>());
private CassandraDaemon daemon;
private InetAddress removingNode;
/* Are we starting this node in bootstrap mode? */
private volatile boolean isBootstrapMode;
/* we bootstrap but do NOT join the ring unless told to do so */
private boolean isSurveyMode = Boolean.parseBoolean(System.getProperty
("cassandra.write_survey", "false"));
/* true if node is rebuilding and receiving data */
private final AtomicBoolean isRebuilding = new AtomicBoolean();
private boolean initialized;
private volatile boolean joined = false;
private final AtomicBoolean authSetupCalled = new AtomicBoolean(false);
/* the probability for tracing any particular request, 0 disables tracing and 1 enables for all */
private double traceProbability = 0.0;
private static enum Mode { STARTING, NORMAL, JOINING, LEAVING, DECOMMISSIONED, MOVING, DRAINING, DRAINED }
private volatile Mode operationMode = Mode.STARTING;
/* Used for tracking drain progress */
private volatile int totalCFs, remainingCFs;
private static final AtomicInteger nextRepairCommand = new AtomicInteger();
private final List<IEndpointLifecycleSubscriber> lifecycleSubscribers = new CopyOnWriteArrayList<>();
private static final BackgroundActivityMonitor bgMonitor = new BackgroundActivityMonitor();
private final String jmxObjectName;
private Collection<Token> bootstrapTokens = null;
// true when keeping strict consistency while bootstrapping
private boolean useStrictConsistency = Boolean.parseBoolean(System.getProperty("cassandra.consistent.rangemovement", "true"));
private static final boolean allowSimultaneousMoves = Boolean.valueOf(System.getProperty("cassandra.consistent.simultaneousmoves.allow","false"));
private static final boolean joinRing = Boolean.parseBoolean(System.getProperty("cassandra.join_ring", "true"));
private boolean replacing;
private UUID replacingId;
private final StreamStateStore streamStateStore = new StreamStateStore();
public final SSTablesGlobalTracker sstablesTracker;
public boolean isSurveyMode()
{
return isSurveyMode;
}
public boolean hasJoined()
{
return joined;
}
/** This method updates the local token on disk */
public void setTokens(Collection<Token> tokens)
{
assert tokens != null && !tokens.isEmpty() : "Node needs at least one token.";
if (logger.isDebugEnabled())
logger.debug("Setting tokens to {}", tokens);
SystemKeyspace.updateTokens(tokens);
Collection<Token> localTokens = getLocalTokens();
setGossipTokens(localTokens);
tokenMetadata.updateNormalTokens(tokens, FBUtilities.getBroadcastAddress());
setMode(Mode.NORMAL, false);
}
public void setGossipTokens(Collection<Token> tokens)
{
List<Pair<ApplicationState, VersionedValue>> states = new ArrayList<Pair<ApplicationState, VersionedValue>>();
states.add(Pair.create(ApplicationState.TOKENS, valueFactory.tokens(tokens)));
states.add(Pair.create(ApplicationState.STATUS, valueFactory.normal(tokens)));
Gossiper.instance.addLocalApplicationStates(states);
}
public StorageService()
{
// use dedicated executor for sending JMX notifications
super(Executors.newSingleThreadExecutor());
jmxObjectName = "org.apache.cassandra.db:type=StorageService";
MBeanWrapper.instance.registerMBean(this, jmxObjectName);
MBeanWrapper.instance.registerMBean(StreamManager.instance, StreamManager.OBJECT_NAME);
legacyProgressSupport = new LegacyJMXProgressSupport(this, jmxObjectName);
/* register the verb handlers */
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.MUTATION, new MutationVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.READ_REPAIR, new ReadRepairVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.READ, new ReadCommandVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.RANGE_SLICE, new RangeSliceVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.PAGED_RANGE, new RangeSliceVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.COUNTER_MUTATION, new CounterMutationVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.TRUNCATE, new TruncateVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.PAXOS_PREPARE, new PrepareVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.PAXOS_PROPOSE, new ProposeVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.PAXOS_COMMIT, new CommitVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.HINT, new HintVerbHandler());
// see BootStrapper for a summary of how the bootstrap verbs interact
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.REPLICATION_FINISHED, new ReplicationFinishedVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.REQUEST_RESPONSE, new ResponseVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.INTERNAL_RESPONSE, new ResponseVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.REPAIR_MESSAGE, new RepairMessageVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.GOSSIP_SHUTDOWN, new GossipShutdownVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.GOSSIP_DIGEST_SYN, new GossipDigestSynVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.GOSSIP_DIGEST_ACK, new GossipDigestAckVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.GOSSIP_DIGEST_ACK2, new GossipDigestAck2VerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.DEFINITIONS_UPDATE, new DefinitionsUpdateVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.SCHEMA_CHECK, new SchemaCheckVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.MIGRATION_REQUEST, new MigrationRequestVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.SNAPSHOT, new SnapshotVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.ECHO, new EchoVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.BATCH_STORE, new BatchStoreVerbHandler());
MessagingService.instance().registerVerbHandlers(MessagingService.Verb.BATCH_REMOVE, new BatchRemoveVerbHandler());
sstablesTracker = new SSTablesGlobalTracker(DatabaseDescriptor.getSSTableFormat());
}
public void registerDaemon(CassandraDaemon daemon)
{
this.daemon = daemon;
}
public void register(IEndpointLifecycleSubscriber subscriber)
{
lifecycleSubscribers.add(subscriber);
}
public void unregister(IEndpointLifecycleSubscriber subscriber)
{
lifecycleSubscribers.remove(subscriber);
}
// should only be called via JMX
public void stopGossiping()
{
if (initialized)
{
if (!isNormal() && joinRing)
throw new IllegalStateException("Unable to stop gossip because the node is not in the normal state. Try to stop the node instead.");
logger.warn("Stopping gossip by operator request");
if (isNativeTransportRunning())
{
logger.warn("Disabling gossip while native transport is still active is unsafe");
}
Gossiper.instance.stop();
initialized = false;
}
}
// should only be called via JMX
public synchronized void startGossiping()
{
if (!initialized)
{
checkServiceAllowedToStart("gossip");
logger.warn("Starting gossip by operator request");
Collection<Token> tokens = SystemKeyspace.getSavedTokens();
boolean validTokens = tokens != null && !tokens.isEmpty();
// shouldn't be called before these are set if we intend to join the ring/are in the process of doing so
if (joined || joinRing)
assert validTokens : "Cannot start gossiping for a node intended to join without valid tokens";
if (validTokens)
setGossipTokens(tokens);
Gossiper.instance.forceNewerGeneration();
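// note: the gossip generation is just seconds since the epoch; forceNewerGeneration() above ensures peers treat this restarted state as newer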
Gossiper.instance.start((int) (System.currentTimeMillis() / 1000));
initialized = true;
}
}
// should only be called via JMX
public boolean isGossipRunning()
{
return Gossiper.instance.isEnabled();
}
// should only be called via JMX
public synchronized void startRPCServer()
{
checkServiceAllowedToStart("thrift");
if (daemon == null)
{
throw new IllegalStateException("No configured daemon");
}
// We only start transports if bootstrap has completed and we're not in survey mode, OR if we are in
// survey mode and streaming has completed but we're not using auth.
// OR if we have not joined the ring yet.
if (StorageService.instance.hasJoined())
{
if (StorageService.instance.isSurveyMode())
{
if (StorageService.instance.isBootstrapMode() || DatabaseDescriptor.getAuthenticator().requireAuthentication())
{
throw new IllegalStateException("Not starting RPC server in write_survey mode as it's bootstrapping or " +
"auth is enabled");
}
}
else
{
if (!SystemKeyspace.bootstrapComplete())
{
throw new IllegalStateException("Node is not yet bootstrapped completely. Use nodetool to check bootstrap state and resume. For more, see `nodetool help bootstrap`");
}
}
}
daemon.startThriftServer();
}
public void stopRPCServer()
{
if (daemon == null)
{
throw new IllegalStateException("No configured daemon");
}
daemon.stopThriftServer();
}
public boolean isRPCServerRunning()
{
if (daemon == null)
{
return false;
}
return daemon.isThriftServerRunning();
}
public synchronized void startNativeTransport()
{
checkServiceAllowedToStart("native transport");
if (daemon == null)
{
throw new IllegalStateException("No configured daemon");
}
try
{
daemon.startNativeTransport();
}
catch (Exception e)
{
throw new RuntimeException("Error starting native transport: " + e.getMessage());
}
}
public void stopNativeTransport()
{
if (daemon == null)
{
throw new IllegalStateException("No configured daemon");
}
daemon.stopNativeTransport();
}
public boolean isNativeTransportRunning()
{
if (daemon == null)
{
return false;
}
return daemon.isNativeTransportRunning();
}
public int getMaxNativeProtocolVersion()
{
if (daemon == null)
{
throw new IllegalStateException("No configured daemon");
}
return daemon.getMaxNativeProtocolVersion();
}
private void refreshMaxNativeProtocolVersion()
{
if (daemon != null)
{
daemon.refreshMaxNativeProtocolVersion();
}
}
public void stopTransports()
{
if (isRPCServerRunning())
{
logger.error("Stopping RPC server");
stopRPCServer();
}
if (isNativeTransportRunning())
{
logger.error("Stopping native transport");
stopNativeTransport();
}
if (isInitialized())
{
logger.error("Stopping gossiper");
stopGossiping();
}
}
/**
* Set the Gossip flag RPC_READY to false and then
* shutdown the client services (thrift and CQL).
*
* Note that other nodes will do this for us when
* they get the Gossip shutdown message, so even if
* we don't get time to broadcast this, it is not a problem.
*
* See {@link Gossiper#markAsShutdown(InetAddress)}
*/
private void shutdownClientServers()
{
setRpcReady(false);
stopRPCServer();
stopNativeTransport();
}
public void stopClient()
{
Gossiper.instance.unregister(this);
Gossiper.instance.stop();
MessagingService.instance().shutdown();
// give it a second so that task accepted before the MessagingService shutdown gets submitted to the stage (to avoid RejectedExecutionException)
Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
StageManager.shutdownNow();
}
public boolean isInitialized()
{
return initialized;
}
public boolean isSetupCompleted()
{
return daemon == null
? false
: daemon.setupCompleted();
}
public void stopDaemon()
{
if (daemon == null)
throw new IllegalStateException("No configured daemon");
daemon.deactivate();
}
public synchronized Collection<Token> prepareReplacementInfo() throws ConfigurationException
{
logger.info("Gathering node replacement information for {}", DatabaseDescriptor.getReplaceAddress());
if (!MessagingService.instance().isListening())
MessagingService.instance().listen();
if (!shouldBootstrap() && !Boolean.getBoolean("cassandra.allow_unsafe_replace"))
throw new RuntimeException("Replacing a node without bootstrapping risks invalidating consistency " +
"guarantees as the expected data may not be present until repair is run. " +
"To perform this operation, please restart with " +
"-Dcassandra.allow_unsafe_replace=true");
// make magic happen
Map<InetAddress, EndpointState> epStates = Gossiper.instance.doShadowRound();
// now that we've gossiped at least once, we should be able to find the node we're replacing
if (epStates.get(DatabaseDescriptor.getReplaceAddress()) == null)
throw new RuntimeException("Cannot replace_address " + DatabaseDescriptor.getReplaceAddress() + " because it doesn't exist in gossip");
replacingId = Gossiper.instance.getHostId(DatabaseDescriptor.getReplaceAddress(), epStates);
try
{
VersionedValue tokensVersionedValue = epStates.get(DatabaseDescriptor.getReplaceAddress()).getApplicationState(ApplicationState.TOKENS);
if (tokensVersionedValue == null)
throw new RuntimeException("Could not find tokens for " + DatabaseDescriptor.getReplaceAddress() + " to replace");
Collection<Token> tokens = TokenSerializer.deserialize(tokenMetadata.partitioner, new DataInputStream(new ByteArrayInputStream(tokensVersionedValue.toBytes())));
if (isReplacingSameAddress())
{
SystemKeyspace.setLocalHostId(replacingId); // use the replacee's host Id as our own so we receive hints, etc
}
return tokens;
}
catch (IOException e)
{
throw new RuntimeException(e);
}
}
public synchronized void checkForEndpointCollision() throws ConfigurationException
{
logger.debug("Starting shadow gossip round to check for endpoint collision");
if (!MessagingService.instance().isListening())
MessagingService.instance().listen();
Map<InetAddress, EndpointState> epStates = Gossiper.instance.doShadowRound();
if (!Gossiper.instance.isSafeForBootstrap(FBUtilities.getBroadcastAddress(), epStates))
{
throw new RuntimeException(String.format("A node with address %s already exists, cancelling join. " +
"Use cassandra.replace_address if you want to replace this node.",
FBUtilities.getBroadcastAddress()));
}
if (useStrictConsistency && !allowSimultaneousMoves())
{
for (Map.Entry<InetAddress, EndpointState> entry : epStates.entrySet())
{
// ignore local node or empty status
if (entry.getKey().equals(FBUtilities.getBroadcastAddress()) || entry.getValue().getApplicationState(ApplicationState.STATUS) == null)
continue;
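// STATUS values are encoded as "<state>[,<extra>]"; only the leading state token matters for this check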
String[] pieces = splitValue(entry.getValue().getApplicationState(ApplicationState.STATUS));
assert (pieces.length > 0);
String state = pieces[0];
if (state.equals(VersionedValue.STATUS_BOOTSTRAPPING) || state.equals(VersionedValue.STATUS_LEAVING) || state.equals(VersionedValue.STATUS_MOVING))
throw new UnsupportedOperationException("Other bootstrapping/leaving/moving nodes detected, cannot bootstrap while cassandra.consistent.rangemovement is true");
}
}
}
private boolean allowSimultaneousMoves()
{
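// simultaneous range movements are only treated as safe for single-token nodes, and only when explicitly enabled via -Dcassandra.consistent.simultaneousmoves.allow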
return allowSimultaneousMoves && DatabaseDescriptor.getNumTokens() == 1;
}
// for testing only
public void unsafeInitialize() throws ConfigurationException
{
initialized = true;
Gossiper.instance.register(this);
Gossiper.instance.start((int) (System.currentTimeMillis() / 1000)); // needed for node-ring gathering.
Gossiper.instance.addLocalApplicationState(ApplicationState.NET_VERSION, valueFactory.networkVersion());
if (!MessagingService.instance().isListening())
MessagingService.instance().listen();
}
public void populateTokenMetadata()
{
if (Boolean.parseBoolean(System.getProperty("cassandra.load_ring_state", "true")))
{
logger.info("Populating token metadata from system tables");
Multimap<InetAddress, Token> loadedTokens = SystemKeyspace.loadTokens();
if (!shouldBootstrap()) // if we have not completed bootstrapping, we should not add ourselves as a normal token
loadedTokens.putAll(FBUtilities.getBroadcastAddress(), SystemKeyspace.getSavedTokens());
for (InetAddress ep : loadedTokens.keySet())
tokenMetadata.updateNormalTokens(loadedTokens.get(ep), ep);
logger.info("Token metadata: {}", tokenMetadata);
}
}
public synchronized void initServer() throws ConfigurationException
{
initServer(RING_DELAY);
}
public synchronized void initServer(int delay) throws ConfigurationException
{
logger.info("Cassandra version: {}", FBUtilities.getReleaseVersionString());
logger.info("Thrift API version: {}", cassandraConstants.VERSION);
logger.info("CQL supported versions: {} (default: {})",
StringUtils.join(ClientState.getCQLSupportedVersion(), ","), ClientState.DEFAULT_CQL_VERSION);
initialized = true;
try
{
// Ensure StorageProxy is initialized on start-up; see CASSANDRA-3797.
Class.forName("org.apache.cassandra.service.StorageProxy");
// also IndexSummaryManager, which is otherwise unreferenced
Class.forName("org.apache.cassandra.io.sstable.IndexSummaryManager");
}
catch (ClassNotFoundException e)
{
throw new AssertionError(e);
}
if (Boolean.parseBoolean(System.getProperty("cassandra.load_ring_state", "true")))
{
logger.info("Loading persisted ring state");
Multimap<InetAddress, Token> loadedTokens = SystemKeyspace.loadTokens();
Map<InetAddress, UUID> loadedHostIds = SystemKeyspace.loadHostIds();
for (InetAddress ep : loadedTokens.keySet())
{
if (ep.equals(FBUtilities.getBroadcastAddress()))
{
// entry has been mistakenly added, delete it
SystemKeyspace.removeEndpoint(ep);
}
else
{
if (loadedHostIds.containsKey(ep))
tokenMetadata.updateHostId(loadedHostIds.get(ep), ep);
Gossiper.runInGossipStageBlocking(() -> Gossiper.instance.addSavedEndpoint(ep));
}
}
}
// daemon threads, like our executors', continue to run while shutdown hooks are invoked
drainOnShutdown = new Thread(NamedThreadFactory.threadLocalDeallocator(new WrappedRunnable()
{
@Override
public void runMayThrow() throws InterruptedException, ExecutionException, IOException
{
drain(true);
if (FBUtilities.isWindows())
WindowsTimer.endTimerPeriod(DatabaseDescriptor.getWindowsTimerInterval());
// Cleanup logback
DelayingShutdownHook logbackHook = new DelayingShutdownHook();
logbackHook.setContext((LoggerContext)LoggerFactory.getILoggerFactory());
logbackHook.run();
// wait for miscellaneous tasks like sstable and commitlog segment deletion
ScheduledExecutors.nonPeriodicTasks.shutdown();
if (!ScheduledExecutors.nonPeriodicTasks.awaitTermination(1, MINUTES))
logger.warn("Miscellaneous task executor still busy after one minute; proceeding with shutdown");
}
}), "StorageServiceShutdownHook");
Runtime.getRuntime().addShutdownHook(drainOnShutdown);
replacing = DatabaseDescriptor.isReplacing();
if (!Boolean.parseBoolean(System.getProperty("cassandra.start_gossip", "true")))
{
logger.info("Not starting gossip as requested.");
return;
}
prepareToJoin();
// Has to be called after the host id has potentially changed in prepareToJoin().
try
{
CacheService.instance.counterCache.loadSavedAsync().get();
}
catch (Throwable t)
{
JVMStabilityInspector.inspectThrowable(t);
logger.warn("Error loading counter cache", t);
}
if (joinRing)
{
joinTokenRing(delay);
}
else
{
Collection<Token> tokens = SystemKeyspace.getSavedTokens();
if (!tokens.isEmpty())
{
tokenMetadata.updateNormalTokens(tokens, FBUtilities.getBroadcastAddress());
// order is important here, the gossiper can fire in between adding these two states. It's ok to send TOKENS without STATUS, but *not* vice versa.
List<Pair<ApplicationState, VersionedValue>> states = new ArrayList<Pair<ApplicationState, VersionedValue>>();
states.add(Pair.create(ApplicationState.TOKENS, valueFactory.tokens(tokens)));
states.add(Pair.create(ApplicationState.STATUS, valueFactory.hibernate(true)));
Gossiper.instance.addLocalApplicationStates(states);
}
doAuthSetup(true);
logger.info("Not joining ring as requested. Use JMX (StorageService->joinRing()) to initiate ring joining");
}
}
/**
* In the event of forceful termination we need to remove the shutdown hook to prevent hanging (OOM for instance)
*/
public void removeShutdownHook()
{
if (drainOnShutdown != null)
Runtime.getRuntime().removeShutdownHook(drainOnShutdown);
if (FBUtilities.isWindows())
WindowsTimer.endTimerPeriod(DatabaseDescriptor.getWindowsTimerInterval());
}
private boolean shouldBootstrap()
{
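// bootstrap only if auto_bootstrap is enabled, bootstrap has not already completed, and this node is not one of its own seeds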
return DatabaseDescriptor.isAutoBootstrap() && !SystemKeyspace.bootstrapComplete() && !DatabaseDescriptor.getSeeds().contains(FBUtilities.getBroadcastAddress());
}
@VisibleForTesting
public void prepareToJoin() throws ConfigurationException
{
MigrationCoordinator.instance.start();
if (!joined)
{
Map<ApplicationState, VersionedValue> appStates = new EnumMap<>(ApplicationState.class);
if (SystemKeyspace.wasDecommissioned())
{
if (Boolean.getBoolean("cassandra.override_decommission"))
{
logger.warn("This node was decommissioned, but overriding by operator request.");
SystemKeyspace.setBootstrapState(SystemKeyspace.BootstrapState.COMPLETED);
}
else
throw new ConfigurationException("This node was decommissioned and will not rejoin the ring unless cassandra.override_decommission=true has been set, or all existing data is removed and the node is bootstrapped again");
}
if (replacing && !joinRing)
throw new ConfigurationException("Cannot set both join_ring=false and attempt to replace a node");
if (DatabaseDescriptor.getReplaceTokens().size() > 0 || DatabaseDescriptor.getReplaceNode() != null)
throw new RuntimeException("Replace method removed; use cassandra.replace_address instead");
if (replacing)
{
if (SystemKeyspace.bootstrapComplete())
throw new RuntimeException("Cannot replace address with a node that is already bootstrapped");
bootstrapTokens = prepareReplacementInfo();
if (!shouldBootstrap())
{
// Will not do replace procedure, persist the tokens we're taking over locally
// so that they don't get clobbered with auto generated ones in joinTokenRing
SystemKeyspace.updateTokens(bootstrapTokens);
}
if (isReplacingSameAddress())
{
logger.warn("Writes will not be forwarded to this node during replacement because it has the same address as " +
"the node to be replaced ({}). If the previous node has been down for longer than max_hint_window_in_ms, " +
"repair must be run after the replacement process in order to make this node consistent.",
DatabaseDescriptor.getReplaceAddress());
appStates.put(ApplicationState.TOKENS, valueFactory.tokens(bootstrapTokens));
appStates.put(ApplicationState.STATUS, valueFactory.hibernate(true));
}
MigrationCoordinator.instance.removeAndIgnoreEndpoint(DatabaseDescriptor.getReplaceAddress());
}
else if (shouldBootstrap())
{
checkForEndpointCollision();
}
else if (SystemKeyspace.bootstrapComplete())
{
Preconditions.checkState(!Config.isClientMode());
// tokens are only ever saved to system.local after bootstrap has completed and we're joining the ring,
// or when token update operations (move, decom) are completed
Collection<Token> savedTokens = SystemKeyspace.getSavedTokens();
if (!savedTokens.isEmpty())
appStates.put(ApplicationState.TOKENS, valueFactory.tokens(savedTokens));
}
// have to start the gossip service before we can see any info on other nodes. this is necessary
// for bootstrap to get the load info it needs.
// (we won't be part of the storage ring though until we add a counterId to our state, below.)
// Seed the host ID-to-endpoint map with our own ID.
UUID localHostId = SystemKeyspace.getOrInitializeLocalHostId();
getTokenMetadata().updateHostId(localHostId, FBUtilities.getBroadcastAddress());
appStates.put(ApplicationState.NET_VERSION, valueFactory.networkVersion());
appStates.put(ApplicationState.HOST_ID, valueFactory.hostId(localHostId));
appStates.put(ApplicationState.RPC_ADDRESS, valueFactory.rpcaddress(FBUtilities.getBroadcastRpcAddress()));
appStates.put(ApplicationState.RELEASE_VERSION, valueFactory.releaseVersion());
appStates.put(ApplicationState.SSTABLE_VERSIONS, valueFactory.sstableVersions(sstablesTracker.versionsInUse()));
logger.info("Starting up server gossip");
Gossiper.instance.register(this);
Gossiper.instance.start(SystemKeyspace.incrementAndGetGeneration(), appStates); // needed for node-ring gathering.
sstablesTracker.register((notification, o) -> {
if (!(notification instanceof SSTablesVersionsInUseChangeNotification))
return;
Set<VersionAndType> versions = ((SSTablesVersionsInUseChangeNotification)notification).versionsInUse;
logger.debug("Updating local sstables version in Gossip to {}", versions);
Gossiper.instance.addLocalApplicationState(ApplicationState.SSTABLE_VERSIONS,
valueFactory.sstableVersions(versions));
});
// gossip snitch infos (local DC and rack)
gossipSnitchInfo();
// gossip Schema.emptyVersion forcing immediate check for schema updates (see MigrationManager#maybeScheduleSchemaPull)
Schema.instance.updateVersionAndAnnounce(); // Ensure we know our own actual Schema UUID in preparation for updates
if (!MessagingService.instance().isListening())
MessagingService.instance().listen();
LoadBroadcaster.instance.startBroadcasting();
HintsService.instance.startDispatch();
BatchlogManager.instance.start();
}
}
public void waitForSchema(int delay)
{
// first sleep the delay to make sure we see all our peers
for (long i = 0; i < delay; i += 1000)
{
// if we see schema, we can proceed to the next check directly
if (!Schema.instance.isEmpty())
{
logger.debug("current schema version: {}", Schema.instance.getVersion());
break;
}
Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
}
boolean schemasReceived = MigrationCoordinator.instance.awaitSchemaRequests(SCHEMA_DELAY_MILLIS);
if (schemasReceived)
return;
logger.warn(String.format("There are nodes in the cluster with a different schema version than us we did not merged schemas from, " +
"our version : (%s), outstanding versions -> endpoints : %s. Use -Dcassandra.skip_schema_check=true " +
"to ignore this, -Dcassandra.skip_schema_check_for_endpoints= to skip specific endpoints," +
"or -Dcassandra.skip_schema_check_for_versions= to skip specific schema versions",
Schema.instance.getVersion(),
MigrationCoordinator.instance.outstandingVersions()));
if (REQUIRE_SCHEMAS)
throw new RuntimeException("Didn't receive schemas for all known versions within the timeout. " +
"Use -Dcassandra.skip_schema_check=true to skip this check.");
}
@VisibleForTesting
public void joinTokenRing(int delay) throws ConfigurationException
{
joined = true;
// We bootstrap if we haven't successfully bootstrapped before, as long as we are not a seed.
// If we are a seed, or if the user manually sets auto_bootstrap to false,
// we'll skip streaming data from other nodes and jump directly into the ring.
//
// The seed check allows us to skip the RING_DELAY sleep for the single-node cluster case,
// which is useful for both new users and testing.
//
// We attempted to replace this with a schema-presence check, but you need a meaningful sleep
// to get schema info from gossip which defeats the purpose. See CASSANDRA-4427 for the gory details.
Set<InetAddress> current = new HashSet<>();
if (logger.isDebugEnabled())
{
logger.debug("Bootstrap variables: {} {} {} {}",
DatabaseDescriptor.isAutoBootstrap(),
SystemKeyspace.bootstrapInProgress(),
SystemKeyspace.bootstrapComplete(),
DatabaseDescriptor.getSeeds().contains(FBUtilities.getBroadcastAddress()));
}
if (DatabaseDescriptor.isAutoBootstrap() && !SystemKeyspace.bootstrapComplete() && DatabaseDescriptor.getSeeds().contains(FBUtilities.getBroadcastAddress()))
{
logger.info("This node will not auto bootstrap because it is configured to be a seed node.");
}
boolean dataAvailable = true; // make this to false when bootstrap streaming failed
if (shouldBootstrap())
{
if (SystemKeyspace.bootstrapInProgress())
logger.warn("Detected previous bootstrap failure; retrying");
else
SystemKeyspace.setBootstrapState(SystemKeyspace.BootstrapState.IN_PROGRESS);
setMode(Mode.JOINING, "waiting for ring information", true);
// first sleep the delay to make sure we see all our peers
for (int i = 0; i < delay; i += 1000)
{
// if we see schema, we can proceed to the next check directly
if (!Schema.instance.getVersion().equals(Schema.emptyVersion))
{
logger.debug("got schema: {}", Schema.instance.getVersion());
break;
}
Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
}
waitForSchema(delay);
setMode(Mode.JOINING, "schema complete, ready to bootstrap", true);
setMode(Mode.JOINING, "waiting for pending range calculation", true);
PendingRangeCalculatorService.instance.blockUntilFinished();
setMode(Mode.JOINING, "calculation complete, ready to bootstrap", true);
logger.debug("... got ring + schema info");
if (useStrictConsistency && !allowSimultaneousMoves() &&
(
tokenMetadata.getBootstrapTokens().valueSet().size() > 0 ||
tokenMetadata.getLeavingEndpoints().size() > 0 ||
tokenMetadata.getMovingEndpoints().size() > 0
))
{
throw new UnsupportedOperationException("Other bootstrapping/leaving/moving nodes detected, cannot bootstrap while cassandra.consistent.rangemovement is true");
}
// get bootstrap tokens
if (!replacing)
{
if (tokenMetadata.isMember(FBUtilities.getBroadcastAddress()))
{
String s = "This node is already a member of the token ring; bootstrap aborted. (If replacing a dead node, remove the old one from the ring first.)";
throw new UnsupportedOperationException(s);
}
setMode(Mode.JOINING, "getting bootstrap token", true);
bootstrapTokens = BootStrapper.getBootstrapTokens(tokenMetadata, FBUtilities.getBroadcastAddress());
}
else
{
if (!isReplacingSameAddress())
{
try
{
// Sleep additionally to make sure that the server actually is not alive
// and giving it more time to gossip if alive.
Thread.sleep(LoadBroadcaster.BROADCAST_INTERVAL);
}
catch (InterruptedException e)
{
throw new AssertionError(e);
}
// check for operator errors...
for (Token token : bootstrapTokens)
{
InetAddress existing = tokenMetadata.getEndpoint(token);
if (existing != null)
{
long nanoDelay = delay * 1000000L;
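// delay is in milliseconds; compare in nanoseconds against the gossip update timestamp to make sure the node being replaced has not been heard from recently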
if (Gossiper.instance.getEndpointStateForEndpoint(existing).getUpdateTimestamp() > (System.nanoTime() - nanoDelay))
throw new UnsupportedOperationException("Cannot replace a live node... ");
current.add(existing);
}
else
{
throw new UnsupportedOperationException("Cannot replace token " + token + " which does not exist!");
}
}
}
else
{
try
{
Thread.sleep(RING_DELAY);
}
catch (InterruptedException e)
{
throw new AssertionError(e);
}
}
setMode(Mode.JOINING, "Replacing a node with token(s): " + bootstrapTokens, true);
}
dataAvailable = bootstrap(bootstrapTokens);
}
else
{
bootstrapTokens = SystemKeyspace.getSavedTokens();
if (bootstrapTokens.isEmpty())
{
Collection<String> initialTokens = DatabaseDescriptor.getInitialTokens();
if (initialTokens.size() < 1)
{
bootstrapTokens = BootStrapper.getRandomTokens(tokenMetadata, DatabaseDescriptor.getNumTokens());
if (DatabaseDescriptor.getNumTokens() == 1)
logger.warn("Generated random token {}. Random tokens will result in an unbalanced ring; see http://wiki.apache.org/cassandra/Operations", bootstrapTokens);
else
logger.info("Generated random tokens. tokens are {}", bootstrapTokens);
}
else
{
bootstrapTokens = new ArrayList<>(initialTokens.size());
for (String token : initialTokens)
bootstrapTokens.add(getTokenFactory().fromString(token));
logger.info("Saved tokens not found. Using configuration value: {}", bootstrapTokens);
}
}
else
{
if (bootstrapTokens.size() != DatabaseDescriptor.getNumTokens())
throw new ConfigurationException("Cannot change the number of tokens from " + bootstrapTokens.size() + " to " + DatabaseDescriptor.getNumTokens());
else
logger.info("Using saved tokens {}", bootstrapTokens);
}
}
setUpDistributedSystemKeyspaces();
if (!isSurveyMode)
{
if (dataAvailable)
{
finishJoiningRing(bootstrapTokens);
// remove the existing info about the replaced node.
if (!current.isEmpty())
{
Gossiper.runInGossipStageBlocking(() -> {
for (InetAddress existing : current)
Gossiper.instance.replacedEndpoint(existing);
});
}
}
else
{
logger.warn("Some data streaming failed. Use nodetool to check bootstrap state and resume. For more, see `nodetool help bootstrap`. {}", SystemKeyspace.getBootstrapState());
}
}
else
{
if (dataAvailable)
logger.info("Startup complete, but write survey mode is active, not becoming an active ring member. Use JMX (StorageService->joinRing()) to finalize ring joining.");
else
logger.warn("Some data streaming failed. Use nodetool to check bootstrap state and resume. For more, see `nodetool help bootstrap`. {}", SystemKeyspace.getBootstrapState());
}
}
@VisibleForTesting
public void ensureTraceKeyspace()
{
Optional<Mutation> mutation = evolveSystemKeyspace(TraceKeyspace.metadata(), TraceKeyspace.GENERATION);
mutation.ifPresent(value -> FBUtilities.waitOnFuture(MigrationManager.announceWithoutPush(Collections.singleton(value))));
}
public static boolean isReplacingSameAddress()
{
return DatabaseDescriptor.getReplaceAddress().equals(FBUtilities.getBroadcastAddress());
}
public void gossipSnitchInfo()
{
IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
String dc = snitch.getDatacenter(FBUtilities.getBroadcastAddress());
String rack = snitch.getRack(FBUtilities.getBroadcastAddress());
Gossiper.instance.addLocalApplicationState(ApplicationState.DC, StorageService.instance.valueFactory.datacenter(dc));
Gossiper.instance.addLocalApplicationState(ApplicationState.RACK, StorageService.instance.valueFactory.rack(rack));
}
public synchronized void joinRing() throws IOException
{
if (!joined)
{
logger.info("Joining ring by operator request");
try
{
joinTokenRing(0);
}
catch (ConfigurationException e)
{
throw new IOException(e.getMessage());
}
}
else if (isSurveyMode)
{
// if in write survey mode, verify that bootstrap has completed;
// otherwise the node could join the ring while still bootstrapping, which must not happen
if (!isBootstrapMode())
{
isSurveyMode = false;
logger.info("Leaving write survey mode and joining ring at operator request");
finishJoiningRing(SystemKeyspace.getSavedTokens());
daemon.start();
}
else
{
logger.warn("Can't join the ring because in write_survey mode and bootstrap hasn't completed");
}
}
else if (isBootstrapMode())
{
// bootstrap is not complete hence node cannot join the ring
logger.warn("Can't join the ring because bootstrap hasn't completed.");
}
}
private void finishJoiningRing(Collection<Token> tokens)
{
// start participating in the ring.
SystemKeyspace.setBootstrapState(SystemKeyspace.BootstrapState.COMPLETED);
setTokens(tokens);
assert tokenMetadata.sortedTokens().size() > 0;
doAuthSetup(false);
}
private void doAuthSetup(boolean setUpSchema)
{
if (!authSetupCalled.getAndSet(true))
{
if (setUpSchema)
{
Optional<Mutation> mutation = evolveSystemKeyspace(AuthKeyspace.metadata(), AuthKeyspace.GENERATION);
mutation.ifPresent(value -> FBUtilities.waitOnFuture(MigrationManager.announceWithoutPush(Collections.singleton(value))));
}
DatabaseDescriptor.getRoleManager().setup();
DatabaseDescriptor.getAuthenticator().setup();
DatabaseDescriptor.getAuthorizer().setup();
MigrationManager.instance.register(new AuthMigrationListener());
}
}
private void setUpDistributedSystemKeyspaces()
{
Collection<Mutation> changes = new ArrayList<>(3);
evolveSystemKeyspace(TraceKeyspace.metadata(), TraceKeyspace.GENERATION).ifPresent(changes::add);
evolveSystemKeyspace(SystemDistributedKeyspace.metadata(), SystemDistributedKeyspace.GENERATION).ifPresent(changes::add);
evolveSystemKeyspace(AuthKeyspace.metadata(), AuthKeyspace.GENERATION).ifPresent(changes::add);
if (!changes.isEmpty())
FBUtilities.waitOnFuture(MigrationManager.announceWithoutPush(changes));
}
public boolean isJoined()
{
return tokenMetadata.isMember(FBUtilities.getBroadcastAddress()) && !isSurveyMode;
}
public void rebuild(String sourceDc)
{
if (sourceDc != null)
{
TokenMetadata.Topology topology = getTokenMetadata().cloneOnlyTokenMap().getTopology();
Set<String> availableDCs = topology.getDatacenterEndpoints().keySet();
if (!availableDCs.contains(sourceDc))
{
throw new IllegalArgumentException(String.format("Provided datacenter '%s' is not a valid datacenter, available datacenters are: %s",
sourceDc, String.join(",", availableDCs)));
}
}
// check ongoing rebuild
if (!isRebuilding.compareAndSet(false, true))
{
throw new IllegalStateException("Node is still rebuilding. Check nodetool netstats.");
}
try
{
logger.info("rebuild from dc: {}", sourceDc == null ? "(any dc)" : sourceDc);
RangeStreamer streamer = new RangeStreamer(tokenMetadata,
null,
FBUtilities.getBroadcastAddress(),
"Rebuild",
!replacing && useStrictConsistency,
DatabaseDescriptor.getEndpointSnitch(),
streamStateStore);
streamer.addSourceFilter(new RangeStreamer.FailureDetectorSourceFilter(FailureDetector.instance));
if (sourceDc != null)
streamer.addSourceFilter(new RangeStreamer.SingleDatacenterFilter(DatabaseDescriptor.getEndpointSnitch(), sourceDc));
for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces())
streamer.addRanges(keyspaceName, getLocalRanges(keyspaceName));
StreamResultFuture resultFuture = streamer.fetchAsync();
// wait for result
resultFuture.get();
}
catch (InterruptedException e)
{
throw new RuntimeException("Interrupted while waiting on rebuild streaming");
}
catch (ExecutionException e)
{
// This is used exclusively through JMX, so log the full trace but only throw a simple RTE
logger.error("Error while rebuilding node", e.getCause());
throw new RuntimeException("Error while rebuilding node: " + e.getCause().getMessage());
}
finally
{
// rebuild is done (successfully or not)
isRebuilding.set(false);
}
}
public void setStreamThroughputMbPerSec(int value)
{
int oldValue = DatabaseDescriptor.getStreamThroughputOutboundMegabitsPerSec();
DatabaseDescriptor.setStreamThroughputOutboundMegabitsPerSec(value);
StreamManager.StreamRateLimiter.updateThroughput();
logger.info("setstreamthroughput: throttle set to {} Mb/s (was {} Mb/s)", value, oldValue);
}
public int getStreamThroughputMbPerSec()
{
return DatabaseDescriptor.getStreamThroughputOutboundMegabitsPerSec();
}
public void setInterDCStreamThroughputMbPerSec(int value)
{
int oldValue = DatabaseDescriptor.getInterDCStreamThroughputOutboundMegabitsPerSec();
DatabaseDescriptor.setInterDCStreamThroughputOutboundMegabitsPerSec(value);
StreamManager.StreamRateLimiter.updateInterDCThroughput();
logger.info("setinterdcstreamthroughput: throttle set to {} Mb/s (was {} Mb/s)", value, oldValue);
}
public int getInterDCStreamThroughputMbPerSec()
{
return DatabaseDescriptor.getInterDCStreamThroughputOutboundMegabitsPerSec();
}
public int getConcurrentCompactors()
{
return DatabaseDescriptor.getConcurrentCompactors();
}
public int getCompactionThroughputMbPerSec()
{
return DatabaseDescriptor.getCompactionThroughputMbPerSec();
}
public void setCompactionThroughputMbPerSec(int value)
{
DatabaseDescriptor.setCompactionThroughputMbPerSec(value);
CompactionManager.instance.setRate(value);
}
public boolean isIncrementalBackupsEnabled()
{
return DatabaseDescriptor.isIncrementalBackupsEnabled();
}
public void setIncrementalBackupsEnabled(boolean value)
{
DatabaseDescriptor.setIncrementalBackupsEnabled(value);
}
@VisibleForTesting // only used by test
public void setMovingModeUnsafe()
{
setMode(Mode.MOVING, true);
}
/**
* Only used in jvm dtest when not using GOSSIP.
* See org.apache.cassandra.distributed.impl.Instance#initializeRing(org.apache.cassandra.distributed.api.ICluster)
*/
@VisibleForTesting
public void setNormalModeUnsafe()
{
setMode(Mode.NORMAL, true);
}
private void setMode(Mode m, boolean log)
{
setMode(m, null, log);
}
private void setMode(Mode m, String msg, boolean log)
{
operationMode = m;
String logMsg = msg == null ? m.toString() : String.format("%s: %s", m, msg);
if (log)
logger.info(logMsg);
else
logger.debug(logMsg);
}
/**
* Bootstrap node by fetching data from other nodes.
* If node is bootstrapping as a new node, then this also announces bootstrapping to the cluster.
*
* This blocks until streaming is done.
*
* @param tokens bootstrapping tokens
* @return true if bootstrap succeeds.
*/
private boolean bootstrap(final Collection<Token> tokens)
{
isBootstrapMode = true;
SystemKeyspace.updateTokens(tokens); // DON'T use setToken, that makes us part of the ring locally which is incorrect until we are done bootstrapping
if (!replacing || !isReplacingSameAddress())
{
// if not an existing token then bootstrap
List<Pair<ApplicationState, VersionedValue>> states = new ArrayList<>();
states.add(Pair.create(ApplicationState.TOKENS, valueFactory.tokens(tokens)));
states.add(Pair.create(ApplicationState.STATUS, replacing?
valueFactory.bootReplacing(DatabaseDescriptor.getReplaceAddress()) :
valueFactory.bootstrapping(tokens)));
Gossiper.instance.addLocalApplicationStates(states);
setMode(Mode.JOINING, "sleeping " + RING_DELAY + " ms for pending range setup", true);
Uninterruptibles.sleepUninterruptibly(RING_DELAY, TimeUnit.MILLISECONDS);
}
else
{
// Don't set any state for the node which is bootstrapping the existing token...
tokenMetadata.updateNormalTokens(tokens, FBUtilities.getBroadcastAddress());
SystemKeyspace.removeEndpoint(DatabaseDescriptor.getReplaceAddress());
}
if (!Gossiper.instance.seenAnySeed())
throw new IllegalStateException("Unable to contact any seeds!");
if (Boolean.getBoolean("cassandra.reset_bootstrap_progress"))
{
logger.info("Resetting bootstrap progress to start fresh");
SystemKeyspace.resetAvailableRanges();
}
setMode(Mode.JOINING, "Starting to bootstrap...", true);
BootStrapper bootstrapper = new BootStrapper(FBUtilities.getBroadcastAddress(), tokens, tokenMetadata);
bootstrapper.addProgressListener(progressSupport);
ListenableFuture<StreamState> bootstrapStream = bootstrapper.bootstrap(streamStateStore, !replacing && useStrictConsistency); // handles token update
try
{
bootstrapStream.get();
bootstrapFinished();
logger.info("Bootstrap completed for tokens {}", tokens);
return true;
}
catch (Throwable e)
{
logger.error("Error while waiting on bootstrap to complete. Bootstrap will have to be restarted.", e);
return false;
}
}
/**
* All MVs have been created during bootstrap, so mark them as built
*/
private void markViewsAsBuilt() {
for (String keyspace : Schema.instance.getUserKeyspaces())
{
for (ViewDefinition view: Schema.instance.getKSMetaData(keyspace).views)
SystemKeyspace.finishViewBuildStatus(view.ksName, view.viewName);
}
}
/**
* Called when bootstrap did finish successfully
*/
private void bootstrapFinished() {
markViewsAsBuilt();
isBootstrapMode = false;
}
public boolean resumeBootstrap()
{
if (isBootstrapMode && SystemKeyspace.bootstrapInProgress())
{
logger.info("Resuming bootstrap...");
// get bootstrap tokens saved in system keyspace
final Collection<Token> tokens = SystemKeyspace.getSavedTokens();
// already bootstrapped ranges are filtered during bootstrap
BootStrapper bootstrapper = new BootStrapper(FBUtilities.getBroadcastAddress(), tokens, tokenMetadata);
bootstrapper.addProgressListener(progressSupport);
ListenableFuture<StreamState> bootstrapStream = bootstrapper.bootstrap(streamStateStore, !replacing && useStrictConsistency); // handles token update
Futures.addCallback(bootstrapStream, new FutureCallback<StreamState>()
{
@Override
public void onSuccess(StreamState streamState)
{
try
{
bootstrapFinished();
if (isSurveyMode)
{
logger.info("Startup complete, but write survey mode is active, not becoming an active ring member. Use JMX (StorageService->joinRing()) to finalize ring joining.");
}
else
{
isSurveyMode = false;
progressSupport.progress("bootstrap", ProgressEvent.createNotification("Joining ring..."));
finishJoiningRing(bootstrapTokens);
}
progressSupport.progress("bootstrap", new ProgressEvent(ProgressEventType.COMPLETE, 1, 1, "Resume bootstrap complete"));
if (!isNativeTransportRunning())
daemon.initializeClientTransports();
daemon.start();
logger.info("Resume complete");
}
catch(Exception e)
{
onFailure(e);
throw e;
}
}
@Override
public void onFailure(Throwable e)
{
String message = "Error during bootstrap: ";
if (e instanceof ExecutionException && e.getCause() != null)
{
message += e.getCause().getMessage();
}
else
{
message += e.getMessage();
}
logger.error(message, e);
progressSupport.progress("bootstrap", new ProgressEvent(ProgressEventType.ERROR, 1, 1, message));
progressSupport.progress("bootstrap", new ProgressEvent(ProgressEventType.COMPLETE, 1, 1, "Resume bootstrap complete"));
}
});
return true;
}
else
{
logger.info("Resuming bootstrap is requested, but the node is already bootstrapped.");
return false;
}
}
public boolean isBootstrapMode()
{
return isBootstrapMode;
}
public TokenMetadata getTokenMetadata()
{
return tokenMetadata;
}
/**
* Increment the known compaction severity of the events in this node
*/
public void reportSeverity(double incr)
{
bgMonitor.incrCompactionSeverity(incr);
}
public void reportManualSeverity(double incr)
{
bgMonitor.incrManualSeverity(incr);
}
public double getSeverity(InetAddress endpoint)
{
return bgMonitor.getSeverity(endpoint);
}
public void shutdownBGMonitorAndWait(long timeout, TimeUnit unit) throws TimeoutException, InterruptedException
{
bgMonitor.shutdownAndWait(timeout, unit);
}
/**
* For a keyspace, return the ranges and corresponding listen addresses.
* @param keyspace
* @return the endpoint map
*/
public Map<List<String>, List<String>> getRangeToEndpointMap(String keyspace)
{
/* All the ranges for the tokens */
Map, List> map = new HashMap<>();
for (Map.Entry,List> entry : getRangeToAddressMap(keyspace).entrySet())
{
map.put(entry.getKey().asList(), stringify(entry.getValue()));
}
return map;
}
/**
* Return the rpc address associated with an endpoint as a string.
* @param endpoint The endpoint to get rpc address for
* @return the rpc address
*/
public String getRpcaddress(InetAddress endpoint)
{
if (endpoint.equals(FBUtilities.getBroadcastAddress()))
return FBUtilities.getBroadcastRpcAddress().getHostAddress();
else if (Gossiper.instance.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.RPC_ADDRESS) == null)
return endpoint.getHostAddress();
else
return Gossiper.instance.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.RPC_ADDRESS).value;
}
/**
* For a keyspace, return the ranges and the corresponding RPC addresses.
* @param keyspace
* @return the endpoint map
*/
public Map<List<String>, List<String>> getRangeToRpcaddressMap(String keyspace)
{
/* All the ranges for the tokens */
Map<List<String>, List<String>> map = new HashMap<>();
for (Map.Entry<Range<Token>, List<InetAddress>> entry : getRangeToAddressMap(keyspace).entrySet())
{
List<String> rpcaddrs = new ArrayList<>(entry.getValue().size());
for (InetAddress endpoint: entry.getValue())
{
rpcaddrs.add(getRpcaddress(endpoint));
}
map.put(entry.getKey().asList(), rpcaddrs);
}
return map;
}
public Map<List<String>, List<String>> getPendingRangeToEndpointMap(String keyspace)
{
// some people just want to get a visual representation of things. Allow null and set it to the first
// non-system keyspace.
if (keyspace == null)
keyspace = Schema.instance.getNonLocalStrategyKeyspaces().get(0);
Map<List<String>, List<String>> map = new HashMap<>();
for (Map.Entry<Range<Token>, Collection<InetAddress>> entry : tokenMetadata.getPendingRangesMM(keyspace).asMap().entrySet())
{
List<InetAddress> l = new ArrayList<>(entry.getValue());
map.put(entry.getKey().asList(), stringify(l));
}
return map;
}
public Map<Range<Token>, List<InetAddress>> getRangeToAddressMap(String keyspace)
{
return getRangeToAddressMap(keyspace, tokenMetadata.sortedTokens());
}
public Map<Range<Token>, List<InetAddress>> getRangeToAddressMapInLocalDC(String keyspace)
{
Predicate<InetAddress> isLocalDC = new Predicate<InetAddress>()
{
public boolean apply(InetAddress address)
{
return isLocalDC(address);
}
};
Map<Range<Token>, List<InetAddress>> origMap = getRangeToAddressMap(keyspace, getTokensInLocalDC());
Map<Range<Token>, List<InetAddress>> filteredMap = Maps.newHashMap();
for (Map.Entry<Range<Token>, List<InetAddress>> entry : origMap.entrySet())
{
List<InetAddress> endpointsInLocalDC = Lists.newArrayList(Collections2.filter(entry.getValue(), isLocalDC));
filteredMap.put(entry.getKey(), endpointsInLocalDC);
}
return filteredMap;
}
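/**
* @return the sorted ring tokens whose owning endpoint is in the local datacenter
*/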
private List<Token> getTokensInLocalDC()
{
List<Token> filteredTokens = Lists.newArrayList();
for (Token token : tokenMetadata.sortedTokens())
{
InetAddress endpoint = tokenMetadata.getEndpoint(token);
if (isLocalDC(endpoint))
filteredTokens.add(token);
}
return filteredTokens;
}
private boolean isLocalDC(InetAddress targetHost)
{
String remoteDC = DatabaseDescriptor.getEndpointSnitch().getDatacenter(targetHost);
String localDC = DatabaseDescriptor.getEndpointSnitch().getDatacenter(FBUtilities.getBroadcastAddress());
return remoteDC.equals(localDC);
}
private Map<Range<Token>, List<InetAddress>> getRangeToAddressMap(String keyspace, List<Token> sortedTokens)
{
// some people just want to get a visual representation of things. Allow null and set it to the first
// non-system keyspace.
if (keyspace == null)
keyspace = Schema.instance.getNonLocalStrategyKeyspaces().get(0);
List<Range<Token>> ranges = getAllRanges(sortedTokens);
return constructRangeToEndpointMap(keyspace, ranges);
}
/**
* The same as {@code describeRing(String)} but converts TokenRange to the String for JMX compatibility
*
* @param keyspace The keyspace to fetch information about
*
* @return a List of TokenRange(s) converted to String for the given keyspace
*/
public List<String> describeRingJMX(String keyspace) throws IOException
{
List<TokenRange> tokenRanges;
try
{
tokenRanges = describeRing(keyspace);
}
catch (InvalidRequestException e)
{
throw new IOException(e.getMessage());
}
List<String> result = new ArrayList<>(tokenRanges.size());
for (TokenRange tokenRange : tokenRanges)
result.add(tokenRange.toString());
return result;
}
/**
* The TokenRange for a given keyspace.
*
* @param keyspace The keyspace to fetch information about
*
* @return a List of TokenRange(s) for the given keyspace
*
* @throws InvalidRequestException if there is no ring information available about keyspace
*/
public List<TokenRange> describeRing(String keyspace) throws InvalidRequestException
{
return describeRing(keyspace, false);
}
/**
* The same as {@code describeRing(String)} but considers only the part of the ring formed by nodes in the local DC.
*/
public List<TokenRange> describeLocalRing(String keyspace) throws InvalidRequestException
{
return describeRing(keyspace, true);
}
private List<TokenRange> describeRing(String keyspace, boolean includeOnlyLocalDC) throws InvalidRequestException
{
if (!Schema.instance.getKeyspaces().contains(keyspace))
throw new InvalidRequestException("No such keyspace: " + keyspace);
if (keyspace == null || Keyspace.open(keyspace).getReplicationStrategy() instanceof LocalStrategy)
throw new InvalidRequestException("There is no ring for the keyspace: " + keyspace);
List<TokenRange> ranges = new ArrayList<>();
Token.TokenFactory tf = getTokenFactory();
Map<Range<Token>, List<InetAddress>> rangeToAddressMap =
includeOnlyLocalDC
? getRangeToAddressMapInLocalDC(keyspace)
: getRangeToAddressMap(keyspace);
for (Map.Entry<Range<Token>, List<InetAddress>> entry : rangeToAddressMap.entrySet())
{
Range<Token> range = entry.getKey();
List<InetAddress> addresses = entry.getValue();
List<String> endpoints = new ArrayList<>(addresses.size());
List<String> rpc_endpoints = new ArrayList<>(addresses.size());
List<EndpointDetails> epDetails = new ArrayList<>(addresses.size());
for (InetAddress endpoint : addresses)
{
EndpointDetails details = new EndpointDetails();
details.host = endpoint.getHostAddress();
details.datacenter = DatabaseDescriptor.getEndpointSnitch().getDatacenter(endpoint);
details.rack = DatabaseDescriptor.getEndpointSnitch().getRack(endpoint);
endpoints.add(details.host);
rpc_endpoints.add(getRpcaddress(endpoint));
epDetails.add(details);
}
TokenRange tr = new TokenRange(tf.toString(range.left.getToken()), tf.toString(range.right.getToken()), endpoints)
.setEndpoint_details(epDetails)
.setRpc_endpoints(rpc_endpoints);
ranges.add(tr);
}
return ranges;
}
public Map<String, String> getTokenToEndpointMap()
{
Map<Token, InetAddress> mapInetAddress = tokenMetadata.getNormalAndBootstrappingTokenToEndpointMap();
// in order to preserve tokens in ascending order, we use LinkedHashMap here
Map<String, String> mapString = new LinkedHashMap<>(mapInetAddress.size());
List<Token> tokens = new ArrayList<>(mapInetAddress.keySet());
Collections.sort(tokens);
for (Token token : tokens)
{
mapString.put(token.toString(), mapInetAddress.get(token).getHostAddress());
}
return mapString;
}
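/**
* @return the host id of the local node as a string, or null if it is not known yet
*/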
public String getLocalHostId()
{
UUID id = getLocalHostUUID();
return id != null ? id.toString() : null;
}
public UUID getLocalHostUUID()
{
UUID id = getTokenMetadata().getHostId(FBUtilities.getBroadcastAddress());
if (id != null)
return id;
// this condition is to prevent accessing the tables when the node is not started yet, and in particular,
// when it is not going to be started at all (e.g. when running some unit tests or client tools).
else if (DatabaseDescriptor.isDaemonInitialized())
return SystemKeyspace.getLocalHostId();
return null;
}
public Map<String, String> getHostIdMap()
{
return getEndpointToHostId();
}
public Map<String, String> getEndpointToHostId()
{
Map<String, String> mapOut = new HashMap<>();
for (Map.Entry<InetAddress, UUID> entry : getTokenMetadata().getEndpointToHostIdMapForReading().entrySet())
mapOut.put(entry.getKey().getHostAddress(), entry.getValue().toString());
return mapOut;
}
public Map<String, String> getHostIdToEndpoint()
{
Map<String, String> mapOut = new HashMap<>();
for (Map.Entry<InetAddress, UUID> entry : getTokenMetadata().getEndpointToHostIdMapForReading().entrySet())
mapOut.put(entry.getValue().toString(), entry.getKey().getHostAddress());
return mapOut;
}
/**
* Construct the range to endpoint mapping based on the true view
* of the world.
* @param ranges
* @return mapping of ranges to the replicas responsible for them.
*/
private Map<Range<Token>, List<InetAddress>> constructRangeToEndpointMap(String keyspace, List<Range<Token>> ranges)
{
Map<Range<Token>, List<InetAddress>> rangeToEndpointMap = new HashMap<>(ranges.size());
for (Range<Token> range : ranges)
{
rangeToEndpointMap.put(range, Keyspace.open(keyspace).getReplicationStrategy().getNaturalEndpoints(range.right));
}
return rangeToEndpointMap;
}
public void beforeChange(InetAddress endpoint, EndpointState currentState, ApplicationState newStateKey, VersionedValue newValue)
{
// no-op
}
/*
* Handle the reception of a new particular ApplicationState for a particular endpoint. Note that the value of the
* ApplicationState has not necessarily "changed" since the last known value, if we already received the same update
* from somewhere else.
*
* onChange only ever sees one ApplicationState piece change at a time (even if many ApplicationState updates were
* received at the same time), so we perform a kind of state machine here. We are concerned with two events: knowing
* the token associated with an endpoint, and knowing its operation mode. Nodes can start in either bootstrap or
* normal mode, and from bootstrap mode can change mode to normal. A node in bootstrap mode needs to have
* pendingranges set in TokenMetadata; a node in normal mode should instead be part of the token ring.
*
* Normal progression of ApplicationState.STATUS values for a node should be like this:
* STATUS_BOOTSTRAPPING,token
* if bootstrapping. stays this way until all files are received.
* STATUS_NORMAL,token
* ready to serve reads and writes.
* STATUS_LEAVING,token
* get ready to leave the cluster as part of a decommission
* STATUS_LEFT,token
* set after decommission is completed.
*
* Other STATUS values that may be seen (possibly anywhere in the normal progression):
* STATUS_MOVING,newtoken
* set if node is currently moving to a new token in the ring
* REMOVING_TOKEN,deadtoken
* set if the node is dead and is being removed by its REMOVAL_COORDINATOR
* REMOVED_TOKEN,deadtoken
* set if the node is dead and has been removed by its REMOVAL_COORDINATOR
*
* Note: Any time a node state changes from STATUS_NORMAL, it will not be visible to new nodes. So it follows that
* you should never bootstrap a new node during a removenode, decommission or move.
*/
public void onChange(InetAddress endpoint, ApplicationState state, VersionedValue value)
{
if (state == ApplicationState.STATUS)
{
String[] pieces = splitValue(value);
assert (pieces.length > 0);
String moveName = pieces[0];
switch (moveName)
{
case VersionedValue.STATUS_BOOTSTRAPPING_REPLACE:
handleStateBootreplacing(endpoint, pieces);
break;
case VersionedValue.STATUS_BOOTSTRAPPING:
handleStateBootstrap(endpoint);
break;
case VersionedValue.STATUS_NORMAL:
handleStateNormal(endpoint, VersionedValue.STATUS_NORMAL);
break;
case VersionedValue.SHUTDOWN:
handleStateNormal(endpoint, VersionedValue.SHUTDOWN);
break;
case VersionedValue.REMOVING_TOKEN:
case VersionedValue.REMOVED_TOKEN:
handleStateRemoving(endpoint, pieces);
break;
case VersionedValue.STATUS_LEAVING:
handleStateLeaving(endpoint);
break;
case VersionedValue.STATUS_LEFT:
handleStateLeft(endpoint, pieces);
break;
case VersionedValue.STATUS_MOVING:
handleStateMoving(endpoint, pieces);
break;
}
}
else
{
EndpointState epState = Gossiper.instance.getEndpointStateForEndpoint(endpoint);
if (epState == null || Gossiper.instance.isDeadState(epState))
{
logger.debug("Ignoring state change for dead or unknown endpoint: {}", endpoint);
return;
}
if (getTokenMetadata().isMember(endpoint))
{
final ExecutorService executor = StageManager.getStage(Stage.MUTATION);
switch (state)
{
case RELEASE_VERSION:
SystemKeyspace.updatePeerReleaseVersion(endpoint, value.value, this::refreshMaxNativeProtocolVersion, executor);
break;
case DC:
updateTopology(endpoint);
SystemKeyspace.updatePeerInfo(endpoint, "data_center", value.value, executor);
break;
case RACK:
updateTopology(endpoint);
SystemKeyspace.updatePeerInfo(endpoint, "rack", value.value, executor);
break;
case RPC_ADDRESS:
try
{
SystemKeyspace.updatePeerInfo(endpoint, "rpc_address", InetAddress.getByName(value.value), executor);
}
catch (UnknownHostException e)
{
throw new RuntimeException(e);
}
break;
case SCHEMA:
SystemKeyspace.updatePeerInfo(endpoint, "schema_version", UUID.fromString(value.value), executor);
MigrationCoordinator.instance.reportEndpointVersion(endpoint, UUID.fromString(value.value));
break;
case HOST_ID:
SystemKeyspace.updatePeerInfo(endpoint, "host_id", UUID.fromString(value.value), executor);
break;
case RPC_READY:
notifyRpcChange(endpoint, epState.isRpcReady());
break;
case NET_VERSION:
updateNetVersion(endpoint, value);
break;
}
}
}
}
private static String[] splitValue(VersionedValue value)
{
return value.value.split(VersionedValue.DELIMITER_STR, -1);
}
private void updateNetVersion(InetAddress endpoint, VersionedValue value)
{
try
{
MessagingService.instance().setVersion(endpoint, Integer.valueOf(value.value));
}
catch (NumberFormatException e)
{
throw new AssertionError("Got invalid value for NET_VERSION application state: " + value.value);
}
}
public void updateTopology(InetAddress endpoint)
{
if (getTokenMetadata().isMember(endpoint))
{
getTokenMetadata().updateTopology(endpoint);
}
}
public void updateTopology()
{
getTokenMetadata().updateTopology();
}
private void updatePeerInfo(InetAddress endpoint)
{
EndpointState epState = Gossiper.instance.getEndpointStateForEndpoint(endpoint);
final ExecutorService executor = StageManager.getStage(Stage.MUTATION);
for (Map.Entry<ApplicationState, VersionedValue> entry : epState.states())
{
switch (entry.getKey())
{
case RELEASE_VERSION:
SystemKeyspace.updatePeerReleaseVersion(endpoint, entry.getValue().value, this::refreshMaxNativeProtocolVersion, executor);
break;
case DC:
SystemKeyspace.updatePeerInfo(endpoint, "data_center", entry.getValue().value, executor);
break;
case RACK:
SystemKeyspace.updatePeerInfo(endpoint, "rack", entry.getValue().value, executor);
break;
case RPC_ADDRESS:
try
{
SystemKeyspace.updatePeerInfo(endpoint, "rpc_address", InetAddress.getByName(entry.getValue().value), executor);
}
catch (UnknownHostException e)
{
throw new RuntimeException(e);
}
break;
case SCHEMA:
SystemKeyspace.updatePeerInfo(endpoint, "schema_version", UUID.fromString(entry.getValue().value), executor);
break;
case HOST_ID:
SystemKeyspace.updatePeerInfo(endpoint, "host_id", UUID.fromString(entry.getValue().value), executor);
break;
}
}
}
private void notifyRpcChange(InetAddress endpoint, boolean ready)
{
if (ready)
notifyUp(endpoint);
else
notifyDown(endpoint);
}
private void notifyUp(InetAddress endpoint)
{
if (!isRpcReady(endpoint) || !Gossiper.instance.isAlive(endpoint))
return;
for (IEndpointLifecycleSubscriber subscriber : lifecycleSubscribers)
subscriber.onUp(endpoint);
}
private void notifyDown(InetAddress endpoint)
{
for (IEndpointLifecycleSubscriber subscriber : lifecycleSubscribers)
subscriber.onDown(endpoint);
}
private void notifyJoined(InetAddress endpoint)
{
if (!isStatus(endpoint, VersionedValue.STATUS_NORMAL))
return;
for (IEndpointLifecycleSubscriber subscriber : lifecycleSubscribers)
subscriber.onJoinCluster(endpoint);
}
private void notifyMoved(InetAddress endpoint)
{
for (IEndpointLifecycleSubscriber subscriber : lifecycleSubscribers)
subscriber.onMove(endpoint);
}
private void notifyLeft(InetAddress endpoint)
{
for (IEndpointLifecycleSubscriber subscriber : lifecycleSubscribers)
subscriber.onLeaveCluster(endpoint);
}
private boolean isStatus(InetAddress endpoint, String status)
{
EndpointState state = Gossiper.instance.getEndpointStateForEndpoint(endpoint);
return state != null && state.getStatus().equals(status);
}
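/**
* Whether the given endpoint has advertised that it is ready to serve client requests.
* Endpoints on a messaging version older than 2.2 do not gossip RPC_READY and are assumed ready.
*/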
public boolean isRpcReady(InetAddress endpoint)
{
if (MessagingService.instance().getVersion(endpoint) < MessagingService.VERSION_22)
return true;
EndpointState state = Gossiper.instance.getEndpointStateForEndpoint(endpoint);
return state != null && state.isRpcReady();
}
/**
* Set the RPC status. Because when draining a node we need to set the RPC
* status to not ready, and drain is called by the shutdown hook, it may be that value is false
* and there is no local endpoint state. In this case it's OK to just do nothing. Therefore,
* we assert that the local endpoint state is not null only when value is true.
*
* @param value - true indicates that RPC is ready, false indicates the opposite.
*/
public void setRpcReady(boolean value)
{
EndpointState state = Gossiper.instance.getEndpointStateForEndpoint(FBUtilities.getBroadcastAddress());
// if value is false we're OK with a null state, if it is true we are not.
assert !value || state != null;
if (state != null)
Gossiper.instance.addLocalApplicationState(ApplicationState.RPC_READY, valueFactory.rpcReady(value));
}
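/**
* Deserialize the tokens gossiped by an endpoint (ApplicationState.TOKENS).
* Returns an empty list if the endpoint or its TOKENS state is unknown.
*/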
private Collection<Token> getTokensFor(InetAddress endpoint)
{
try
{
EndpointState state = Gossiper.instance.getEndpointStateForEndpoint(endpoint);
if (state == null)
return Collections.emptyList();
VersionedValue versionedValue = state.getApplicationState(ApplicationState.TOKENS);
if (versionedValue == null)
return Collections.emptyList();
return TokenSerializer.deserialize(tokenMetadata.partitioner, new DataInputStream(new ByteArrayInputStream(versionedValue.toBytes())));
}
catch (IOException e)
{
throw new RuntimeException(e);
}
}
/**
* Handle node bootstrap
*
* @param endpoint bootstrapping node
*/
private void handleStateBootstrap(InetAddress endpoint)
{
Collection<Token> tokens;
// explicitly check for TOKENS, because a bootstrapping node might be bootstrapping in legacy mode; that is, not using vnodes and no token specified
tokens = getTokensFor(endpoint);
if (logger.isDebugEnabled())
logger.debug("Node {} state bootstrapping, token {}", endpoint, tokens);
// if this node is present in token metadata, either we have missed intermediate states
// or the node had crashed. Print warning if needed, clear obsolete stuff and
// continue.
if (tokenMetadata.isMember(endpoint))
{
// If isLeaving is false, we have missed both LEAVING and LEFT. However, if
// isLeaving is true, we have only missed LEFT. Waiting time between completing
// leave operation and rebootstrapping is relatively short, so the latter is quite
// common (not enough time for gossip to spread). Therefore we report only the
// former in the log.
if (!tokenMetadata.isLeaving(endpoint))
logger.info("Node {} state jump to bootstrap", endpoint);
tokenMetadata.removeEndpoint(endpoint);
}
tokenMetadata.addBootstrapTokens(tokens, endpoint);
PendingRangeCalculatorService.instance.update();
tokenMetadata.updateHostId(Gossiper.instance.getHostId(endpoint), endpoint);
}
private void handleStateBootreplacing(InetAddress newNode, String[] pieces)
{
InetAddress oldNode;
try
{
oldNode = InetAddress.getByName(pieces[1]);
}
catch (Exception e)
{
logger.error("Node {} tried to replace malformed endpoint {}.", newNode, pieces[1], e);
return;
}
if (FailureDetector.instance.isAlive(oldNode))
{
throw new RuntimeException(String.format("Node %s is trying to replace alive node %s.", newNode, oldNode));
}
Optional<InetAddress> replacingNode = tokenMetadata.getReplacingNode(newNode);
if (replacingNode.isPresent() && !replacingNode.get().equals(oldNode))
{
throw new RuntimeException(String.format("Node %s is already replacing %s but is trying to replace %s.",
newNode, replacingNode.get(), oldNode));
}
Collection<Token> tokens = getTokensFor(newNode);
if (logger.isDebugEnabled())
logger.debug("Node {} is replacing {}, tokens {}", newNode, oldNode, tokens);
tokenMetadata.addReplaceTokens(tokens, newNode, oldNode);
PendingRangeCalculatorService.instance.update();
tokenMetadata.updateHostId(Gossiper.instance.getHostId(newNode), newNode);
}
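/**
* Make sure token metadata matches the tokens currently gossiped by the endpoint before we
* act on a state change such as leaving, moving or removing.
*/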
private void ensureUpToDateTokenMetadata(String status, InetAddress endpoint)
{
Set<Token> tokens = new TreeSet<>(getTokensFor(endpoint));
if (logger.isDebugEnabled())
logger.debug("Node {} state {}, tokens {}", endpoint, status, tokens);
// If the node is previously unknown or tokens do not match, update tokenmetadata to
// have this node as 'normal' (it must have been using this token before the
// leave). This way we'll get pending ranges right.
if (!tokenMetadata.isMember(endpoint))
{
logger.info("Node {} state jump to {}", endpoint, status);
updateTokenMetadata(endpoint, tokens);
}
else if (!tokens.equals(new TreeSet<>(tokenMetadata.getTokens(endpoint))))
{
logger.warn("Node {} '{}' token mismatch. Long network partition?", endpoint, status);
updateTokenMetadata(endpoint, tokens);
}
}
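/**
* Record the given tokens as owned by the endpoint. If another endpoint claims the same token,
* the endpoint with the later gossip startup time wins; endpoints left without tokens are removed.
*/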
private void updateTokenMetadata(InetAddress endpoint, Iterable<Token> tokens)
{
updateTokenMetadata(endpoint, tokens, new HashSet<>());
}
private void updateTokenMetadata(InetAddress endpoint, Iterable<Token> tokens, Set<InetAddress> endpointsToRemove)
{
Set<Token> tokensToUpdateInMetadata = new HashSet<>();
Set<Token> tokensToUpdateInSystemKeyspace = new HashSet<>();
for (final Token token : tokens)
{
// we don't want to update if this node is responsible for the token and it has a later startup time than endpoint.
InetAddress currentOwner = tokenMetadata.getEndpoint(token);
if (currentOwner == null)
{
logger.debug("New node {} at token {}", endpoint, token);
tokensToUpdateInMetadata.add(token);
tokensToUpdateInSystemKeyspace.add(token);
}
else if (endpoint.equals(currentOwner))
{
// set state back to normal, since the node may have tried to leave, but failed and is now back up
tokensToUpdateInMetadata.add(token);
tokensToUpdateInSystemKeyspace.add(token);
}
else if (Gossiper.instance.compareEndpointStartup(endpoint, currentOwner) > 0)
{
tokensToUpdateInMetadata.add(token);
tokensToUpdateInSystemKeyspace.add(token);
// currentOwner is no longer current, endpoint is. Keep track of these moves, because when
// a host no longer has any tokens, we'll want to remove it.
Multimap<InetAddress, Token> epToTokenCopy = getTokenMetadata().getEndpointToTokenMapForReading();
epToTokenCopy.get(currentOwner).remove(token);
if (epToTokenCopy.get(currentOwner).isEmpty())
endpointsToRemove.add(currentOwner);
logger.info("Nodes {} and {} have the same token {}. {} is the new owner", endpoint, currentOwner, token, endpoint);
}
else
{
logger.info("Nodes {} and {} have the same token {}. Ignoring {}", endpoint, currentOwner, token, endpoint);
}
}
tokenMetadata.updateNormalTokens(tokensToUpdateInMetadata, endpoint);
for (InetAddress ep : endpointsToRemove)
{
removeEndpoint(ep);
if (replacing && ep.equals(DatabaseDescriptor.getReplaceAddress()))
Gossiper.instance.replacementQuarantine(ep); // quarantine locally longer than normally; see CASSANDRA-8260
}
if (!tokensToUpdateInSystemKeyspace.isEmpty())
SystemKeyspace.updateTokens(endpoint, tokensToUpdateInSystemKeyspace, StageManager.getStage(Stage.MUTATION));
}
/**
* Handle node move to normal state. That is, node is entering token ring and participating
* in reads.
*
* @param endpoint node
*/
private void handleStateNormal(final InetAddress endpoint, final String status)
{
Collection<Token> tokens = getTokensFor(endpoint);
Set<InetAddress> endpointsToRemove = new HashSet<>();
if (logger.isDebugEnabled())
logger.debug("Node {} state {}, token {}", endpoint, status, tokens);
if (tokenMetadata.isMember(endpoint))
logger.info("Node {} state jump to {}", endpoint, status);
if (tokens.isEmpty() && status.equals(VersionedValue.STATUS_NORMAL))
logger.error("Node {} is in state normal but it has no tokens, state: {}",
endpoint,
Gossiper.instance.getEndpointStateForEndpoint(endpoint));
Optional<InetAddress> replacingNode = tokenMetadata.getReplacingNode(endpoint);
if (replacingNode.isPresent())
{
assert !endpoint.equals(replacingNode.get()) : "Pending replacement endpoint with same address is not supported";
logger.info("Node {} will complete replacement of {} for tokens {}", endpoint, replacingNode.get(), tokens);
if (FailureDetector.instance.isAlive(replacingNode.get()))
{
logger.error("Node {} cannot complete replacement of alive node {}.", endpoint, replacingNode.get());
return;
}
endpointsToRemove.add(replacingNode.get());
}
Optional<InetAddress> replacementNode = tokenMetadata.getReplacementNode(endpoint);
if (replacementNode.isPresent())
{
logger.warn("Node {} is currently being replaced by node {}.", endpoint, replacementNode.get());
}
updatePeerInfo(endpoint);
// Order Matters, TM.updateHostID() should be called before TM.updateNormalToken(), (see CASSANDRA-4300).
UUID hostId = Gossiper.instance.getHostId(endpoint);
InetAddress existing = tokenMetadata.getEndpointForHostId(hostId);
if (replacing && isReplacingSameAddress() && Gossiper.instance.getEndpointStateForEndpoint(DatabaseDescriptor.getReplaceAddress()) != null
&& (hostId.equals(Gossiper.instance.getHostId(DatabaseDescriptor.getReplaceAddress()))))
logger.warn("Not updating token metadata for {} because I am replacing it", endpoint);
else
{
if (existing != null && !existing.equals(endpoint))
{
if (existing.equals(FBUtilities.getBroadcastAddress()))
{
logger.warn("Not updating host ID {} for {} because it's mine", hostId, endpoint);
tokenMetadata.removeEndpoint(endpoint);
endpointsToRemove.add(endpoint);
}
else if (Gossiper.instance.compareEndpointStartup(endpoint, existing) > 0)
{
logger.warn("Host ID collision for {} between {} and {}; {} is the new owner", hostId, existing, endpoint, endpoint);
tokenMetadata.removeEndpoint(existing);
endpointsToRemove.add(existing);
tokenMetadata.updateHostId(hostId, endpoint);
}
else
{
logger.warn("Host ID collision for {} between {} and {}; ignored {}", hostId, existing, endpoint, endpoint);
tokenMetadata.removeEndpoint(endpoint);
endpointsToRemove.add(endpoint);
}
}
else
tokenMetadata.updateHostId(hostId, endpoint);
}
// capture because updateNormalTokens clears moving and member status
boolean isMember = tokenMetadata.isMember(endpoint);
boolean isMoving = tokenMetadata.isMoving(endpoint);
updateTokenMetadata(endpoint, tokens, endpointsToRemove);
if (isMoving || operationMode == Mode.MOVING)
{
tokenMetadata.removeFromMoving(endpoint);
notifyMoved(endpoint);
}
else if (!isMember) // prior to this, the node was not a member
{
notifyJoined(endpoint);
}
PendingRangeCalculatorService.instance.update();
}
/**
* Handle node preparing to leave the ring
*
* @param endpoint node
*/
private void handleStateLeaving(InetAddress endpoint)
{
// If the node is previously unknown or tokens do not match, update tokenmetadata to
// have this node as 'normal' (it must have been using this token before the
// leave). This way we'll get pending ranges right.
ensureUpToDateTokenMetadata(VersionedValue.STATUS_LEAVING, endpoint);
// at this point the endpoint is certainly a member with this token, so let's proceed
// normally
tokenMetadata.addLeavingEndpoint(endpoint);
PendingRangeCalculatorService.instance.update();
}
/**
* Handle node leaving the ring. This will happen when a node is decommissioned
*
* @param endpoint If reason for leaving is decommission, endpoint is the leaving node.
* @param pieces STATE_LEFT,token
*/
private void handleStateLeft(InetAddress endpoint, String[] pieces)
{
assert pieces.length >= 2;
Collection<Token> tokens = getTokensFor(endpoint);
if (logger.isDebugEnabled())
logger.debug("Node {} state left, tokens {}", endpoint, tokens);
excise(tokens, endpoint, extractExpireTime(pieces));
}
/**
* Handle node moving inside the ring.
*
* @param endpoint moving endpoint address
* @param pieces STATE_MOVING, token
*/
private void handleStateMoving(InetAddress endpoint, String[] pieces)
{
ensureUpToDateTokenMetadata(VersionedValue.STATUS_MOVING, endpoint);
assert pieces.length >= 2;
Token token = getTokenFactory().fromString(pieces[1]);
if (logger.isDebugEnabled())
logger.debug("Node {} state moving, new token {}", endpoint, token);
tokenMetadata.addMovingEndpoint(token, endpoint);
PendingRangeCalculatorService.instance.update();
}
/**
* Handle notification that a node is being actively removed from the ring via 'removenode'
*
* @param endpoint node
* @param pieces either REMOVED_TOKEN (node is gone) or REMOVING_TOKEN (replicas need to be restored)
*/
private void handleStateRemoving(InetAddress endpoint, String[] pieces)
{
assert (pieces.length > 0);
if (endpoint.equals(FBUtilities.getBroadcastAddress()))
{
logger.info("Received removenode gossip about myself. Is this node rejoining after an explicit removenode?");
try
{
drain();
}
catch (Exception e)
{
throw new RuntimeException(e);
}
return;
}
if (tokenMetadata.isMember(endpoint))
{
String state = pieces[0];
Collection<Token> removeTokens = tokenMetadata.getTokens(endpoint);
if (VersionedValue.REMOVED_TOKEN.equals(state))
{
excise(removeTokens, endpoint, extractExpireTime(pieces));
}
else if (VersionedValue.REMOVING_TOKEN.equals(state))
{
ensureUpToDateTokenMetadata(state, endpoint);
if (logger.isDebugEnabled())
logger.debug("Tokens {} removed manually (endpoint was {})", removeTokens, endpoint);
// Note that the endpoint is being removed
tokenMetadata.addLeavingEndpoint(endpoint);
PendingRangeCalculatorService.instance.update();
// find the endpoint coordinating this removal that we need to notify when we're done
String[] coordinator = splitValue(Gossiper.instance.getEndpointStateForEndpoint(endpoint).getApplicationState(ApplicationState.REMOVAL_COORDINATOR));
UUID hostId = UUID.fromString(coordinator[1]);
// grab any data we are now responsible for and notify responsible node
restoreReplicaCount(endpoint, tokenMetadata.getEndpointForHostId(hostId));
}
}
else // now that the gossiper has told us about this nonexistent member, notify the gossiper to remove it
{
if (VersionedValue.REMOVED_TOKEN.equals(pieces[0]))
addExpireTimeIfFound(endpoint, extractExpireTime(pieces));
removeEndpoint(endpoint);
}
}
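/**
* Remove an endpoint and its tokens from the ring, schedule its hints for deletion,
* and notify lifecycle subscribers that it has left.
*/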
private void excise(Collection<Token> tokens, InetAddress endpoint)
{
logger.info("Removing tokens {} for {}", tokens, endpoint);
UUID hostId = tokenMetadata.getHostId(endpoint);
if (hostId != null && tokenMetadata.isMember(endpoint))
{
// enough time for writes to expire and MessagingService timeout reporter callback to fire, which is where
// hints are mostly written from - using getMinRpcTimeout() / 2 for the interval.
long delay = DatabaseDescriptor.getMinRpcTimeout() + DatabaseDescriptor.getWriteRpcTimeout();
ScheduledExecutors.optionalTasks.schedule(() -> HintsService.instance.excise(hostId), delay, TimeUnit.MILLISECONDS);
}
removeEndpoint(endpoint);
tokenMetadata.removeEndpoint(endpoint);
if (!tokens.isEmpty())
tokenMetadata.removeBootstrapTokens(tokens);
notifyLeft(endpoint);
PendingRangeCalculatorService.instance.update();
}
private void excise(Collection<Token> tokens, InetAddress endpoint, long expireTime)
{
addExpireTimeIfFound(endpoint, expireTime);
excise(tokens, endpoint);
}
/** unlike excise we just need this endpoint gone without going through any notifications **/
private void removeEndpoint(InetAddress endpoint)
{
Gossiper.runInGossipStageBlocking(() -> Gossiper.instance.removeEndpoint(endpoint));
MigrationCoordinator.instance.removeAndIgnoreEndpoint(endpoint);
SystemKeyspace.removeEndpoint(endpoint);
}
protected void addExpireTimeIfFound(InetAddress endpoint, long expireTime)
{
if (expireTime != 0L)
{
Gossiper.instance.addExpireTimeForEndpoint(endpoint, expireTime);
}
}
protected long extractExpireTime(String[] pieces)
{
return Long.parseLong(pieces[2]);
}
/**
* Finds living endpoints responsible for the given ranges
*
* @param keyspaceName the keyspace ranges belong to
* @param ranges the ranges to find sources for
* @return multimap of addresses to ranges the address is responsible for
*/
private Multimap<InetAddress, Range<Token>> getNewSourceRanges(String keyspaceName, Set<Range<Token>> ranges)
{
InetAddress myAddress = FBUtilities.getBroadcastAddress();
Multimap<Range<Token>, InetAddress> rangeAddresses = Keyspace.open(keyspaceName).getReplicationStrategy().getRangeAddresses(tokenMetadata.cloneOnlyTokenMap());
Multimap<InetAddress, Range<Token>> sourceRanges = HashMultimap.create();
IFailureDetector failureDetector = FailureDetector.instance;
// find alive sources for our new ranges
for (Range<Token> range : ranges)
{
Collection<InetAddress> possibleRanges = rangeAddresses.get(range);
IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
List<InetAddress> sources = snitch.getSortedListByProximity(myAddress, possibleRanges);
assert (!sources.contains(myAddress));
for (InetAddress source : sources)
{
if (failureDetector.isAlive(source))
{
sourceRanges.put(source, range);
break;
}
}
}
return sourceRanges;
}
/**
* Sends a notification to a node indicating we have finished replicating data.
*
* @param remote node to send notification to
*/
private void sendReplicationNotification(InetAddress remote)
{
// notify the remote node that replication is finished
MessageOut msg = new MessageOut(MessagingService.Verb.REPLICATION_FINISHED);
IFailureDetector failureDetector = FailureDetector.instance;
if (logger.isDebugEnabled())
logger.debug("Notifying {} of replication completion\n", remote);
while (failureDetector.isAlive(remote))
{
AsyncOneResponse iar = MessagingService.instance().sendRR(msg, remote);
try
{
iar.get(DatabaseDescriptor.getRpcTimeout(), TimeUnit.MILLISECONDS);
return; // done
}
catch(TimeoutException e)
{
// try again
}
}
}
/**
* Called when an endpoint is removed from the ring. This function checks
* whether this node becomes responsible for new ranges as a
* consequence and streams data if needed.
*
* This is rather inefficient, but it does not matter so much
* since this is called very seldom
*
* @param endpoint the node that left
*/
private void restoreReplicaCount(InetAddress endpoint, final InetAddress notifyEndpoint)
{
Multimap<String, Map.Entry<InetAddress, Collection<Range<Token>>>> rangesToFetch = HashMultimap.create();
InetAddress myAddress = FBUtilities.getBroadcastAddress();
for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces())
{
Multimap<Range<Token>, InetAddress> changedRanges = getChangedRangesForLeaving(keyspaceName, endpoint);
Set<Range<Token>> myNewRanges = new HashSet<>();
for (Map.Entry<Range<Token>, InetAddress> entry : changedRanges.entries())
{
if (entry.getValue().equals(myAddress))
myNewRanges.add(entry.getKey());
}
Multimap<InetAddress, Range<Token>> sourceRanges = getNewSourceRanges(keyspaceName, myNewRanges);
for (Map.Entry<InetAddress, Collection<Range<Token>>> entry : sourceRanges.asMap().entrySet())
{
rangesToFetch.put(keyspaceName, entry);
}
}
StreamPlan stream = new StreamPlan("Restore replica count");
for (String keyspaceName : rangesToFetch.keySet())
{
for (Map.Entry<InetAddress, Collection<Range<Token>>> entry : rangesToFetch.get(keyspaceName))
{
InetAddress source = entry.getKey();
InetAddress preferred = SystemKeyspace.getPreferredIP(source);
Collection<Range<Token>> ranges = entry.getValue();
if (logger.isDebugEnabled())
logger.debug("Requesting from {} ranges {}", source, StringUtils.join(ranges, ", "));
stream.requestRanges(source, preferred, keyspaceName, ranges);
}
}
StreamResultFuture future = stream.execute();
Futures.addCallback(future, new FutureCallback<StreamState>()
{
public void onSuccess(StreamState finalState)
{
sendReplicationNotification(notifyEndpoint);
}
public void onFailure(Throwable t)
{
logger.warn("Streaming to restore replica count failed", t);
// We still want to send the notification
sendReplicationNotification(notifyEndpoint);
}
});
}
// needs to be modified to accept either a keyspace or ARS.
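/**
* For each range the leaving endpoint was responsible for, compute the endpoints that will become
* replicas of that range once the endpoint is gone but are not replicas of it today.
*/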
private Multimap<Range<Token>, InetAddress> getChangedRangesForLeaving(String keyspaceName, InetAddress endpoint)
{
// First get all ranges the leaving endpoint is responsible for
Collection<Range<Token>> ranges = getRangesForEndpoint(keyspaceName, endpoint);
if (logger.isDebugEnabled())
logger.debug("Node {} ranges [{}]", endpoint, StringUtils.join(ranges, ", "));
Map<Range<Token>, List<InetAddress>> currentReplicaEndpoints = new HashMap<>(ranges.size());
// Find (for each range) all nodes that store replicas for these ranges as well
TokenMetadata metadata = tokenMetadata.cloneOnlyTokenMap(); // don't do this in the loop! #7758
for (Range<Token> range : ranges)
currentReplicaEndpoints.put(range, Keyspace.open(keyspaceName).getReplicationStrategy().calculateNaturalEndpoints(range.right, metadata));
TokenMetadata temp = tokenMetadata.cloneAfterAllLeft();
// endpoint might or might not be 'leaving'. If it was not leaving (that is, removenode
// command was used), it is still present in temp and must be removed.
if (temp.isMember(endpoint))
temp.removeEndpoint(endpoint);
Multimap<Range<Token>, InetAddress> changedRanges = HashMultimap.create();
// Go through the ranges and for each range check who will be
// storing replicas for these ranges when the leaving endpoint
// is gone. Whoever is present in newReplicaEndpoints list, but
// not in the currentReplicaEndpoints list, will be needing the
// range.
for (Range<Token> range : ranges)
{
Collection<InetAddress> newReplicaEndpoints = Keyspace.open(keyspaceName).getReplicationStrategy().calculateNaturalEndpoints(range.right, temp);
newReplicaEndpoints.removeAll(currentReplicaEndpoints.get(range));
if (logger.isDebugEnabled())
if (newReplicaEndpoints.isEmpty())
logger.debug("Range {} already in all replicas", range);
else
logger.debug("Range {} will be responsibility of {}", range, StringUtils.join(newReplicaEndpoints, ", "));
changedRanges.putAll(range, newReplicaEndpoints);
}
return changedRanges;
}
public void onJoin(InetAddress endpoint, EndpointState epState)
{
for (Map.Entry<ApplicationState, VersionedValue> entry : epState.states())
{
onChange(endpoint, entry.getKey(), entry.getValue());
}
}
public void onAlive(InetAddress endpoint, EndpointState state)
{
if (tokenMetadata.isMember(endpoint))
notifyUp(endpoint);
}
public void onRemove(InetAddress endpoint)
{
tokenMetadata.removeEndpoint(endpoint);
PendingRangeCalculatorService.instance.update();
}
public void onDead(InetAddress endpoint, EndpointState state)
{
MessagingService.instance().convict(endpoint);
notifyDown(endpoint);
}
public void onRestart(InetAddress endpoint, EndpointState state)
{
// If we have restarted before the node was even marked down, we need to reset the connection pool
if (state.isAlive())
onDead(endpoint, state);
// Then, the node may have been upgraded and changed its messaging protocol version. If so, we
// want to update that before we mark the node live again to avoid problems like CASSANDRA-11128.
VersionedValue netVersion = state.getApplicationState(ApplicationState.NET_VERSION);
if (netVersion != null)
updateNetVersion(endpoint, netVersion);
}
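/** @return the on-disk data load of this node, formatted as a human-readable size string */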
public String getLoadString()
{
return FileUtils.stringifyFileSize(StorageMetrics.load.getCount());
}
public Map<String, String> getLoadMap()
{
Map<String, String> map = new HashMap<>();
for (Map.Entry<InetAddress, Double> entry : LoadBroadcaster.instance.getLoadInfo().entrySet())
{
map.put(entry.getKey().getHostAddress(), FileUtils.stringifyFileSize(entry.getValue()));
}
// gossiper doesn't see its own updates, so we need to special-case the local node
map.put(FBUtilities.getBroadcastAddress().getHostAddress(), getLoadString());
return map;
}
// TODO
public final void deliverHints(String host) throws UnknownHostException
{
throw new UnsupportedOperationException();
}
public Collection<Token> getLocalTokens()
{
Collection<Token> tokens = SystemKeyspace.getSavedTokens();
assert tokens != null && !tokens.isEmpty(); // should not be called before initServer sets this
return tokens;
}
@Nullable
public InetAddress getEndpointForHostId(UUID hostId)
{
return tokenMetadata.getEndpointForHostId(hostId);
}
@Nullable
public UUID getHostIdForEndpoint(InetAddress address)
{
return tokenMetadata.getHostId(address);
}
/* These methods belong to the MBean interface */
public List<String> getTokens()
{
return getTokens(FBUtilities.getBroadcastAddress());
}
public List<String> getTokens(String endpoint) throws UnknownHostException
{
return getTokens(InetAddress.getByName(endpoint));
}
private List<String> getTokens(InetAddress endpoint)
{
List<String> strTokens = new ArrayList<>();
for (Token tok : getTokenMetadata().getTokens(endpoint))
strTokens.add(tok.toString());
return strTokens;
}
public String getReleaseVersion()
{
return FBUtilities.getReleaseVersionString();
}
public String getSchemaVersion()
{
return Schema.instance.getVersion().toString();
}
public List<String> getLeavingNodes()
{
return stringify(tokenMetadata.getLeavingEndpoints());
}
public List<String> getMovingNodes()
{
List<String> endpoints = new ArrayList<>();
for (Pair<Token, InetAddress> node : tokenMetadata.getMovingEndpoints())
{
endpoints.add(node.right.getHostAddress());
}
return endpoints;
}
public List<String> getJoiningNodes()
{
return stringify(tokenMetadata.getBootstrapTokens().valueSet());
}
public List<String> getLiveNodes()
{
return stringify(Gossiper.instance.getLiveMembers());
}
public Set<InetAddress> getLiveRingMembers()
{
return getLiveRingMembers(false);
}
public Set<InetAddress> getLiveRingMembers(boolean excludeDeadStates)
{
Set<InetAddress> ret = new HashSet<>();
for (InetAddress ep : Gossiper.instance.getLiveMembers())
{
if (excludeDeadStates)
{
EndpointState epState = Gossiper.instance.getEndpointStateForEndpoint(ep);
if (epState == null || Gossiper.instance.isDeadState(epState))
continue;
}
if (tokenMetadata.isMember(ep))
ret.add(ep);
}
return ret;
}
public List<String> getUnreachableNodes()
{
return stringify(Gossiper.instance.getUnreachableMembers());
}
public String[] getAllDataFileLocations()
{
String[] locations = DatabaseDescriptor.getAllDataFileLocations();
for (int i = 0; i < locations.length; i++)
locations[i] = FileUtils.getCanonicalPath(locations[i]);
return locations;
}
public String getCommitLogLocation()
{
return FileUtils.getCanonicalPath(DatabaseDescriptor.getCommitLogLocation());
}
public String getSavedCachesLocation()
{
return FileUtils.getCanonicalPath(DatabaseDescriptor.getSavedCachesLocation());
}
private List<String> stringify(Iterable<InetAddress> endpoints)
{
List<String> stringEndpoints = new ArrayList<>();
for (InetAddress ep : endpoints)
{
stringEndpoints.add(ep.getHostAddress());
}
return stringEndpoints;
}
public int getCurrentGenerationNumber()
{
return Gossiper.instance.getCurrentGenerationNumber(FBUtilities.getBroadcastAddress());
}
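/**
* Trigger cleanup of keys that no longer belong to this node; delegates to the variant below with jobs = 0.
*/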
public int forceKeyspaceCleanup(String keyspaceName, String... tables) throws IOException, ExecutionException, InterruptedException
{
return forceKeyspaceCleanup(0, keyspaceName, tables);
}
public int forceKeyspaceCleanup(int jobs, String keyspaceName, String... tables) throws IOException, ExecutionException, InterruptedException
{
if (Schema.isLocalSystemKeyspace(keyspaceName))
throw new RuntimeException("Cleanup of the system keyspace is neither necessary nor wise");
if (!tokenMetadata.getPendingRanges(keyspaceName, FBUtilities.getBroadcastAddress()).isEmpty())
throw new RuntimeException("Node is involved in cluster membership changes. Not safe to run cleanup.");
CompactionManager.AllSSTableOpStatus status = CompactionManager.AllSSTableOpStatus.SUCCESSFUL;
for (ColumnFamilyStore cfStore : getValidColumnFamilies(false, false, keyspaceName, tables))
{
CompactionManager.AllSSTableOpStatus oneStatus = cfStore.forceCleanup(jobs);
if (oneStatus != CompactionManager.AllSSTableOpStatus.SUCCESSFUL)
status = oneStatus;
}
return status.statusCode;
}
public int scrub(boolean disableSnapshot, boolean skipCorrupted, String keyspaceName, String... tables) throws IOException, ExecutionException, InterruptedException
{
return scrub(disableSnapshot, skipCorrupted, true, 0, keyspaceName, tables);
}
public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, String keyspaceName, String... tables) throws IOException, ExecutionException, InterruptedException
{
return scrub(disableSnapshot, skipCorrupted, checkData, 0, keyspaceName, tables);
}
public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, int jobs, String keyspaceName, String... tables) throws IOException, ExecutionException, InterruptedException
{
return scrub(disableSnapshot, skipCorrupted, checkData, false, jobs, keyspaceName, tables);
}
public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, boolean reinsertOverflowedTTL, int jobs, String keyspaceName, String... tables) throws IOException, ExecutionException, InterruptedException
{
CompactionManager.AllSSTableOpStatus status = CompactionManager.AllSSTableOpStatus.SUCCESSFUL;
for (ColumnFamilyStore cfStore : getValidColumnFamilies(true, false, keyspaceName, tables))
{
CompactionManager.AllSSTableOpStatus oneStatus = cfStore.scrub(disableSnapshot, skipCorrupted, reinsertOverflowedTTL, checkData, jobs);
if (oneStatus != CompactionManager.AllSSTableOpStatus.SUCCESSFUL)
status = oneStatus;
}
return status.statusCode;
}
public int verify(boolean extendedVerify, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException
{
CompactionManager.AllSSTableOpStatus status = CompactionManager.AllSSTableOpStatus.SUCCESSFUL;
for (ColumnFamilyStore cfStore : getValidColumnFamilies(false, false, keyspaceName, tableNames))
{
CompactionManager.AllSSTableOpStatus oneStatus = cfStore.verify(extendedVerify);
if (oneStatus != CompactionManager.AllSSTableOpStatus.SUCCESSFUL)
status = oneStatus;
}
return status.statusCode;
}
public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, String... tableNames) throws IOException, ExecutionException, InterruptedException
{
return upgradeSSTables(keyspaceName, excludeCurrentVersion, 0, tableNames);
}
public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, int jobs, String... tableNames) throws IOException, ExecutionException, InterruptedException
{
CompactionManager.AllSSTableOpStatus status = CompactionManager.AllSSTableOpStatus.SUCCESSFUL;
for (ColumnFamilyStore cfStore : getValidColumnFamilies(true, true, keyspaceName, tableNames))
{
CompactionManager.AllSSTableOpStatus oneStatus = cfStore.sstablesRewrite(excludeCurrentVersion, jobs);
if (oneStatus != CompactionManager.AllSSTableOpStatus.SUCCESSFUL)
status = oneStatus;
}
return status.statusCode;
}
public void forceKeyspaceCompaction(boolean splitOutput, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException
{
for (ColumnFamilyStore cfStore : getValidColumnFamilies(true, false, keyspaceName, tableNames))
{
cfStore.forceMajorCompaction(splitOutput);
}
}
/**
* Takes the snapshot for the given keyspaces. A snapshot name must be specified.
*
* @param tag the tag given to the snapshot; may not be null or empty
* @param keyspaceNames the names of the keyspaces to snapshot; empty means "all."
*/
public void takeSnapshot(String tag, String... keyspaceNames) throws IOException
{
if (operationMode == Mode.JOINING)
throw new IOException("Cannot snapshot until bootstrap completes");
if (tag == null || tag.equals(""))
throw new IOException("You must supply a snapshot name.");
Iterable<Keyspace> keyspaces;
if (keyspaceNames.length == 0)
{
keyspaces = Keyspace.all();
}
else
{
ArrayList<Keyspace> t = new ArrayList<>(keyspaceNames.length);
for (String keyspaceName : keyspaceNames)
t.add(getValidKeyspace(keyspaceName));
keyspaces = t;
}
// Do a check to see if this snapshot exists before we actually snapshot
for (Keyspace keyspace : keyspaces)
if (keyspace.snapshotExists(tag))
throw new IOException("Snapshot " + tag + " already exists.");
for (Keyspace keyspace : keyspaces)
keyspace.snapshot(tag, null);
}
/**
* Takes the snapshot of a specific table. A snapshot name must be specified.
*
* @param keyspaceName the keyspace which holds the specified table
* @param tableName the table to snapshot
* @param tag the tag given to the snapshot; may not be null or empty
*/
public void takeTableSnapshot(String keyspaceName, String tableName, String tag) throws IOException
{
if (keyspaceName == null)
throw new IOException("You must supply a keyspace name");
if (operationMode == Mode.JOINING)
throw new IOException("Cannot snapshot until bootstrap completes");
if (tableName == null)
throw new IOException("You must supply a table name");
if (tableName.contains("."))
throw new IllegalArgumentException("Cannot take a snapshot of a secondary index by itself. Run snapshot on the table that owns the index.");
if (tag == null || tag.equals(""))
throw new IOException("You must supply a snapshot name.");
Keyspace keyspace = getValidKeyspace(keyspaceName);
ColumnFamilyStore columnFamilyStore = keyspace.getColumnFamilyStore(tableName);
if (columnFamilyStore.snapshotExists(tag))
throw new IOException("Snapshot " + tag + " already exists.");
columnFamilyStore.snapshot(tag);
}
/**
* Takes a snapshot of multiple tables from different keyspaces. A snapshot name must be specified.
*
*
* @param tag
* the tag given to the snapshot; may not be null or empty
* @param tableList
* list of tables from different keyspaces in the form of ks1.cf1 ks2.cf2
*/
@Override
public void takeMultipleTableSnapshot(String tag, String... tableList)
throws IOException
{
Map<Keyspace, List<String>> keyspaceColumnfamily = new HashMap<Keyspace, List<String>>();
for (String table : tableList)
{
String[] splittedString = table.split("\\.");
if (splittedString.length == 2)
{
String keyspaceName = splittedString[0];
String tableName = splittedString[1];
if (keyspaceName == null)
throw new IOException("You must supply a keyspace name");
if (operationMode.equals(Mode.JOINING))
throw new IOException("Cannot snapshot until bootstrap completes");
if (tableName == null)
throw new IOException("You must supply a table name");
if (tag == null || tag.equals(""))
throw new IOException("You must supply a snapshot name.");
Keyspace keyspace = getValidKeyspace(keyspaceName);
ColumnFamilyStore columnFamilyStore = keyspace.getColumnFamilyStore(tableName);
// As there can be multiple column families from the same keyspace, check if a snapshot exists for that specific
// column family and not for the whole keyspace
if (columnFamilyStore.snapshotExists(tag))
throw new IOException("Snapshot " + tag + " already exists.");
if (!keyspaceColumnfamily.containsKey(keyspace))
{
keyspaceColumnfamily.put(keyspace, new ArrayList<String>());
}
// Add the keyspace and column family to the map in order to keep the snapshot process atomic:
// no snapshot is taken if any of the above checks fails for any keyspace or column family.
keyspaceColumnfamily.get(keyspace).add(tableName);
}
else
{
throw new IllegalArgumentException(
"Cannot take a snapshot on secondary index or invalid column family name. You must supply a column family name in the form of keyspace.columnfamily");
}
}
for (Entry<Keyspace, List<String>> entry : keyspaceColumnfamily.entrySet())
{
for (String table : entry.getValue())
entry.getKey().snapshot(tag, table);
}
}
private Keyspace getValidKeyspace(String keyspaceName) throws IOException
{
if (!Schema.instance.getKeyspaces().contains(keyspaceName))
{
throw new IOException("Keyspace " + keyspaceName + " does not exist");
}
return Keyspace.open(keyspaceName);
}
/**
* Remove the snapshot with the given name from the given keyspaces.
* If no tag is specified we will remove all snapshots.
*/
public void clearSnapshot(String tag, String... keyspaceNames) throws IOException
{
if(tag == null)
tag = "";
Set<String> keyspaces = new HashSet<>();
for (String dataDir : DatabaseDescriptor.getAllDataFileLocations())
{
for(String keyspaceDir : new File(dataDir).list())
{
// Only add a ks if it has been specified as a param, assuming params were actually provided.
if (keyspaceNames.length > 0 && !Arrays.asList(keyspaceNames).contains(keyspaceDir))
continue;
keyspaces.add(keyspaceDir);
}
}
for (String keyspace : keyspaces)
Keyspace.clearSnapshot(tag, keyspace);
if (logger.isDebugEnabled())
logger.debug("Cleared out snapshot directories");
}
public Map<String, TabularData> getSnapshotDetails()
{
Map<String, TabularData> snapshotMap = new HashMap<>();
for (Keyspace keyspace : Keyspace.all())
{
if (Schema.isLocalSystemKeyspace(keyspace.getName()))
continue;
for (ColumnFamilyStore cfStore : keyspace.getColumnFamilyStores())
{
for (Map.Entry<String, Pair<Long, Long>> snapshotDetail : cfStore.getSnapshotDetails().entrySet())
{
TabularDataSupport data = (TabularDataSupport)snapshotMap.get(snapshotDetail.getKey());
if (data == null)
{
data = new TabularDataSupport(SnapshotDetailsTabularData.TABULAR_TYPE);
snapshotMap.put(snapshotDetail.getKey(), data);
}
SnapshotDetailsTabularData.from(snapshotDetail.getKey(), keyspace.getName(), cfStore.getColumnFamilyName(), snapshotDetail, data);
}
}
}
return snapshotMap;
}
public long trueSnapshotsSize()
{
long total = 0;
for (Keyspace keyspace : Keyspace.all())
{
if (Schema.isLocalSystemKeyspace(keyspace.getName()))
continue;
for (ColumnFamilyStore cfStore : keyspace.getColumnFamilyStores())
{
total += cfStore.trueSnapshotsSize();
}
}
return total;
}
public void refreshSizeEstimates() throws ExecutionException
{
cleanupSizeEstimates();
FBUtilities.waitOnFuture(ScheduledExecutors.optionalTasks.submit(SizeEstimatesRecorder.instance));
}
public void cleanupSizeEstimates()
{
SetMultimap<String, String> sizeEstimates = SystemKeyspace.getTablesWithSizeEstimates();
for (Entry<String, Collection<String>> tablesByKeyspace : sizeEstimates.asMap().entrySet())
{
String keyspace = tablesByKeyspace.getKey();
if (!Schema.instance.getKeyspaces().contains(keyspace))
{
SystemKeyspace.clearSizeEstimates(keyspace);
}
else
{
for (String table : tablesByKeyspace.getValue())
{
if (!Schema.instance.hasCF(Pair.create(keyspace, table)))
SystemKeyspace.clearSizeEstimates(keyspace, table);
}
}
}
}
/**
* @param allowIndexes Allow index CF names to be passed in
* @param autoAddIndexes Automatically add secondary indexes if a CF has them
* @param keyspaceName keyspace
* @param cfNames CFs
* @throws java.lang.IllegalArgumentException when given CF name does not exist
*/
public Iterable<ColumnFamilyStore> getValidColumnFamilies(boolean allowIndexes, boolean autoAddIndexes, String keyspaceName, String... cfNames) throws IOException
{
Keyspace keyspace = getValidKeyspace(keyspaceName);
return keyspace.getValidColumnFamilies(allowIndexes, autoAddIndexes, cfNames);
}
/**
* Flush all memtables for a keyspace and column families.
* @param keyspaceName
* @param tableNames
* @throws IOException
*/
public void forceKeyspaceFlush(String keyspaceName, String... tableNames) throws IOException
{
for (ColumnFamilyStore cfStore : getValidColumnFamilies(true, false, keyspaceName, tableNames))
{
logger.debug("Forcing flush on keyspace {}, CF {}", keyspaceName, cfStore.name);
cfStore.forceBlockingFlush();
}
}
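/**
* JMX entry point for user-requested repair: parses the repair options and, when no ranges are given,
* fills in this node's primary or local ranges before starting the repair task.
*
* @return the repair command number, or 0 if there was nothing to repair
*/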
public int repairAsync(String keyspace, Map<String, String> repairSpec)
{
RepairOption option = RepairOption.parse(repairSpec, tokenMetadata.partitioner);
// if ranges are not specified
if (option.getRanges().isEmpty())
{
if (option.isPrimaryRange())
{
// when repairing only primary range, neither dataCenters nor hosts can be set
if (option.getDataCenters().isEmpty() && option.getHosts().isEmpty())
option.getRanges().addAll(getPrimaryRanges(keyspace));
// except dataCenters only contain local DC (i.e. -local)
else if (option.isInLocalDCOnly())
option.getRanges().addAll(getPrimaryRangesWithinDC(keyspace));
else
throw new IllegalArgumentException("You need to run primary range repair on all nodes in the cluster.");
}
else
{
option.getRanges().addAll(getLocalRanges(keyspace));
}
}
return forceRepairAsync(keyspace, option, false);
}
@Deprecated
public int forceRepairAsync(String keyspace,
boolean isSequential,
Collection<String> dataCenters,
Collection<String> hosts,
boolean primaryRange,
boolean fullRepair,
String... tableNames)
{
return forceRepairAsync(keyspace, isSequential ? RepairParallelism.SEQUENTIAL.ordinal() : RepairParallelism.PARALLEL.ordinal(), dataCenters, hosts, primaryRange, fullRepair, tableNames);
}
@Deprecated
public int forceRepairAsync(String keyspace,
int parallelismDegree,
Collection<String> dataCenters,
Collection<String> hosts,
boolean primaryRange,
boolean fullRepair,
String... tableNames)
{
if (parallelismDegree < 0 || parallelismDegree > RepairParallelism.values().length - 1)
{
throw new IllegalArgumentException("Invalid parallelism degree specified: " + parallelismDegree);
}
RepairParallelism parallelism = RepairParallelism.values()[parallelismDegree];
if (FBUtilities.isWindows() && parallelism != RepairParallelism.PARALLEL)
{
logger.warn("Snapshot-based repair is not yet supported on Windows. Reverting to parallel repair.");
parallelism = RepairParallelism.PARALLEL;
}
RepairOption options = new RepairOption(parallelism, primaryRange, !fullRepair, false, 1, Collections.<Range<Token>>emptyList(), false, false);
if (dataCenters != null)
{
options.getDataCenters().addAll(dataCenters);
}
if (hosts != null)
{
options.getHosts().addAll(hosts);
}
if (primaryRange)
{
// when repairing only primary range, neither dataCenters nor hosts can be set
if (options.getDataCenters().isEmpty() && options.getHosts().isEmpty())
options.getRanges().addAll(getPrimaryRanges(keyspace));
// except dataCenters only contain local DC (i.e. -local)
else if (options.getDataCenters().size() == 1 && options.getDataCenters().contains(DatabaseDescriptor.getLocalDataCenter()))
options.getRanges().addAll(getPrimaryRangesWithinDC(keyspace));
else
throw new IllegalArgumentException("You need to run primary range repair on all nodes in the cluster.");
}
else
{
options.getRanges().addAll(getLocalRanges(keyspace));
}
if (tableNames != null)
{
for (String table : tableNames)
{
options.getColumnFamilies().add(table);
}
}
return forceRepairAsync(keyspace, options, true);
}
@Deprecated
public int forceRepairAsync(String keyspace,
boolean isSequential,
boolean isLocal,
boolean primaryRange,
boolean fullRepair,
String... tableNames)
{
Set<String> dataCenters = null;
if (isLocal)
{
dataCenters = Sets.newHashSet(DatabaseDescriptor.getLocalDataCenter());
}
return forceRepairAsync(keyspace, isSequential, dataCenters, null, primaryRange, fullRepair, tableNames);
}
@Deprecated
public int forceRepairRangeAsync(String beginToken,
String endToken,
String keyspaceName,
boolean isSequential,
Collection<String> dataCenters,
Collection<String> hosts,
boolean fullRepair,
String... tableNames)
{
return forceRepairRangeAsync(beginToken, endToken, keyspaceName,
isSequential ? RepairParallelism.SEQUENTIAL.ordinal() : RepairParallelism.PARALLEL.ordinal(),
dataCenters, hosts, fullRepair, tableNames);
}
@Deprecated
public int forceRepairRangeAsync(String beginToken,
String endToken,
String keyspaceName,
int parallelismDegree,
Collection<String> dataCenters,
Collection<String> hosts,
boolean fullRepair,
String... tableNames)
{
if (parallelismDegree < 0 || parallelismDegree > RepairParallelism.values().length - 1)
{
throw new IllegalArgumentException("Invalid parallelism degree specified: " + parallelismDegree);
}
RepairParallelism parallelism = RepairParallelism.values()[parallelismDegree];
if (FBUtilities.isWindows() && parallelism != RepairParallelism.PARALLEL)
{
logger.warn("Snapshot-based repair is not yet supported on Windows. Reverting to parallel repair.");
parallelism = RepairParallelism.PARALLEL;
}
if (!fullRepair)
logger.warn("Incremental repair can't be requested with subrange repair " +
"because each subrange repair would generate an anti-compacted table. " +
"The repair will occur but without anti-compaction.");
Collection<Range<Token>> repairingRange = createRepairRangeFrom(beginToken, endToken);
RepairOption options = new RepairOption(parallelism, false, !fullRepair, false, 1, repairingRange, true, false);
if (dataCenters != null)
{
options.getDataCenters().addAll(dataCenters);
}
if (hosts != null)
{
options.getHosts().addAll(hosts);
}
if (tableNames != null)
{
for (String table : tableNames)
{
options.getColumnFamilies().add(table);
}
}
logger.info("starting user-requested repair of range {} for keyspace {} and column families {}",
repairingRange, keyspaceName, tableNames);
return forceRepairAsync(keyspaceName, options, true);
}
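/*
 * Subrange variant: the caller supplies an explicit (beginToken, endToken] interval,
 * which createRepairRangeFrom below splits along existing ring boundaries. As the
 * warning above states, incremental repair is effectively downgraded for subrange
 * repairs: the repair still runs, but no anti-compaction is performed.
 */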
@Deprecated
public int forceRepairRangeAsync(String beginToken,
String endToken,
String keyspaceName,
boolean isSequential,
boolean isLocal,
boolean fullRepair,
String... tableNames)
{
Set<String> dataCenters = null;
if (isLocal)
{
dataCenters = Sets.newHashSet(DatabaseDescriptor.getLocalDataCenter());
}
return forceRepairRangeAsync(beginToken, endToken, keyspaceName, isSequential, dataCenters, null, fullRepair, tableNames);
}
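/*
 * The isLocal convenience overloads above simply translate the -local flag into a
 * dataCenters set containing only DatabaseDescriptor.getLocalDataCenter() before
 * delegating to the collection-based overloads.
 */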
/**
* Create collection of ranges that match ring layout from given tokens.
*
* @param beginToken beginning token of the range
* @param endToken end token of the range
* @return collection of ranges that match ring layout in TokenMetadata
*/
@VisibleForTesting
Collection<Range<Token>> createRepairRangeFrom(String beginToken, String endToken)
{
Token parsedBeginToken = getTokenFactory().fromString(beginToken);
Token parsedEndToken = getTokenFactory().fromString(endToken);
// Break up given range to match ring layout in TokenMetadata
ArrayList<Range<Token>> repairingRange = new ArrayList<>();
ArrayList<Token> tokens = new ArrayList<>(tokenMetadata.sortedTokens());
if (!tokens.contains(parsedBeginToken))
{
tokens.add(parsedBeginToken);
}
if (!tokens.contains(parsedEndToken))
{
tokens.add(parsedEndToken);
}
// tokens now contains every ring token plus the requested begin and end tokens
Collections.sort(tokens);
int start = tokens.indexOf(parsedBeginToken), end = tokens.indexOf(parsedEndToken);
for (int i = start; i != end; i = (i+1) % tokens.size())
{
Range<Token> range = new Range<>(tokens.get(i), tokens.get((i+1) % tokens.size()));
repairingRange.add(range);
}
return repairingRange;
}
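/*
 * Worked example (illustrative tokens only): with sortedTokens() == [0, 100, 200] and a
 * requested interval (50, 150], the begin/end tokens are inserted and sorted to give
 * [0, 50, 100, 150, 200]; walking from 50 to 150 produces the ring-aligned ranges
 * (50, 100] and (100, 150]. A wrapping request such as (150, 50] is handled by the
 * modulo step in the loop, yielding (150, 200], (200, 0] and (0, 50].
 */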
public TokenFactory getTokenFactory()
{
return tokenMetadata.partitioner.getTokenFactory();
}
public int forceRepairAsync(String keyspace, RepairOption options, boolean legacy)
{
if (options.getRanges().isEmpty() || Keyspace.open(keyspace).getReplicationStrategy().getReplicationFactor() < 2)
return 0;
int cmd = nextRepairCommand.incrementAndGet();
new Thread(NamedThreadFactory.threadLocalDeallocator(createRepairTask(cmd, keyspace, options, legacy))).start();
return cmd;
}
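/*
 * A return value of 0 means nothing was scheduled: either there are no ranges to repair
 * or the keyspace's replication factor is below 2, so there is no other replica to sync
 * with. Otherwise the returned command number identifies this repair in progress
 * notifications. The 'legacy' flag appears to indicate that the call came through one of
 * the deprecated overloads, so progress is also reported in the older notification format.
 */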
private FutureTask<Object>