org.apache.cassandra.dht.BootStrapper Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of cassandra-all Show documentation
The Apache Cassandra Project develops a highly scalable second-generation distributed database, bringing together Dynamo's fully distributed design and Bigtable's ColumnFamily-based data model.
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.dht;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.cassandra.utils.concurrent.Future;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.schema.Schema;
import org.apache.cassandra.db.Keyspace;
import org.apache.cassandra.dht.tokenallocator.TokenAllocation;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.gms.Gossiper;
import org.apache.cassandra.locator.AbstractReplicationStrategy;
import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.locator.TokenMetadata;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.streaming.*;
import org.apache.cassandra.utils.FBUtilities;
import org.apache.cassandra.utils.progress.ProgressEvent;
import org.apache.cassandra.utils.progress.ProgressEventNotifierSupport;
import org.apache.cassandra.utils.progress.ProgressEventType;
/**
 * Drives the bootstrap of a new node: picks the tokens it will own and streams
 * the data for those token ranges from the existing replicas.
 *
 * Progress of the streaming phase is reported through the inherited
 * {@link ProgressEventNotifierSupport} under the tag {@code "bootstrap"}.
 */
public class BootStrapper extends ProgressEventNotifierSupport
{
    private static final Logger logger = LoggerFactory.getLogger(BootStrapper.class);

    /* endpoint that needs to be bootstrapped */
    protected final InetAddressAndPort address;
    /* token of the node being bootstrapped. */
    protected final Collection<Token> tokens;
    /* ring metadata used to compute the pending ranges for this node */
    protected final TokenMetadata tokenMetadata;

    /**
     * @param address endpoint being bootstrapped; must not be null
     * @param tokens  tokens the new node will own; must be non-null and non-empty
     * @param tmd     current token metadata of the ring
     */
    public BootStrapper(InetAddressAndPort address, Collection<Token> tokens, TokenMetadata tmd)
    {
        assert address != null;
        assert tokens != null && !tokens.isEmpty();

        this.address = address;
        this.tokens = tokens;
        this.tokenMetadata = tmd;
    }

    /**
     * Kicks off asynchronous streaming of all ranges this node will become a
     * replica for, across every non-local-strategy keyspace.
     *
     * @param stateStore           store used to resume/skip already-streamed ranges
     * @param useStrictConsistency whether to stream strictly from the replica losing the range
     * @return a future completing with the final {@link StreamState} of the bootstrap streaming plan
     */
    public Future<StreamState> bootstrap(StreamStateStore stateStore, boolean useStrictConsistency)
    {
        logger.trace("Beginning bootstrap process");

        RangeStreamer streamer = new RangeStreamer(tokenMetadata,
                                                   tokens,
                                                   address,
                                                   StreamOperation.BOOTSTRAP,
                                                   useStrictConsistency,
                                                   DatabaseDescriptor.getEndpointSnitch(),
                                                   stateStore,
                                                   true,
                                                   DatabaseDescriptor.getStreamingConnectionsPerHost());

        final Collection<String> nonLocalStrategyKeyspaces = Schema.instance.getNonLocalStrategyKeyspaces().names();
        if (nonLocalStrategyKeyspaces.isEmpty())
            logger.debug("Schema does not contain any non-local keyspaces to stream on bootstrap");
        for (String keyspaceName : nonLocalStrategyKeyspaces)
        {
            AbstractReplicationStrategy strategy = Keyspace.open(keyspaceName).getReplicationStrategy();
            streamer.addRanges(keyspaceName, strategy.getPendingAddressRanges(tokenMetadata, tokens, address));
        }

        StreamResultFuture bootstrapStreamResult = streamer.fetchAsync();
        // Translate low-level stream events into coarse-grained progress events
        // (fired under the "bootstrap" tag) for JMX/nodetool observers.
        bootstrapStreamResult.addEventListener(new StreamEventHandler()
        {
            private final AtomicInteger receivedFiles = new AtomicInteger();
            private final AtomicInteger totalFilesToReceive = new AtomicInteger();

            @Override
            public void handleStreamEvent(StreamEvent event)
            {
                switch (event.eventType)
                {
                    case STREAM_PREPARED:
                        StreamEvent.SessionPreparedEvent prepared = (StreamEvent.SessionPreparedEvent) event;
                        // each session contributes its file count to the shared total
                        int currentTotal = totalFilesToReceive.addAndGet((int) prepared.session.getTotalFilesToReceive());
                        ProgressEvent prepareProgress = new ProgressEvent(ProgressEventType.PROGRESS, receivedFiles.get(), currentTotal, "prepare with " + prepared.session.peer + " complete");
                        fireProgressEvent("bootstrap", prepareProgress);
                        break;

                    case FILE_PROGRESS:
                        StreamEvent.ProgressEvent progress = (StreamEvent.ProgressEvent) event;
                        if (progress.progress.isCompleted())
                        {
                            int received = receivedFiles.incrementAndGet();
                            ProgressEvent currentProgress = new ProgressEvent(ProgressEventType.PROGRESS, received, totalFilesToReceive.get(), "received file " + progress.progress.fileName);
                            fireProgressEvent("bootstrap", currentProgress);
                        }
                        break;

                    case STREAM_COMPLETE:
                        StreamEvent.SessionCompleteEvent completeEvent = (StreamEvent.SessionCompleteEvent) event;
                        ProgressEvent completeProgress = new ProgressEvent(ProgressEventType.PROGRESS, receivedFiles.get(), totalFilesToReceive.get(), "session with " + completeEvent.peer + " complete");
                        fireProgressEvent("bootstrap", completeProgress);
                        break;
                }
            }

            @Override
            public void onSuccess(StreamState streamState)
            {
                ProgressEventType type;
                String message;

                // "success" of the future still covers partially-failed plans;
                // surface that distinction to progress listeners
                if (streamState.hasFailedSession())
                {
                    type = ProgressEventType.ERROR;
                    message = "Some bootstrap stream failed";
                }
                else
                {
                    type = ProgressEventType.SUCCESS;
                    message = "Bootstrap streaming success";
                }
                ProgressEvent currentProgress = new ProgressEvent(type, receivedFiles.get(), totalFilesToReceive.get(), message);
                fireProgressEvent("bootstrap", currentProgress);
            }

            @Override
            public void onFailure(Throwable throwable)
            {
                ProgressEvent currentProgress = new ProgressEvent(ProgressEventType.ERROR, receivedFiles.get(), totalFilesToReceive.get(), throwable.getMessage());
                fireProgressEvent("bootstrap", currentProgress);
            }
        });
        return bootstrapStreamResult;
    }

    /**
     * if initialtoken was specified, use that (split on comma).
     * otherwise, if allocationKeyspace is specified use the token allocation algorithm to generate suitable tokens
     * else choose num_tokens tokens at random
     *
     * @param metadata           current ring metadata
     * @param address            endpoint being bootstrapped
     * @param schemaTimeoutMillis how long to wait for schema agreement before allocating
     * @param ringTimeoutMillis  how long to wait for ring information before allocating
     * @return the tokens the new node should claim
     * @throws ConfigurationException on invalid num_tokens, an unusable allocation keyspace,
     *         or a manually-specified token already owned by another node
     */
    public static Collection<Token> getBootstrapTokens(final TokenMetadata metadata, InetAddressAndPort address, long schemaTimeoutMillis, long ringTimeoutMillis) throws ConfigurationException
    {
        String allocationKeyspace = DatabaseDescriptor.getAllocateTokensForKeyspace();
        Integer allocationLocalRf = DatabaseDescriptor.getAllocateTokensForLocalRf();
        Collection<String> initialTokens = DatabaseDescriptor.getInitialTokens();
        if (!initialTokens.isEmpty() && allocationKeyspace != null)
            logger.warn("manually specified tokens override automatic allocation");

        // if user specified tokens, use those
        if (!initialTokens.isEmpty())
        {
            Collection<Token> tokens = getSpecifiedTokens(metadata, initialTokens);
            BootstrapDiagnostics.useSpecifiedTokens(address, allocationKeyspace, tokens, DatabaseDescriptor.getNumTokens());
            return tokens;
        }

        int numTokens = DatabaseDescriptor.getNumTokens();
        if (numTokens < 1)
            throw new ConfigurationException("num_tokens must be >= 1");

        // prefer the balance-aware allocators when configured, keyspace-based first
        if (allocationKeyspace != null)
            return allocateTokens(metadata, address, allocationKeyspace, numTokens, schemaTimeoutMillis, ringTimeoutMillis);

        if (allocationLocalRf != null)
            return allocateTokens(metadata, address, allocationLocalRf, numTokens, schemaTimeoutMillis, ringTimeoutMillis);

        if (numTokens == 1)
            logger.warn("Picking random token for a single vnode.  You should probably add more vnodes and/or use the automatic token allocation mechanism.");

        Collection<Token> tokens = getRandomTokens(metadata, numTokens);
        BootstrapDiagnostics.useRandomTokens(address, metadata, numTokens, tokens);
        return tokens;
    }

    /**
     * Parses operator-supplied token strings, rejecting any token that is
     * already owned by a live endpoint in the ring.
     */
    private static Collection<Token> getSpecifiedTokens(final TokenMetadata metadata,
                                                        Collection<String> initialTokens)
    {
        logger.info("tokens manually specified as {}", initialTokens);
        List<Token> tokens = new ArrayList<>(initialTokens.size());
        for (String tokenString : initialTokens)
        {
            Token token = metadata.partitioner.getTokenFactory().fromString(tokenString);
            if (metadata.getEndpoint(token) != null)
                throw new ConfigurationException("Bootstrapping to existing token " + tokenString + " is not allowed (decommission/removenode the old node first).");
            tokens.add(token);
        }
        return tokens;
    }

    /**
     * Allocates tokens balanced against the replication strategy of the given keyspace.
     * Waits for schema agreement (and gossip settlement when not a single loopback node)
     * before consulting the ring.
     *
     * @throws ConfigurationException if the allocation keyspace cannot be opened
     */
    static Collection<Token> allocateTokens(final TokenMetadata metadata,
                                            InetAddressAndPort address,
                                            String allocationKeyspace,
                                            int numTokens,
                                            long schemaTimeoutMillis,
                                            long ringTimeoutMillis)
    {
        StorageService.instance.waitForSchema(schemaTimeoutMillis, ringTimeoutMillis);
        if (!FBUtilities.getBroadcastAddressAndPort().equals(InetAddressAndPort.getLoopbackAddress()))
            Gossiper.waitToSettle();

        Keyspace ks = Keyspace.open(allocationKeyspace);
        if (ks == null)
            throw new ConfigurationException("Problem opening token allocation keyspace " + allocationKeyspace);
        AbstractReplicationStrategy rs = ks.getReplicationStrategy();

        Collection<Token> tokens = TokenAllocation.allocateTokens(metadata, rs, address, numTokens);
        BootstrapDiagnostics.tokensAllocated(address, metadata, allocationKeyspace, numTokens, tokens);
        return tokens;
    }

    /**
     * Allocates tokens balanced for a simple local replication factor rather
     * than a concrete keyspace. Same schema/gossip preconditions as the
     * keyspace-based variant.
     */
    static Collection<Token> allocateTokens(final TokenMetadata metadata,
                                            InetAddressAndPort address,
                                            int rf,
                                            int numTokens,
                                            long schemaTimeoutMillis,
                                            long ringTimeoutMillis)
    {
        StorageService.instance.waitForSchema(schemaTimeoutMillis, ringTimeoutMillis);
        if (!FBUtilities.getBroadcastAddressAndPort().equals(InetAddressAndPort.getLoopbackAddress()))
            Gossiper.waitToSettle();

        Collection<Token> tokens = TokenAllocation.allocateTokens(metadata, rf, address, numTokens);
        BootstrapDiagnostics.tokensAllocated(address, metadata, rf, numTokens, tokens);
        return tokens;
    }

    /**
     * Picks {@code numTokens} distinct random tokens that are not already
     * owned by any endpoint. Loops until enough unowned tokens are found.
     */
    public static Collection<Token> getRandomTokens(TokenMetadata metadata, int numTokens)
    {
        Set<Token> tokens = new HashSet<>(numTokens);
        while (tokens.size() < numTokens)
        {
            Token token = metadata.partitioner.getRandomToken();
            // skip tokens already claimed in the ring; Set dedupes repeats
            if (metadata.getEndpoint(token) == null)
                tokens.add(token);
        }

        logger.info("Generated random tokens. tokens are {}", tokens);
        return tokens;
    }
}
© 2015 - 2024 Weber Informatics LLC | Privacy Policy