/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.service;
import java.io.IOException;
import java.net.UnknownHostException;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.management.openmbean.CompositeData;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Multimap;
import com.google.common.util.concurrent.AbstractFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import org.apache.cassandra.concurrent.DebuggableThreadPoolExecutor;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.locator.EndpointsByRange;
import org.apache.cassandra.locator.EndpointsForRange;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.concurrent.JMXEnabledThreadPoolExecutor;
import org.apache.cassandra.concurrent.NamedThreadFactory;
import org.apache.cassandra.concurrent.ScheduledExecutors;
import org.apache.cassandra.config.Config;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Keyspace;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.exceptions.RequestFailureReason;
import org.apache.cassandra.gms.ApplicationState;
import org.apache.cassandra.gms.EndpointState;
import org.apache.cassandra.gms.FailureDetector;
import org.apache.cassandra.gms.Gossiper;
import org.apache.cassandra.gms.IEndpointStateChangeSubscriber;
import org.apache.cassandra.gms.IFailureDetectionEventListener;
import org.apache.cassandra.gms.IFailureDetector;
import org.apache.cassandra.gms.VersionedValue;
import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.locator.TokenMetadata;
import org.apache.cassandra.metrics.RepairMetrics;
import org.apache.cassandra.net.RequestCallback;
import org.apache.cassandra.net.Verb;
import org.apache.cassandra.net.Message;
import org.apache.cassandra.net.MessagingService;
import org.apache.cassandra.repair.CommonRange;
import org.apache.cassandra.repair.RepairJobDesc;
import org.apache.cassandra.repair.RepairParallelism;
import org.apache.cassandra.repair.RepairSession;
import org.apache.cassandra.repair.Scheduler;
import org.apache.cassandra.repair.consistent.CoordinatorSessions;
import org.apache.cassandra.repair.consistent.LocalSessions;
import org.apache.cassandra.repair.consistent.admin.CleanupSummary;
import org.apache.cassandra.repair.consistent.admin.PendingStats;
import org.apache.cassandra.repair.consistent.admin.RepairStats;
import org.apache.cassandra.repair.consistent.RepairedState;
import org.apache.cassandra.repair.consistent.admin.SchemaArgsParser;
import org.apache.cassandra.repair.messages.CleanupMessage;
import org.apache.cassandra.repair.messages.PrepareMessage;
import org.apache.cassandra.repair.messages.RepairMessage;
import org.apache.cassandra.repair.messages.RepairOption;
import org.apache.cassandra.repair.messages.SyncResponse;
import org.apache.cassandra.repair.messages.ValidationResponse;
import org.apache.cassandra.schema.TableId;
import org.apache.cassandra.streaming.PreviewKind;
import org.apache.cassandra.utils.FBUtilities;
import org.apache.cassandra.utils.MBeanWrapper;
import org.apache.cassandra.utils.Pair;
import org.apache.cassandra.utils.UUIDGen;
import static com.google.common.collect.Iterables.concat;
import static com.google.common.collect.Iterables.transform;
import static org.apache.cassandra.net.Verb.PREPARE_MSG;
/**
* ActiveRepairService is the starting point for manual "active" repairs.
*
* Each user-triggered repair corresponds to one or more repair sessions, one
* for each token range to repair. One repair session might repair multiple
* column families. For each of those column families, the repair session will
* request merkle trees for each replica of the range being repaired, diff those
* trees upon receiving them, schedule the streaming of the parts to repair
* (based on the tree diffs) and wait for all those operations. See
* RepairSession for more details.
*
* The creation of a repair session is done through submitRepairSession, which
* returns a future on the completion of that session.
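*
* A minimal usage sketch (the arguments are intentionally elided here; see
* submitRepairSession for the full, version-specific parameter list):
* <pre>{@code
* RepairSession session = ActiveRepairService.instance.submitRepairSession(...);
* session.addListener(() -> logger.info("repair session finished"),
*                     MoreExecutors.directExecutor());
* }</pre>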
*/
public class ActiveRepairService implements IEndpointStateChangeSubscriber, IFailureDetectionEventListener, ActiveRepairServiceMBean
{
public enum ParentRepairStatus
{
IN_PROGRESS, COMPLETED, FAILED
}
public static class ConsistentSessions
{
public final LocalSessions local = new LocalSessions();
public final CoordinatorSessions coordinated = new CoordinatorSessions();
}
public final ConsistentSessions consistent = new ConsistentSessions();
private boolean registeredForEndpointChanges = false;
private static final Logger logger = LoggerFactory.getLogger(ActiveRepairService.class);
// singleton enforcement
public static final ActiveRepairService instance = new ActiveRepairService(FailureDetector.instance, Gossiper.instance);
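// repairedAt == UNREPAIRED_SSTABLE (0) marks an sstable as never repaired;
// NO_PENDING_REPAIR means an sstable is not part of any pending incremental repair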
public static final long UNREPAIRED_SSTABLE = 0;
public static final UUID NO_PENDING_REPAIR = null;
/**
* A map of active coordinator sessions.
*/
private final ConcurrentMap<UUID, RepairSession> sessions = new ConcurrentHashMap<>();
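// Tracks the parent session for each user-triggered repair; the individual
// per-range sessions above are children of one of these.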
private final ConcurrentMap<UUID, ParentRepairSession> parentRepairSessions = new ConcurrentHashMap<>();
static
{
RepairMetrics.init();
}
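// Initialization-on-demand holder idiom: the JVM only loads this nested class
// (and therefore only builds the thread pool) the first time
// repairCommandExecutor() is invoked, keeping startup cheap and thread-safe.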
public static class RepairCommandExecutorHandle
{
private static final ThreadPoolExecutor repairCommandExecutor =
initializeExecutor(DatabaseDescriptor.getRepairCommandPoolSize(),
DatabaseDescriptor.getRepairCommandPoolFullStrategy());
}
@VisibleForTesting
static ThreadPoolExecutor initializeExecutor(int maxPoolSize, Config.RepairCommandPoolFullStrategy strategy)
{
int corePoolSize = 1;
BlockingQueue<Runnable> queue;
if (strategy == Config.RepairCommandPoolFullStrategy.reject)
{
// new threads will be created on demand up to max pool
// size so we can leave corePoolSize at 1 to start with
queue = new SynchronousQueue<>();
}
else
{
// new threads are only created if > corePoolSize threads are running
// and the queue is full, so set corePoolSize to the desired max as the
// queue will _never_ be full. Idle core threads will eventually time
// out and may be re-created if/when subsequent tasks are submitted.
corePoolSize = maxPoolSize;
queue = new LinkedBlockingQueue<>();
}
ThreadPoolExecutor executor = new JMXEnabledThreadPoolExecutor(corePoolSize,
maxPoolSize,
1,
TimeUnit.HOURS,
queue,
new NamedThreadFactory("Repair-Task"),
"internal",
new ThreadPoolExecutor.AbortPolicy());
// allow idle core threads to be terminated
executor.allowCoreThreadTimeOut(true);
return executor;
}
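// Illustrative sketch (hypothetical tasks, not part of this class): with
// maxPoolSize = 2 and the 'reject' strategy, the SynchronousQueue has no
// capacity, so a third concurrent submission is refused by AbortPolicy with a
// RejectedExecutionException; under 'queue', the unbounded LinkedBlockingQueue
// would hold it until a worker frees up.
//
//   ThreadPoolExecutor pool = initializeExecutor(2, Config.RepairCommandPoolFullStrategy.reject);
//   pool.execute(longRunningTask1); // starts worker thread 1
//   pool.execute(longRunningTask2); // starts worker thread 2
//   pool.execute(longRunningTask3); // rejected while both workers are busy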
public static ThreadPoolExecutor repairCommandExecutor()
{
return RepairCommandExecutorHandle.repairCommandExecutor;
}
private final IFailureDetector failureDetector;
private final Gossiper gossiper;
private final Cache<Integer, Pair<ParentRepairStatus, List<String>>> repairStatusByCmd;
public final DebuggableThreadPoolExecutor snapshotExecutor = DebuggableThreadPoolExecutor.createWithMaximumPoolSize("RepairSnapshotExecutor",
1,
1,
TimeUnit.HOURS);
public ActiveRepairService(IFailureDetector failureDetector, Gossiper gossiper)
{
this.failureDetector = failureDetector;
this.gossiper = gossiper;
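// The cache bounds below are tunable via JVM system properties, for example:
//   -Dcassandra.parent_repair_status_expiry_seconds=3600
//   -Dcassandra.parent_repair_status_cache_size=50000
// (values here are illustrative; defaults are one day and 100,000 entries)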
this.repairStatusByCmd = CacheBuilder.newBuilder()
.expireAfterWrite(
Long.getLong("cassandra.parent_repair_status_expiry_seconds",
TimeUnit.SECONDS.convert(1, TimeUnit.DAYS)), TimeUnit.SECONDS)
// using weight wouldn't work so well, since it doesn't reflect mutation of cached data
// see https://github.com/google/guava/wiki/CachesExplained
// We assume each entry is unlikely to be much more than 100 bytes, so bounding the size should be sufficient.
.maximumSize(Long.getLong("cassandra.parent_repair_status_cache_size", 100_000))
.build();
MBeanWrapper.instance.registerMBean(this, MBEAN_NAME);
}
public void start()
{
consistent.local.start();
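// schedule periodic cleanup of local repair session state; first run is
// immediate, then repeats every LocalSessions.CLEANUP_INTERVAL seconds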
ScheduledExecutors.optionalTasks.scheduleAtFixedRate(consistent.local::cleanup, 0,
LocalSessions.CLEANUP_INTERVAL,
TimeUnit.SECONDS);
}
public void stop()
{
consistent.local.stop();
}
@Override
public List<Map<String, String>> getSessions(boolean all, String rangesStr)