/*
* Copyright (c) 2016, 2017 Cisco Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
package org.opendaylight.controller.cluster.sharding;
import static akka.actor.ActorRef.noSender;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;
import static java.util.Objects.requireNonNull;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.dispatch.Mapper;
import akka.dispatch.OnComplete;
import akka.pattern.Patterns;
import akka.util.Timeout;
import com.google.common.base.Throwables;
import com.google.common.collect.ClassToInstanceMap;
import com.google.common.collect.ForwardingObject;
import com.google.common.collect.ImmutableClassToInstanceMap;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;
import com.google.common.util.concurrent.Uninterruptibles;
import java.util.AbstractMap.SimpleEntry;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.checkerframework.checker.lock.qual.GuardedBy;
import org.opendaylight.controller.cluster.ActorSystemProvider;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
import org.opendaylight.controller.cluster.databroker.actors.dds.SimpleDataStoreClientActor;
import org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface;
import org.opendaylight.controller.cluster.datastore.Shard;
import org.opendaylight.controller.cluster.datastore.config.Configuration;
import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
import org.opendaylight.controller.cluster.datastore.shardstrategy.ModuleShardStrategy;
import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
import org.opendaylight.controller.cluster.dom.api.CDSDataTreeProducer;
import org.opendaylight.controller.cluster.dom.api.CDSShardAccess;
import org.opendaylight.controller.cluster.sharding.ShardedDataTreeActor.ShardedDataTreeActorCreator;
import org.opendaylight.controller.cluster.sharding.messages.InitConfigListener;
import org.opendaylight.controller.cluster.sharding.messages.LookupPrefixShard;
import org.opendaylight.controller.cluster.sharding.messages.PrefixShardRemovalLookup;
import org.opendaylight.controller.cluster.sharding.messages.ProducerCreated;
import org.opendaylight.controller.cluster.sharding.messages.ProducerRemoved;
import org.opendaylight.controller.cluster.sharding.messages.StartConfigShardLookup;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.mdsal.dom.api.DOMDataTreeCursorAwareTransaction;
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.mdsal.dom.api.DOMDataTreeListener;
import org.opendaylight.mdsal.dom.api.DOMDataTreeLoopException;
import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
import org.opendaylight.mdsal.dom.api.DOMDataTreeProducerException;
import org.opendaylight.mdsal.dom.api.DOMDataTreeService;
import org.opendaylight.mdsal.dom.api.DOMDataTreeServiceExtension;
import org.opendaylight.mdsal.dom.api.DOMDataTreeShard;
import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException;
import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingService;
import org.opendaylight.mdsal.dom.broker.DOMDataTreeShardRegistration;
import org.opendaylight.mdsal.dom.broker.ShardedDOMDataTree;
import org.opendaylight.mdsal.dom.spi.DOMDataTreePrefixTable;
import org.opendaylight.mdsal.dom.spi.DOMDataTreePrefixTableEntry;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.prefix.shard.configuration.rev170110.PrefixShards;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.compat.java8.FutureConverters;
import scala.concurrent.Future;
import scala.concurrent.Promise;
import scala.concurrent.duration.FiniteDuration;
/**
* A layer on top of DOMDataTreeService that distributes producer/shard registrations to remote nodes via
* {@link ShardedDataTreeActor}. Also provides a convenience method for adding a prefix-based clustered shard into the system.
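*
* <p>
* A minimal usage sketch; {@code actorSystemProvider}, {@code operDatastore} and {@code configDatastore}
* are assumed to be supplied by the surrounding wiring (typically blueprint):
* <pre>{@code
* DistributedShardedDOMDataTree shardingService =
*     new DistributedShardedDOMDataTree(actorSystemProvider, operDatastore, configDatastore);
* // creates prefix configuration shard writers/listeners and the default shard frontends
* shardingService.init();
* }</pre>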
*/
public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDataTreeShardingService,
DistributedShardFactory {
private static final Logger LOG = LoggerFactory.getLogger(DistributedShardedDOMDataTree.class);
private static final int MAX_ACTOR_CREATION_RETRIES = 100;
private static final int ACTOR_RETRY_DELAY = 100;
private static final TimeUnit ACTOR_RETRY_TIME_UNIT = TimeUnit.MILLISECONDS;
private static final int LOOKUP_TASK_MAX_RETRIES = 100;
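// Generous upper bound for shard-related futures:
// LOOKUP_TASK_MAX_RETRIES * LOOKUP_TASK_MAX_RETRIES * 3 = 100 * 100 * 3 = 30000 seconds.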
static final FiniteDuration SHARD_FUTURE_TIMEOUT_DURATION =
new FiniteDuration(LOOKUP_TASK_MAX_RETRIES * LOOKUP_TASK_MAX_RETRIES * 3, TimeUnit.SECONDS);
static final Timeout SHARD_FUTURE_TIMEOUT = new Timeout(SHARD_FUTURE_TIMEOUT_DURATION);
static final String ACTOR_ID = "ShardedDOMDataTreeFrontend";
private final ShardedDOMDataTree shardedDOMDataTree;
private final ActorSystem actorSystem;
private final DistributedDataStoreInterface distributedOperDatastore;
private final DistributedDataStoreInterface distributedConfigDatastore;
private final ActorRef shardedDataTreeActor;
private final MemberName memberName;
@GuardedBy("shards")
private final DOMDataTreePrefixTable<DOMDataTreeShardRegistration<DOMDataTreeShard>> shards =
DOMDataTreePrefixTable.create();
private final EnumMap<LogicalDatastoreType, Entry<DataStoreClient, ActorRef>> configurationShardMap =
new EnumMap<>(LogicalDatastoreType.class);
private final EnumMap<LogicalDatastoreType, PrefixedShardConfigWriter> writerMap =
new EnumMap<>(LogicalDatastoreType.class);
private final PrefixedShardConfigUpdateHandler updateHandler;
public DistributedShardedDOMDataTree(final ActorSystemProvider actorSystemProvider,
final DistributedDataStoreInterface distributedOperDatastore,
final DistributedDataStoreInterface distributedConfigDatastore) {
this.actorSystem = requireNonNull(actorSystemProvider).getActorSystem();
this.distributedOperDatastore = requireNonNull(distributedOperDatastore);
this.distributedConfigDatastore = requireNonNull(distributedConfigDatastore);
shardedDOMDataTree = new ShardedDOMDataTree();
shardedDataTreeActor = createShardedDataTreeActor(actorSystem,
new ShardedDataTreeActorCreator()
.setShardingService(this)
.setActorSystem(actorSystem)
.setClusterWrapper(distributedConfigDatastore.getActorUtils().getClusterWrapper())
.setDistributedConfigDatastore(distributedConfigDatastore)
.setDistributedOperDatastore(distributedOperDatastore)
.setLookupTaskMaxRetries(LOOKUP_TASK_MAX_RETRIES),
ACTOR_ID);
this.memberName = distributedConfigDatastore.getActorUtils().getCurrentMemberName();
updateHandler = new PrefixedShardConfigUpdateHandler(shardedDataTreeActor,
distributedConfigDatastore.getActorUtils().getCurrentMemberName());
LOG.debug("{} - Starting prefix configuration shards", memberName);
createPrefixConfigShard(distributedConfigDatastore);
createPrefixConfigShard(distributedOperDatastore);
}
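/**
* Asks the ShardManager of the given datastore to create the prefix configuration shard
* ({@link ClusterUtils#PREFIX_CONFIG_SHARD_ID}), replicated on every unique member name known to the
* module shard configuration. The message is fire-and-forget; {@link #init()} later waits for the
* shard to become available before creating clients for it.
*/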
private static void createPrefixConfigShard(final DistributedDataStoreInterface dataStore) {
Configuration configuration = dataStore.getActorUtils().getConfiguration();
Collection<MemberName> memberNames = configuration.getUniqueMemberNamesForAllShards();
CreateShard createShardMessage =
new CreateShard(new ModuleShardConfiguration(PrefixShards.QNAME.getNamespace(),
"prefix-shard-configuration", ClusterUtils.PREFIX_CONFIG_SHARD_ID, ModuleShardStrategy.NAME,
memberNames),
Shard.builder(), dataStore.getActorUtils().getDatastoreContext());
dataStore.getActorUtils().getShardManager().tell(createShardMessage, noSender());
}
/**
* This will try to initialize prefix configuration shards upon their
* successful start. We need to create writers to these shards, so we can
* satisfy future {@link #createDistributedShard} and
* {@link #resolveShardAdditions} requests and update prefix configuration
* shards accordingly.
*
* <p>
* We also need to initialize listeners on these shards, so we can react
* to changes made to them by other cluster members or even by ourselves.
*
* <p>
* Finally, we need to be sure that default shards for both operational and
* configuration data stores are up and running and that we have a distributed
* shards frontend created for them.
*
* <p>
* This is intended to be invoked by blueprint as an initialization method.
*/
public void init() {
// create our writers to the configuration
try {
LOG.debug("{} - starting config shard lookup.", memberName);
// We have to wait for prefix config shards to be up and running
// so we can create datastore clients for them
handleConfigShardLookup().get(SHARD_FUTURE_TIMEOUT_DURATION.length(), SHARD_FUTURE_TIMEOUT_DURATION.unit());
} catch (InterruptedException | ExecutionException | TimeoutException e) {
throw new IllegalStateException("Prefix config shards not found", e);
}
try {
LOG.debug("{}: Prefix configuration shards ready - creating clients", memberName);
configurationShardMap.put(LogicalDatastoreType.CONFIGURATION,
createDatastoreClient(ClusterUtils.PREFIX_CONFIG_SHARD_ID,
distributedConfigDatastore.getActorUtils()));
} catch (final DOMDataTreeShardCreationFailedException e) {
throw new IllegalStateException(
"Unable to create datastoreClient for config DS prefix configuration shard.", e);
}
try {
configurationShardMap.put(LogicalDatastoreType.OPERATIONAL,
createDatastoreClient(ClusterUtils.PREFIX_CONFIG_SHARD_ID,
distributedOperDatastore.getActorUtils()));
} catch (final DOMDataTreeShardCreationFailedException e) {
throw new IllegalStateException(
"Unable to create datastoreClient for oper DS prefix configuration shard.", e);
}
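// The writers wrap the datastore clients created above and perform the actual updates of the
// prefix shard configuration in each logical datastore.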
writerMap.put(LogicalDatastoreType.CONFIGURATION, new PrefixedShardConfigWriter(
configurationShardMap.get(LogicalDatastoreType.CONFIGURATION).getKey()));
writerMap.put(LogicalDatastoreType.OPERATIONAL, new PrefixedShardConfigWriter(
configurationShardMap.get(LogicalDatastoreType.OPERATIONAL).getKey()));
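// Start listening for prefix shard configuration changes made in either datastore, whether by
// other cluster members or by this node.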
updateHandler.initListener(distributedConfigDatastore, LogicalDatastoreType.CONFIGURATION);
updateHandler.initListener(distributedOperDatastore, LogicalDatastoreType.OPERATIONAL);
distributedConfigDatastore.getActorUtils().getShardManager().tell(InitConfigListener.INSTANCE, noSender());
distributedOperDatastore.getActorUtils().getShardManager().tell(InitConfigListener.INSTANCE, noSender());
// create shard registration for DEFAULT_SHARD
initDefaultShard(LogicalDatastoreType.CONFIGURATION);
initDefaultShard(LogicalDatastoreType.OPERATIONAL);
}
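/**
* Looks up the prefix configuration shards in both the config and the oper datastore and returns
* a future that completes once both lookups have succeeded.
*/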
private ListenableFuture<List<Void>> handleConfigShardLookup() {
final ListenableFuture<Void> configFuture = lookupConfigShard(LogicalDatastoreType.CONFIGURATION);
final ListenableFuture<Void> operFuture = lookupConfigShard(LogicalDatastoreType.OPERATIONAL);
return Futures.allAsList(configFuture, operFuture);
}
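/**
* Triggers a lookup of the prefix configuration shard for the given datastore type by asking the
* sharded data tree actor (via a {@link StartConfigShardLookup} message) and returns a future that
* completes once the shard has been found.
*/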
private ListenableFuture<Void> lookupConfigShard(final LogicalDatastoreType type) {
final SettableFuture<Void> future = SettableFuture.create();
final Future<Object>