org.apache.brooklyn.entity.nosql.couchbase.CouchbaseClusterImpl (brooklyn-software-nosql)
Brooklyn entities for NoSQL data store software entities
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.brooklyn.entity.nosql.couchbase;
import static org.apache.brooklyn.util.JavaGroovyEquivalents.groovyTruth;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import javax.annotation.Nonnull;
import org.apache.brooklyn.api.entity.Entity;
import org.apache.brooklyn.api.entity.EntitySpec;
import org.apache.brooklyn.api.policy.PolicySpec;
import org.apache.brooklyn.api.sensor.AttributeSensor;
import org.apache.brooklyn.core.config.render.RendererHints;
import org.apache.brooklyn.core.effector.Effectors;
import org.apache.brooklyn.core.entity.Attributes;
import org.apache.brooklyn.core.entity.Entities;
import org.apache.brooklyn.core.entity.EntityInternal;
import org.apache.brooklyn.core.entity.lifecycle.ServiceStateLogic;
import org.apache.brooklyn.core.entity.trait.Startable;
import org.apache.brooklyn.core.location.access.BrooklynAccessUtils;
import org.apache.brooklyn.core.sensor.DependentConfiguration;
import org.apache.brooklyn.enricher.stock.Enrichers;
import org.apache.brooklyn.entity.group.AbstractMembershipTrackingPolicy;
import org.apache.brooklyn.entity.group.DynamicClusterImpl;
import org.apache.brooklyn.entity.software.base.SoftwareProcess;
import org.apache.brooklyn.feed.http.HttpFeed;
import org.apache.brooklyn.feed.http.HttpPollConfig;
import org.apache.brooklyn.feed.http.HttpValueFunctions;
import org.apache.brooklyn.feed.http.JsonFunctions;
import org.apache.brooklyn.util.http.HttpToolResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.brooklyn.util.collections.CollectionFunctionals;
import org.apache.brooklyn.util.collections.MutableSet;
import org.apache.brooklyn.util.collections.QuorumCheck;
import org.apache.brooklyn.util.core.task.DynamicTasks;
import org.apache.brooklyn.util.core.task.TaskBuilder;
import org.apache.brooklyn.util.core.task.Tasks;
import org.apache.brooklyn.util.exceptions.Exceptions;
import org.apache.brooklyn.util.guava.Functionals;
import org.apache.brooklyn.util.guava.IfFunctions;
import org.apache.brooklyn.util.math.MathPredicates;
import org.apache.brooklyn.util.text.ByteSizeStrings;
import org.apache.brooklyn.util.text.StringFunctions;
import org.apache.brooklyn.util.text.Strings;
import org.apache.brooklyn.util.time.Duration;
import org.apache.brooklyn.util.time.Time;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.common.net.HostAndPort;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
public class CouchbaseClusterImpl extends DynamicClusterImpl implements CouchbaseCluster {
/*
* Refactoring required:
*
* Currently, on start() the cluster waits for an arbitrary SERVICE_UP_TIME_OUT (3 minutes) before assuming that a quorate
* number of servers is available. The servers are then added to the cluster, and a further wait period of
* DELAY_BEFORE_ADVERTISING_CLUSTER (30 seconds) is used before advertising the cluster.
*
* DELAY_BEFORE_ADVERTISING_CLUSTER: It should be possible to refactor this away by adding a repeater that will poll
* the REST API of the primary node (once established) until the API indicates that the cluster is available; an
* illustrative sketch follows this comment block.
*
* SERVICE_UP_TIME_OUT: The refactoring of this would be more substantial. One method would be to remove the bulk of the
* logic from the start() method, and rely entirely on the membership tracking policy and the onServerPoolMemberChanged()
* method. The addition of a RUNNING sensor on the nodes would allow the cluster to determine that a node is up and
* running but has not yet been added to the cluster. The IS_CLUSTER_INITIALIZED key could be used to determine whether
* or not the cluster should be initialized, or a node simply added to an existing cluster. A repeater could be used
* in the driver to ensure that the method does not return until the node has been fully added.
*
* There is an (incomplete) first-pass at this here: https://github.com/Nakomis/incubator-brooklyn/compare/couchbase-running-sensor
* however, there have been significant changes to the cluster initialization since that work was done, so it will
* probably need to be re-done.
*
* Additionally, during bucket creation, an HttpPoll is used to check that the bucket has been created. This should be
* refactored to use a Repeater in CouchbaseNodeSshDriver.bucketCreate() in a similar way to the one employed in
* CouchbaseNodeSshDriver.rebalance(). Were this done, this class could simply queue the bucket creation tasks.
*
*/
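/*
* Illustrative sketch only, not part of the upstream class: the DELAY_BEFORE_ADVERTISING_CLUSTER sleep mentioned
* above could plausibly be replaced by a Repeater (org.apache.brooklyn.util.repeat.Repeater) that polls the
* primary node's REST API until the cluster reports itself available. The clusterReportsAllNodesHealthy()
* helper below is hypothetical and would need to query /pools/default on the primary node.
*
*   Repeater.create("wait for Couchbase cluster to advertise itself as available")
*           .every(Duration.FIVE_SECONDS)
*           .limitTimeTo(Duration.FIVE_MINUTES)
*           .until(new Callable<Boolean>() {
*               @Override
*               public Boolean call() {
*                   return clusterReportsAllNodesHealthy(); // hypothetical REST check
*               }
*           })
*           .run();
*/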
private static final Logger log = LoggerFactory.getLogger(CouchbaseClusterImpl.class);
private final Object mutex = new Object[0];
// Used to serialize bucket creation as only one bucket can be created at a time,
// so a feed is used to determine when a bucket has finished being created
private final AtomicReference<HttpFeed> resetBucketCreation = new AtomicReference<HttpFeed>();
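// Illustrative sketch only (an assumption about usage, not necessarily the upstream implementation): the
// reference above would typically be set to a short-lived HttpFeed that polls the bucket REST endpoint and
// clears BUCKET_CREATION_IN_PROGRESS once the new bucket is visible, along the lines of:
//
//   resetBucketCreation.set(HttpFeed.builder()
//           .entity(this)
//           .period(Duration.ONE_SECOND)
//           .baseUri(bucketsApiUri) // hypothetical URI for .../pools/default/buckets on the primary node
//           .poll(new HttpPollConfig<Boolean>(BUCKET_CREATION_IN_PROGRESS)
//                   .onSuccess(Functions.constant(Boolean.FALSE))
//                   .onFailureOrException(Functions.constant(Boolean.TRUE)))
//           .build());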
@Override
public void init() {
log.info("Initializing the Couchbase cluster...");
super.init();
enrichers().add(
Enrichers.builder()
.transforming(COUCHBASE_CLUSTER_UP_NODES)
.from(this)
.publishing(COUCHBASE_CLUSTER_UP_NODE_ADDRESSES)
.computing(new ListOfHostAndPort()).build() );
enrichers().add(
Enrichers.builder()
.transforming(COUCHBASE_CLUSTER_UP_NODE_ADDRESSES)
.from(this)
.publishing(COUCHBASE_CLUSTER_CONNECTION_URL)
.computing(
IfFunctions.<List<String>>ifPredicate(
Predicates.compose(MathPredicates.lessThan(getConfig(CouchbaseCluster.INITIAL_QUORUM_SIZE)),
CollectionFunctionals.sizeFunction(0)) )
.value((String)null)
.defaultApply(
Functionals.chain(
CollectionFunctionals.<List<String>>limit(4),
StringFunctions.joiner(","),
StringFunctions.formatter("http://%s/"))) )
.build() );
Map<? extends AttributeSensor<? extends Number>, ? extends AttributeSensor<? extends Number>> enricherSetup =
ImmutableMap.<AttributeSensor<? extends Number>, AttributeSensor<? extends Number>>builder()
.put(CouchbaseNode.OPS, CouchbaseCluster.OPS_PER_NODE)
.put(CouchbaseNode.COUCH_DOCS_DATA_SIZE, CouchbaseCluster.COUCH_DOCS_DATA_SIZE_PER_NODE)
.put(CouchbaseNode.COUCH_DOCS_ACTUAL_DISK_SIZE, CouchbaseCluster.COUCH_DOCS_ACTUAL_DISK_SIZE_PER_NODE)
.put(CouchbaseNode.EP_BG_FETCHED, CouchbaseCluster.EP_BG_FETCHED_PER_NODE)
.put(CouchbaseNode.MEM_USED, CouchbaseCluster.MEM_USED_PER_NODE)
.put(CouchbaseNode.COUCH_VIEWS_ACTUAL_DISK_SIZE, CouchbaseCluster.COUCH_VIEWS_ACTUAL_DISK_SIZE_PER_NODE)
.put(CouchbaseNode.CURR_ITEMS, CouchbaseCluster.CURR_ITEMS_PER_NODE)
.put(CouchbaseNode.VB_REPLICA_CURR_ITEMS, CouchbaseCluster.VB_REPLICA_CURR_ITEMS_PER_NODE)
.put(CouchbaseNode.COUCH_VIEWS_DATA_SIZE, CouchbaseCluster.COUCH_VIEWS_DATA_SIZE_PER_NODE)
.put(CouchbaseNode.GET_HITS, CouchbaseCluster.GET_HITS_PER_NODE)
.put(CouchbaseNode.CMD_GET, CouchbaseCluster.CMD_GET_PER_NODE)
.put(CouchbaseNode.CURR_ITEMS_TOT, CouchbaseCluster.CURR_ITEMS_TOT_PER_NODE)
.build();
for (AttributeSensor<? extends Number> nodeSensor : enricherSetup.keySet()) {
addSummingMemberEnricher(nodeSensor);
addAveragingMemberEnricher(nodeSensor, enricherSetup.get(nodeSensor));
}
enrichers().add(Enrichers.builder().updatingMap(Attributes.SERVICE_NOT_UP_INDICATORS)
.from(IS_CLUSTER_INITIALIZED).computing(
IfFunctions.ifNotEquals(true).value("The cluster is not yet completely initialized")
.defaultValue(null).build()).build() );
}
private void addAveragingMemberEnricher(AttributeSensor<? extends Number> fromSensor, AttributeSensor<? extends Number> toSensor) {
enrichers().add(Enrichers.builder()
.aggregating(fromSensor)
.publishing(toSensor)
.fromMembers()
.computingAverage()
.build()
);
}
private void addSummingMemberEnricher(AttributeSensor<? extends Number> source) {
enrichers().add(Enrichers.builder()
.aggregating(source)
.publishing(source)
.fromMembers()
.computingSum()
.build()
);
}
@Override
protected void doStart() {
sensors().set(IS_CLUSTER_INITIALIZED, false);
super.doStart();
connectSensors();
sensors().set(BUCKET_CREATION_IN_PROGRESS, false);
//start timeout before adding the servers
Tasks.setBlockingDetails("Pausing while Couchbase stabilizes");
Time.sleep(getConfig(NODES_STARTED_STABILIZATION_DELAY));
Optional<Set<Entity>> upNodes = Optional.<Set<Entity>>fromNullable(getAttribute(COUCHBASE_CLUSTER_UP_NODES));
if (upNodes.isPresent() && !upNodes.get().isEmpty()) {
Tasks.setBlockingDetails("Adding servers to Couchbase");
//TODO: select a new primary node if this one fails
Entity primaryNode = upNodes.get().iterator().next();
((EntityInternal) primaryNode).sensors().set(CouchbaseNode.IS_PRIMARY_NODE, true);
sensors().set(COUCHBASE_PRIMARY_NODE, primaryNode);
Set<Entity> serversToAdd = MutableSet.copyOf(getUpNodes());
if (serversToAdd.size() >= getQuorumSize() && serversToAdd.size() > 1) {
log.info("Number of SERVICE_UP nodes:{} in cluster:{} reached Quorum:{}, adding the servers", new Object[]{serversToAdd.size(), getId(), getQuorumSize()});
addServers(serversToAdd);
//wait for servers to be added to the couchbase server
try {
Tasks.setBlockingDetails("Delaying before advertising cluster up");
Time.sleep(getConfig(DELAY_BEFORE_ADVERTISING_CLUSTER));
} finally {
Tasks.resetBlockingDetails();
}
getPrimaryNode().rebalance();
} else {
if (getQuorumSize()>1) {
log.warn(this+" is not quorate; will likely fail later, but proceeding for now");
}
for (Entity server: serversToAdd) {
((EntityInternal) server).sensors().set(CouchbaseNode.IS_IN_CLUSTER, true);
}
}
if (getConfig(CREATE_BUCKETS)!=null) {
try {
Tasks.setBlockingDetails("Creating buckets in Couchbase");
createBuckets();
DependentConfiguration.waitInTaskForAttributeReady(this, CouchbaseCluster.BUCKET_CREATION_IN_PROGRESS, Predicates.equalTo(false));
} finally {
Tasks.resetBlockingDetails();
}
}
if (getConfig(REPLICATION)!=null) {
try {
Tasks.setBlockingDetails("Configuring replication rules");
List