// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.provisioning;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterMembership;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.ParentHostUnavailableException;
import com.yahoo.transaction.Mutex;
import com.yahoo.transaction.NestedTransaction;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Allocation;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
/**
 * Performs activation of resources for an application, e.g. nodes or load balancers.
 *
 * @author bratseth
 */
class Activator {
    private final NodeRepository nodeRepository;
    private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;

    public Activator(NodeRepository nodeRepository, Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
        this.nodeRepository = nodeRepository;
        this.loadBalancerProvisioner = loadBalancerProvisioner;
    }
    /** Activate the required resources for the given application */
    public void activate(ApplicationId application, Collection<HostSpec> hosts, NestedTransaction transaction) {
        try (Mutex lock = nodeRepository.lock(application)) {
            activateNodes(application, hosts, transaction, lock);
            activateLoadBalancers(application, hosts, transaction, lock);
        }
    }
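    /*
     * A minimal usage sketch (not part of this class): a caller such as the hosted provisioner is
     * assumed to have prepared/reserved the hosts already. The names "application" and
     * "preparedHosts" below are placeholders for illustration only.
     *
     *   Activator activator = new Activator(nodeRepository, Optional.empty()); // no load balancer support
     *   NestedTransaction transaction = new NestedTransaction();
     *   activator.activate(application, preparedHosts, transaction);
     *   transaction.commit(); // the node state changes added above take effect only here
     */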
    /**
     * Add operations to activate nodes for an application to the given transaction.
     * The operations are not effective until the transaction is committed.
     *
     * Precondition: The application has a possibly empty set of nodes in each of reserved and active.
     *
     * Postcondition: Nodes in reserved which are present in hosts are moved to active.
     *                Nodes in active which are not present in hosts are moved to inactive.
     *
     * @param application the application to allocate nodes for
     * @param hosts the hosts to make the set of active nodes of this application
     * @param transaction transaction with operations to commit together with any operations done within the repository
     * @param applicationLock the application lock that must be held when calling this
     */
    private void activateNodes(ApplicationId application, Collection<HostSpec> hosts, NestedTransaction transaction,
                               @SuppressWarnings("unused") Mutex applicationLock) {
        Set<String> hostnames = hosts.stream().map(HostSpec::hostname).collect(Collectors.toSet());
        NodeList allNodes = nodeRepository.list();
        NodeList applicationNodes = allNodes.owner(application);

        List<Node> reserved = applicationNodes.state(Node.State.reserved).asList();
        List<Node> reservedToActivate = retainHostsInList(hostnames, reserved);
        List<Node> active = applicationNodes.state(Node.State.active).asList();
        List<Node> continuedActive = retainHostsInList(hostnames, active);
        List<Node> allActive = new ArrayList<>(continuedActive);
        allActive.addAll(reservedToActivate);
        if ( ! containsAll(hostnames, allActive))
            throw new IllegalArgumentException("Activation of " + application + " failed. " +
                                               "Could not find all requested hosts." +
                                               "\nRequested: " + hosts +
                                               "\nReserved: " + toHostNames(reserved) +
                                               "\nActive: " + toHostNames(active) +
                                               "\nThis might happen if the time from reserving the hosts to activation " +
                                               "takes longer than the reservation expiry (the hosts will then no longer be reserved)");

        validateParentHosts(application, allNodes, reservedToActivate);

        List<Node> activeToRemove = removeHostsFromList(hostnames, active);
        activeToRemove = activeToRemove.stream().map(Node::unretire).collect(Collectors.toList()); // only active nodes can be retired
        nodeRepository.deactivate(activeToRemove, transaction);
        nodeRepository.activate(updateFrom(hosts, continuedActive), transaction); // update active nodes with any changes
        nodeRepository.activate(updatePortsFrom(hosts, reservedToActivate), transaction);
    }
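    /*
     * Worked example of the moves above, with hypothetical hostnames:
     * reserved = {a}, active = {b, c}, hosts = {a, b}
     *   -> reservedToActivate = {a}, continuedActive = {b}, activeToRemove = {c}
     *   -> a and b are written as active (with updated membership and ports), c is deactivated.
     */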
    /** Activate load balancers */
    private void activateLoadBalancers(ApplicationId application, Collection<HostSpec> hosts, NestedTransaction transaction,
                                       @SuppressWarnings("unused") Mutex applicationLock) {
        loadBalancerProvisioner.ifPresent(provisioner -> provisioner.activate(application, clustersOf(hosts), applicationLock, transaction));
    }
    private static Set<ClusterSpec> clustersOf(Collection<HostSpec> hosts) {
        return hosts.stream()
                    .map(HostSpec::membership)
                    .flatMap(Optional::stream)
                    .map(ClusterMembership::cluster)
                    .collect(Collectors.toUnmodifiableSet());
    }
    private static void validateParentHosts(ApplicationId application, NodeList nodes, List<Node> potentialChildren) {
        Set<String> parentHostnames = potentialChildren.stream()
                                                       .map(Node::parentHostname)
                                                       .flatMap(Optional::stream)
                                                       .collect(Collectors.toSet());

        long numNonActive = nodes.asList().stream()
                                 .filter(node -> parentHostnames.contains(node.hostname()))
                                 .filter(node -> node.state() != Node.State.active)
                                 .count();

        if (numNonActive > 0) {
            throw new ParentHostUnavailableException("Waiting for hosts to finish booting: " +
                                                     numNonActive + "/" + parentHostnames.size() + " left.");
        }
    }
    private List<Node> retainHostsInList(Set<String> hosts, List<Node> nodes) {
        return nodes.stream().filter(node -> hosts.contains(node.hostname())).collect(Collectors.toList());
    }

    private List<Node> removeHostsFromList(Set<String> hosts, List<Node> nodes) {
        return nodes.stream().filter(node -> ! hosts.contains(node.hostname())).collect(Collectors.toList());
    }

    private Set<String> toHostNames(List<Node> nodes) {
        return nodes.stream().map(Node::hostname).collect(Collectors.toSet());
    }

    private boolean containsAll(Set<String> hosts, List<Node> nodes) {
        Set<String> notFoundHosts = new HashSet<>(hosts);
        for (Node node : nodes)
            notFoundHosts.remove(node.hostname());
        return notFoundHosts.isEmpty();
    }
    /**
     * Returns the input nodes with the changes resulting from applying the settings in hosts
     * to the given list of nodes.
     */
    private List<Node> updateFrom(Collection<HostSpec> hosts, List<Node> nodes) {
        List<Node> updated = new ArrayList<>();
        for (Node node : nodes) {
            HostSpec hostSpec = getHost(node.hostname(), hosts);
            node = hostSpec.membership().get().retired() ? node.retire(nodeRepository.clock().instant()) : node.unretire();
            Allocation allocation = node.allocation().get().with(hostSpec.membership().get());
            if (hostSpec.networkPorts().isPresent()) {
                allocation = allocation.withNetworkPorts(hostSpec.networkPorts().get());
            }
            node = node.with(allocation);
            if (hostSpec.flavor().isPresent()) // Docker nodes may change flavor
                node = node.with(hostSpec.flavor().get());
            updated.add(node);
        }
        return updated;
    }
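    /*
     * For example (hypothetical hostname): if the HostSpec for "node1.example.com" carries a
     * membership marked retired, the returned copy of that node is retired at the current clock
     * instant; otherwise any existing retirement is cleared before the new membership, ports and
     * (for Docker nodes) flavor are applied.
     */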
    /**
     * Returns the input nodes with any port allocations from the given hosts applied.
     */
    private List<Node> updatePortsFrom(Collection<HostSpec> hosts, List<Node> nodes) {
        List<Node> updated = new ArrayList<>();
        for (Node node : nodes) {
            HostSpec hostSpec = getHost(node.hostname(), hosts);
            Allocation allocation = node.allocation().get();
            if (hostSpec.networkPorts().isPresent()) {
                allocation = allocation.withNetworkPorts(hostSpec.networkPorts().get());
                node = node.with(allocation);
            }
            updated.add(node);
        }
        return updated;
    }
    private HostSpec getHost(String hostname, Collection<HostSpec> fromHosts) {
        for (HostSpec host : fromHosts)
            if (host.hostname().equals(hostname))
                return host;
        return null;
    }

}