nextflow.scheduler.Scheduler.groovy

A DSL modelled around the UNIX pipe concept that simplifies writing parallel and scalable pipelines in a portable manner (forked from nextflow.io).

/*
 * Copyright 2013-2019, Centre for Genomic Regulation (CRG)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package nextflow.scheduler
import static nextflow.scheduler.Protocol.PENDING_TASKS_CACHE
import static nextflow.scheduler.Protocol.TOPIC_AGENT_EVENTS
import static nextflow.scheduler.Protocol.TOPIC_SCHEDULER_EVENTS

import javax.cache.CacheException
import java.util.concurrent.BlockingQueue
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.LinkedBlockingQueue

import groovy.transform.CompileStatic
import groovy.transform.PackageScope
import nextflow.cloud.CloudSpotTerminationException
import nextflow.daemon.IgGridFactory
import nextflow.executor.IgBaseTask
import nextflow.processor.TaskId
import nextflow.processor.TaskPollingMonitor
import nextflow.scheduler.Protocol.NodeData
import nextflow.scheduler.Protocol.NodeIdle
import nextflow.scheduler.Protocol.NodeShutdown
import nextflow.scheduler.Protocol.TaskAvail
import nextflow.scheduler.Protocol.TaskCancel
import nextflow.scheduler.Protocol.TaskComplete
import nextflow.scheduler.Protocol.TaskHolder
import nextflow.scheduler.Protocol.TaskStart
import nextflow.scheduler.Protocol.NodeRetired
import org.apache.ignite.Ignite
import org.apache.ignite.IgniteCache
import org.apache.ignite.IgniteInterruptedException
import org.apache.ignite.events.DiscoveryEvent
import org.apache.ignite.events.Event
import org.apache.ignite.events.EventType
import org.apache.ignite.lang.IgniteBiPredicate
import org.apache.ignite.lang.IgniteCallable
import org.apache.ignite.lang.IgnitePredicate
import org.apache.ignite.resources.IgniteInstanceResource
import org.slf4j.Logger
import org.slf4j.LoggerFactory
/**
 * Implements the scheduler controller logic
 *
 * @author Paolo Di Tommaso 
 */
@CompileStatic
class Scheduler {

    private static final Logger log = LoggerFactory.getLogger(Scheduler)

    static private class ClusterDiscovery implements IgniteCallable {

        @IgniteInstanceResource
        private Ignite ignite

        @Override
        NodeData call() throws Exception {
            def clusterConfig = IgGridFactory.instance().clusterConfig
            return NodeData.create(clusterConfig, ignite)
        }
    }

    /**
     * Local map holding all tasks submitted to a remote agent for execution
     */
    private Map<TaskId,TaskHolder> scheduledTasks

    /**
     * Local map holding the completed tasks until they are drained by the {@link TaskPollingMonitor}
     */
    private Map<TaskId,TaskHolder> completedTasks

    /**
     * Distributed map holding all the tasks waiting to be executed
     */
    private IgniteCache<TaskId,IgBaseTask> pendingTasks

    /**
     * Map each worker node {@link UUID} to the associated {@link NodeData} descriptor
     */
    private Map<UUID,NodeData> workerNodes

    /**
     * {@link Ignite} instance
     */
    private Ignite ignite

    /**
     * Reference to the {@link TaskPollingMonitor} instance
     */
    private TaskPollingMonitor monitor

    /**
     * Holds a queue of the received messages to be processed
     */
    private BlockingQueue<Closure> messageQueue = new LinkedBlockingQueue<>()

    /**
     * Thread which processes the messages in the {@link #messageQueue}
     */
    private Thread messageProcessor

    /**
     * The auto-scaling policy
     */
    private Autoscaler autoscaler

    private boolean cloudEnabled

    private long bootTimestamp


    /**
     * Initialize the scheduler instance
     *
     * @param ignite An {@link Ignite} instance
     * @param monitor A {@link TaskPollingMonitor} instance
     */
    Scheduler init(Ignite ignite, TaskPollingMonitor monitor) {
        assert ignite
        assert monitor

        this.ignite = ignite
        this.monitor = monitor
        this.bootTimestamp = System.currentTimeMillis()
        this.workerNodes = new ConcurrentHashMap<>()
        this.scheduledTasks = new ConcurrentHashMap<>()
        this.completedTasks = new ConcurrentHashMap<>()
        this.pendingTasks = ignite.cache(PENDING_TASKS_CACHE)

        discoverWorkers()
        createEventProcessor()
        registerEvents()

        return this
    }

    void registerAutoscaler( Autoscaler autoscaler ) {
        this.autoscaler = autoscaler
        this.autoscaler.init(workerNodes, scheduledTasks)
        this.cloudEnabled = true
    }

    /**
     * Discover the nodes that make up the cluster
     */
    private void discoverWorkers() {

        def nodes = ignite.compute().broadcast(new ClusterDiscovery())

        def buffer = new StringBuilder("+++ Initial cluster topology:\n")
        nodes.each { node ->
            workerNodes[node.nodeId] = node
            buffer << '- ' << node.toString() << '\n'
        }

        log.debug buffer.toString()
    }


    @PackageScope boolean isRunning() {
        messageProcessor.isAlive()
    }

    private void createEventProcessor() {

        messageProcessor = Thread.start('scheduler-thread') {

            while( true ) {
                try {
                    messageQueue.take().call()
                }
                catch( InterruptedException e ) {
                    // time to die
                    break
                }
                catch( Throwable e ) {
                    log.debug("+++ Can't process received message", e)
                }
            }
        }
    }

    /**
     * Create the dispatcher predicate that routes a received message to the
     * concrete message handler.
     *
     * The predicate arguments are the {@link UUID} identifier of the node sending
     * the message and the message object itself e.g. {@link TaskStart}
     */
    private IgniteBiPredicate<UUID,Object> createMessageDispatcher() {

        { UUID sender, Object message ->

            //
            // note: the message is not processed inline, instead it is wrapped in a closure
            // and appended to the `messageQueue`, so that all messages are processed in order
            // by the same scheduler thread
            //
            messageQueue << {

                if( message instanceof TaskStart ) {
                    onTaskStart(sender, message)
                }
                else if( message instanceof TaskComplete ) {
                    onTaskComplete(sender, message)
                }
                else if( message instanceof NodeData ) {
                    onNodeStart(sender, message)
                }
                else if( message instanceof NodeIdle ) {
                    onNodeIdle(sender, message)
                }
                else if( message instanceof NodeRetired ) {
                    onNodeRetired(sender, message)
                }
                else {
                    throw new IllegalArgumentException("Unknown worker message: $message")
                }

            }
            return true

        } as IgniteBiPredicate

    }

    /**
     * Create the dispatcher predicate that routes a received {@link Event}
     * object to the concrete event handler
     */
    private IgnitePredicate<Event> createEventDispatcher() {

        { Event event ->

            messageQueue << {
                if( event instanceof DiscoveryEvent ) {
                    if( event.type() == EventType.EVT_NODE_LEFT ) {
                        onNodeLeft(event.eventNode().id())
                        return
                    }
                    if( event.type() == EventType.EVT_NODE_FAILED ) {
                        onNodeFailed(event.eventNode().id())
                        return
                    }

                }

                throw new IllegalArgumentException("Unknown event: $event")
            }
            return true

        } as IgnitePredicate

    }

    /**
     * Register the event and message listeners used by the scheduler
     *
     * Note: received events are appended to the {@link #messageQueue} to be
     * processed asynchronously. This is required because Ignite may stall (hang)
     * when a new message is sent from the same thread that is handling the incoming one
     */
    private registerEvents() {
        // -- listen for events from the scheduler agent to this class
        ignite
            .message()
            .localListen( TOPIC_SCHEDULER_EVENTS, createMessageDispatcher() )

        // -- listen for a node left event
        ignite
            .events()
            .localListen( createEventDispatcher(), EventType.EVT_NODE_LEFT )

        // -- listen for a node failed event
        ignite
            .events()
            .localListen( createEventDispatcher(), EventType.EVT_NODE_FAILED )
    }

    /**
     * Schedule one or more tasks for execution. Each {@link IgBaseTask} object is
     * added to the {@link #pendingTasks} distributed cache, from where it will be
     * picked up by a remote scheduler agent to be processed
     *
     * @param tasks One or more {@link IgBaseTask} instances
     *
     */
    void schedule( IgBaseTask... tasks ) {
        messageQueue << { schedule0(tasks) }
    }

    private void schedule0( IgBaseTask... tasks ) {

        log.trace "+++ Scheduling tasks: taskId=${tasks.collect{ IgBaseTask t -> t.taskId }.join(',')}"

        for( int i=0; i<tasks.length; i++ ) {
            final task = tasks[i]
            // -- add the task to the pending tasks distributed cache, from where
            //    it will be picked up by a remote scheduler agent
            pendingTasks.put(task.taskId, task)
        }
    }

    /**
     * Drop the tasks that were submitted to a node that has left the cluster,
     * has failed or has been retired, notifying an error for each of them
     *
     * @param nodeId The {@link UUID} of the node that is gone
     * @param reason The reason why the node is gone e.g. {@code retired} or {@code failed}
     */
    private void dropTasksOnNode( UUID nodeId, String reason ) {

        // -- collect the tasks that were submitted to the given node
        def tasks = (List<IgBaseTask>)scheduledTasks
                .values()
                .findResults { TaskHolder it -> it.worker == nodeId ? it.task : null }

        // -- reschedule matching tasks for execution
        if( !tasks ) {
            log.trace "+++ No pending task on $reason node: [${hostName(nodeId)}]"
            return
        }

        log.trace "+++ Dropping tasks on $reason node: [${hostName(nodeId)}] taskId=${tasks.collect{ it.taskId }.join(', ') ?: 'n/a'}"
        def itr = tasks.iterator()
        while( itr.hasNext() ) {
            def task = itr.next()
            // -- simulate an error message
            def cause = (reason=='retired'
                        ? new CloudSpotTerminationException("Computing node was retired: [${hostName(nodeId)}]")
                        : new RuntimeException("Task aborted due to failure on node: [${hostName(nodeId)}]") )
            def failure = TaskComplete.error(task, cause)
            onTaskComplete(nodeId, failure)
        }
    }

    /**
     * Check if the task with the specified id has started
     *
     * @param taskId The task identifier
     * @return {@code true} if the task has started, or {@code false} otherwise
     */
    boolean checkTaskStarted( TaskId taskId ) {
        scheduledTasks.get(taskId)?.started || completedTasks.containsKey(taskId)
    }

    /**
     * Check if the task with the specified id has completed
     *
     * @param taskId The task identifier
     * @return {@code true} if the task has completed, or {@code false} otherwise
     */
    boolean checkTaskCompleted( TaskId taskId ) {
        completedTasks.containsKey(taskId)
    }

    boolean checkTaskFailed( TaskId taskId ) {
        completedTasks.get(taskId)?.error != null
    }

    /**
     * Cancel the execution of a task
     *
     * @param taskId The {@link TaskId} ID of the task execution to cancel
     */
    void cancelTask( TaskId taskId ) {

        messageQueue << {

            log.trace "+++ Cancelling task: taskId=${taskId}"
            boolean removed = false
            try {
                 removed = pendingTasks.remove(taskId)
            }
            catch (CacheException e) {
                if( !(e.cause instanceof IgniteInterruptedException) )
                    throw e
            }

            def holder = scheduledTasks.get(taskId)
            if( holder ) {
                if( holder.worker ) {
                    def worker = ignite.cluster().forNodeId(holder.worker)
                    ignite.message(worker).send( TOPIC_AGENT_EVENTS, new TaskCancel(taskId) )
                }
                scheduledTasks.remove(taskId)
            }

            if( !removed && !holder ) {
                log.trace "+++ Oops.. Unable to cancel task: taskId=${taskId}"
            }
        }
    }

    /**
     * Get the task runtime information and remove it from the {@link #completedTasks} structure
     *
     * @param taskId The id of the task to retrieve
     * @return A {@link TaskHolder} containing the task runtime info or {@code null} if the task is not available
     */
    TaskHolder removeTaskCompleted( TaskId taskId ) {
        def result = completedTasks.get(taskId)
        completedTasks.remove(taskId)
        return result
    }

    String dumpScheduledTasksStatus() {
        def result = new StringBuilder()
        def itr = scheduledTasks.values().iterator()
        while( itr.hasNext() ) {
            result << itr.next().toString() << '\n'
        }
        return result.toString()
    }

    /**
     * Shutdown the scheduler remote agents by sending a {@link NodeShutdown} message
     * over the {@link Protocol#TOPIC_AGENT_EVENTS} topic
     */
    void shutdownRemoteAgents() {
        final group = ignite.cluster().forRemotes()
        if( group.node() ) try {
            ignite.message(group).send( TOPIC_AGENT_EVENTS, NodeShutdown.INSTANCE )
        }
        catch( Exception e ) {
            log.warn("+++ Unexpected error notifying remote nodes shutdown", e)
        }
    }

    /**
     * Shutdown the scheduler object
     */
    void shutdownScheduler() {
        messageProcessor?.interrupt()
        autoscaler?.closeQuietly()
    }
}
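
For context, here is a minimal sketch of how this class is meant to be driven by the head node, based only on the public methods visible above. The Ignite bootstrap, the monitor construction and the task object are hypothetical placeholders; the real wiring lives elsewhere in the Nextflow code base.

// Illustrative wiring only: Ignition.start(), createMonitor() and makeTask() are
// hypothetical placeholders, not the actual Nextflow bootstrap code.
import nextflow.executor.IgBaseTask
import nextflow.processor.TaskPollingMonitor
import nextflow.scheduler.Scheduler
import org.apache.ignite.Ignite
import org.apache.ignite.Ignition

Ignite ignite = Ignition.start()                          // an already configured Ignite node
TaskPollingMonitor monitor = createMonitor()              // placeholder: the monitor created by the executor
IgBaseTask someTask = makeTask()                          // placeholder: a task wrapping a process execution

def scheduler = new Scheduler().init(ignite, monitor)
// scheduler.registerAutoscaler(autoscaler)               // optional: enables the cloud auto-scaling policy

scheduler.schedule(someTask)                              // appends it to the pending tasks distributed cache

while( !scheduler.checkTaskCompleted(someTask.taskId) )   // poll until a remote agent reports completion
    sleep 500

if( scheduler.checkTaskFailed(someTask.taskId) )          // inspect the outcome before draining it
    println "task ${someTask.taskId} failed"
def holder = scheduler.removeTaskCompleted(someTask.taskId)

scheduler.shutdownRemoteAgents()                          // ask the remote agents to stop
scheduler.shutdownScheduler()                             // stop the local message processor thread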



