org.graylog.scheduler.DBJobTriggerService Maven / Gradle / Ivy
/*
* Copyright (C) 2020 Graylog, Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the Server Side Public License, version 1,
* as published by MongoDB, Inc.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* Server Side Public License for more details.
*
* You should have received a copy of the Server Side Public License
* along with this program. If not, see
* <http://www.mongodb.com/licensing/server-side-public-license>.
*/
package org.graylog.scheduler;
import com.github.joschi.jadconfig.util.Duration;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Sets;
import com.mongodb.BasicDBObject;
import one.util.streamex.StreamEx;
import org.bson.types.ObjectId;
import org.graylog.scheduler.capabilities.SchedulerCapabilitiesService;
import org.graylog.scheduler.clock.JobSchedulerClock;
import org.graylog.scheduler.schedule.OnceJobSchedule;
import org.graylog2.bindings.providers.MongoJackObjectMapperProvider;
import org.graylog2.database.MongoConnection;
import org.graylog2.plugin.system.NodeId;
import org.graylog2.shared.utilities.MongoQueryUtils;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.mongojack.DBCursor;
import org.mongojack.DBQuery;
import org.mongojack.DBQuery.Query;
import org.mongojack.DBSort;
import org.mongojack.DBUpdate;
import org.mongojack.JacksonDBCollection;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.inject.Inject;
import javax.inject.Named;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import static com.google.common.base.Strings.isNullOrEmpty;
import static java.util.Objects.requireNonNull;
import static org.graylog.scheduler.JobSchedulerConfiguration.LOCK_EXPIRATION_DURATION;
// This class does NOT use PaginatedDbService because we use the triggers collection for locking and need to handle
// updates very carefully.
public class DBJobTriggerService {
private static final Logger LOG = LoggerFactory.getLogger(DBJobTriggerService.class);

public static final String COLLECTION_NAME = "scheduler_triggers";

// Top-level document fields.
private static final String FIELD_ID = "_id";
static final String FIELD_JOB_DEFINITION_ID = JobTriggerDto.FIELD_JOB_DEFINITION_ID;

// Dotted paths into the embedded lock sub-document. The lock fields must only
// be modified through the dedicated locking methods of this service.
private static final String FIELD_LOCK_OWNER = JobTriggerDto.FIELD_LOCK + "." + JobTriggerLock.FIELD_OWNER;
private static final String FIELD_LAST_LOCK_OWNER = JobTriggerDto.FIELD_LOCK + "." + JobTriggerLock.FIELD_LAST_OWNER;
private static final String FIELD_PROGRESS = JobTriggerDto.FIELD_LOCK + "." + JobTriggerLock.FIELD_PROGRESS;
private static final String FIELD_LAST_LOCK_TIME = JobTriggerDto.FIELD_LOCK + "." + JobTriggerLock.FIELD_LAST_LOCK_TIME;

// Scheduling and bookkeeping fields of the trigger document.
private static final String FIELD_NEXT_TIME = JobTriggerDto.FIELD_NEXT_TIME;
private static final String FIELD_START_TIME = JobTriggerDto.FIELD_START_TIME;
private static final String FIELD_END_TIME = JobTriggerDto.FIELD_END_TIME;
private static final String FIELD_STATUS = JobTriggerDto.FIELD_STATUS;
private static final String FIELD_SCHEDULE = JobTriggerDto.FIELD_SCHEDULE;
private static final String FIELD_DATA = JobTriggerDto.FIELD_DATA;
private static final String FIELD_UPDATED_AT = JobTriggerDto.FIELD_UPDATED_AT;
private static final String FIELD_TRIGGERED_AT = JobTriggerDto.FIELD_TRIGGERED_AT;
private static final String FIELD_CONSTRAINTS = JobTriggerDto.FIELD_CONSTRAINTS;
// NOTE(review): local name says "last execution time" but it maps to the
// execution-duration field of the DTO — confirm against JobTriggerDto.
private static final String FIELD_LAST_EXECUTION_TIME = JobTriggerDto.FIELD_EXECUTION_DURATION;
private static final String FIELD_JOB_DEFINITION_TYPE = JobTriggerDto.FIELD_JOB_DEFINITION_TYPE;

// ID of this node; written into the trigger lock when this node owns a trigger.
private final String nodeId;
private final JacksonDBCollection<JobTriggerDto, ObjectId> db;
private final JobSchedulerClock clock;
private final SchedulerCapabilitiesService schedulerCapabilitiesService;
// Age after which another node may consider a held lock stale.
private final Duration lockExpirationDuration;
/**
 * Wires up the trigger collection and makes sure the indexes backing this
 * service's query patterns exist.
 *
 * @param mongoConnection active MongoDB connection; the "scheduler_triggers"
 *                        collection is wrapped with mongojack for JobTriggerDto mapping
 * @param mapper object mapper provider used by mongojack for (de)serialization
 * @param nodeId ID of this node; stored for later use (e.g. as lock owner value —
 *               NOTE(review): the lock usage itself is not visible in this chunk)
 * @param clock scheduler clock used to generate "updated_at" timestamps
 * @param schedulerCapabilitiesService provides this node's scheduler capabilities
 * @param lockExpirationDuration configured duration after which a trigger lock
 *                               is considered expired
 */
@Inject
public DBJobTriggerService(MongoConnection mongoConnection,
MongoJackObjectMapperProvider mapper,
NodeId nodeId,
JobSchedulerClock clock,
SchedulerCapabilitiesService schedulerCapabilitiesService,
@Named(LOCK_EXPIRATION_DURATION) Duration lockExpirationDuration) {
this.nodeId = nodeId.getNodeId();
this.clock = clock;
this.schedulerCapabilitiesService = schedulerCapabilitiesService;
this.lockExpirationDuration = lockExpirationDuration;
this.db = JacksonDBCollection.wrap(mongoConnection.getDatabase().getCollection(COLLECTION_NAME),
JobTriggerDto.class,
ObjectId.class,
mapper.get());
// One single-field index per queried/locked field; creation is idempotent.
db.createIndex(new BasicDBObject(FIELD_JOB_DEFINITION_ID, 1));
db.createIndex(new BasicDBObject(FIELD_LOCK_OWNER, 1));
db.createIndex(new BasicDBObject(FIELD_STATUS, 1));
db.createIndex(new BasicDBObject(FIELD_START_TIME, 1));
db.createIndex(new BasicDBObject(FIELD_END_TIME, 1));
db.createIndex(new BasicDBObject(FIELD_NEXT_TIME, 1));
db.createIndex(new BasicDBObject(FIELD_CONSTRAINTS, 1));
db.createIndex(new BasicDBObject(FIELD_JOB_DEFINITION_TYPE, 1));
}
/**
 * Loads all existing trigger records and returns them.
 *
 * @return all triggers, sorted by ID in descending order
 */
public List<JobTriggerDto> all() {
    // try-with-resources ensures the MongoDB cursor is always closed,
    // consistent with the cursor handling in getAllForJob().
    try (final DBCursor<JobTriggerDto> cursor = db.find().sort(DBSort.desc(FIELD_ID))) {
        return ImmutableList.copyOf(cursor.iterator());
    }
}
/**
 * Loads the record for the given ID.
 *
 * @param id record ID to load; must be a valid {@link ObjectId} hex string
 * @return filled optional when the record exists, an empty optional otherwise
 * @throws IllegalArgumentException if the given ID is not a valid ObjectId string
 */
public Optional<JobTriggerDto> get(String id) {
    return Optional.ofNullable(db.findOneById(new ObjectId(id)));
}
/**
 * Returns at most one trigger for the given job definition ID.
 *
 * TODO: Don't throw exception when there is more than one trigger for a job definition. (see source code)
 *
 * @param jobDefinitionId the job definition ID
 * @return the single trigger of the job definition, or an empty optional if none exists
 * @throws IllegalStateException if more than one trigger exists for the job definition
 * @throws IllegalArgumentException if the given ID is null or empty
 */
public Optional<JobTriggerDto> getOneForJob(String jobDefinitionId) {
    final List<JobTriggerDto> triggers = getAllForJob(jobDefinitionId);
    // We are currently expecting only one trigger per job definition. This will most probably change in the
    // future once we extend our scheduler usage.
    // TODO: Don't throw exception when there is more than one trigger for a job definition.
    //       To be able to do this, we need some kind of label system to make sure we can differentiate between
    //       automatically created triggers (e.g. by event definition) and manually created ones.
    if (triggers.size() > 1) {
        throw new IllegalStateException("More than one trigger for job definition <" + jobDefinitionId + ">");
    }
    return triggers.stream().findFirst();
}
/**
 * Returns all triggers for the given job definition ID.
 *
 * @param jobDefinitionId the job definition ID
 * @return all triggers of the job definition (may be empty)
 * @throws IllegalArgumentException if the given ID is null or empty
 */
public List<JobTriggerDto> getAllForJob(String jobDefinitionId) {
    if (isNullOrEmpty(jobDefinitionId)) {
        throw new IllegalArgumentException("jobDefinitionId cannot be null or empty");
    }
    final Query query = DBQuery.is(FIELD_JOB_DEFINITION_ID, jobDefinitionId);
    // Close the cursor deterministically once the result has been copied.
    try (final DBCursor<JobTriggerDto> cursor = db.find(query)) {
        return ImmutableList.copyOf(cursor.iterator());
    }
}
/**
* Returns all job triggers for the given job definition IDs, grouped by job definition ID.
*
* TODO: Don't throw exception when there is more than one trigger for a job definition. (see source code)
*
* @param jobDefinitionIds the job definition IDs
* @return list of found job triggers
*/
public Map> getForJobs(Collection jobDefinitionIds) {
if (jobDefinitionIds == null) {
throw new IllegalArgumentException("jobDefinitionIds cannot be null");
}
final Set queryValues = jobDefinitionIds.stream()
.filter(Objects::nonNull)
.filter(id -> !isNullOrEmpty(id))
.collect(Collectors.toSet());
final Query query = DBQuery.in(FIELD_JOB_DEFINITION_ID, queryValues);
final Map> groupedTriggers = StreamEx.of(db.find(query).toArray())
.groupingBy(JobTriggerDto::jobDefinitionId);
// We are currently expecting only one trigger per job definition. This will most probably change in the
// future once we extend our scheduler usage.
// TODO: Don't throw exception when there is more than one trigger for a job definition.
// To be able to do this, we need some kind of label system to make sure we can differentiate between
// automatically created triggers (e.g. by event definition) and manually created ones.
for (Map.Entry> entry : groupedTriggers.entrySet()) {
if (entry.getValue().size() > 1) {
throw new IllegalStateException("More than one trigger for job definition <" + entry.getKey() + ">");
}
}
return groupedTriggers;
}
/**
 * Persists the given trigger as a new database record. The passed
 * {@link JobTriggerDto} must not carry an ID, so an existing record can never
 * be overwritten by accident (which would destroy its lock and state data).
 *
 * @param trigger the new trigger object (without an ID set)
 * @return the newly created trigger object
 * @throws IllegalArgumentException if the passed trigger has an ID set
 * @throws NullPointerException if the passed trigger is null
 */
public JobTriggerDto create(JobTriggerDto trigger) {
    requireNonNull(trigger, "trigger cannot be null");
    if (trigger.id() == null) {
        return db.insert(trigger).getSavedObject();
    }
    // An ID on a "new" trigger would silently upsert over a live record.
    throw new IllegalArgumentException("New trigger must not have an ID");
}
/**
* Updates the given trigger record in the database. This method takes care of not overwriting any locks and
* state data with the update.
*
* @param trigger the trigger to update
* @return true when the update was successful, false otherwise
* @throws IllegalArgumentException if the passed trigger doesn't have an ID set
*/
public boolean update(JobTriggerDto trigger) {
requireNonNull(trigger, "trigger cannot be null");
// Make sure we don't update triggers that don't have an ID. This would create a new record instead of updating
// an existing one.
if (isNullOrEmpty(trigger.id())) {
throw new IllegalArgumentException("Trigger must have an ID");
}
// We don't want to allow updating all fields of the trigger. That's why we can't just use "save(JobTriggerDto)"
// because that would overwrite fields like "lock" and others we don't want to update.
final DBUpdate.Builder update = DBUpdate
.set(FIELD_START_TIME, trigger.startTime())
.set(FIELD_NEXT_TIME, trigger.nextTime())
.set(FIELD_DATA, trigger.data())
.set(FIELD_UPDATED_AT, clock.nowUTC());
if (trigger.endTime().isPresent()) {
update.set(FIELD_END_TIME, trigger.endTime());
}
// We cannot just use "update.set(FIELD_SCHEDULE, trigger.schedule()" to update the trigger because mongojack
// has an issue with serializing polymorphic classes and "$set": https://github.com/mongojack/mongojack/issues/101
// That's why JobSchedule objects have the "toDBUpdate()" method to give us all fields for the specific
// schedule implementation. (the fields can be different, depending on the schedule type)
final Optional
© 2015 - 2024 Weber Informatics LLC | Privacy Policy