
/*
* Copyright 2010-2014 Ning, Inc.
* Copyright 2014-2020 Groupon, Inc
* Copyright 2020-2020 Equinix, Inc
* Copyright 2014-2020 The Billing Project, LLC
*
* The Billing Project licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.killbill.queue;

import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

import org.joda.time.DateTime;
import org.killbill.CreatorName;
import org.killbill.clock.Clock;
import org.killbill.commons.metrics.api.MetricRegistry;
import org.killbill.queue.api.PersistentQueueConfig;
import org.killbill.queue.api.PersistentQueueConfig.PersistentQueueMode;
import org.killbill.queue.api.PersistentQueueEntryLifecycleState;
import org.killbill.queue.dao.EventEntryModelDao;
import org.killbill.queue.dao.QueueSqlDao;
import org.skife.jdbi.v2.IDBI;
import org.skife.jdbi.v2.Transaction;
import org.skife.jdbi.v2.TransactionStatus;
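
/**
 * Polling-based implementation of {@code DBBackedQueue}: ready entries are periodically fetched
 * from the queue table and claimed by updating their processing state and owner. Supports both
 * POLLING mode (several nodes compete for the same rows) and STICKY_POLLING mode (each node only
 * processes the entries it created, so claims can be issued as a single batch update).
 */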
public class DBBackedQueueWithPolling<T extends EventEntryModelDao> extends DBBackedQueue<T> {

    public DBBackedQueueWithPolling(final Clock clock,
                                    final IDBI dbi,
                                    final Class<? extends QueueSqlDao<T>> sqlDaoClass,
                                    final PersistentQueueConfig config,
                                    final String dbBackedQId,
                                    final MetricRegistry metricRegistry) {
        super(clock, dbi, sqlDaoClass, config, dbBackedQId, metricRegistry);
    }
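
    /**
     * Nothing to set up for polling: simply log the configured persistent queue mode.
     */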
    @Override
    public void initialize() {
        log.info("{} Initialized mode={}",
                 DB_QUEUE_LOG_ID, config.getPersistentQueueMode());
    }
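
    /**
     * No-op: polling mode holds no additional resources that need to be released.
     */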
    @Override
    public void close() {
    }
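
    /**
     * Inserts a new entry as part of the caller's transaction, delegating to the base class
     * {@code safeInsertEntry} helper.
     */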
    @Override
    public void insertEntryFromTransaction(final QueueSqlDao<T> transactional, final T entry) {
        safeInsertEntry(transactional, entry);
    }
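
    /**
     * Fetches up to {@code getMaxEntriesClaimed()} ready entries and claims them, all within a
     * single transaction. Returns the claimed entries along with the time spent, in nanoseconds.
     */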
    @Override
    public ReadyEntriesWithMetrics<T> getReadyEntries() {
        final long ini = System.nanoTime();
        final List<T> claimedEntries = executeTransaction(new Transaction<>() {
            @Override
            public List<T> inTransaction(final QueueSqlDao<T> queueSqlDao, final TransactionStatus status) throws Exception {
                final DateTime now = clock.getUTCNow();
                final List<T> entriesToClaim = fetchReadyEntries(now, config.getMaxEntriesClaimed(), queueSqlDao);
                List<T> claimedEntries = Collections.emptyList();
                if (!entriesToClaim.isEmpty()) {
                    log.debug("{} Entries to claim: {}", DB_QUEUE_LOG_ID, entriesToClaim);
                    claimedEntries = claimEntries(now, entriesToClaim, queueSqlDao);
                }
                return claimedEntries;
            }
        });
        return new ReadyEntriesWithMetrics<T>(claimedEntries, System.nanoTime() - ini);
    }
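
    /**
     * Records a processing failure for the given entry by persisting its updated error count and
     * the current timestamp, in a dedicated transaction.
     */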
    @Override
    public void updateOnError(final T entry) {
        executeTransaction(new Transaction<Void, QueueSqlDao<T>>() {
            @Override
            public Void inTransaction(final QueueSqlDao<T> transactional, final TransactionStatus status) throws Exception {
                transactional.updateOnError(entry.getRecordId(), clock.getUTCNow().toDate(), entry.getErrorCount(), config.getTableName());
                return null;
            }
        });
    }
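
    /**
     * Re-inserts entries that were left behind (e.g. claimed by a node that is no longer around):
     * each entry is reset to AVAILABLE, re-dated to {@code now}, attributed to this node as the
     * creating owner, and its processing owner is cleared before the batch insert.
     */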
    @Override
    protected void insertReapedEntriesFromTransaction(final QueueSqlDao<T> transactional, final List<T> entriesLeftBehind, final DateTime now) {
        for (final T entry : entriesLeftBehind) {
            entry.setCreatedDate(now);
            entry.setProcessingState(PersistentQueueEntryLifecycleState.AVAILABLE);
            entry.setCreatingOwner(CreatorName.get());
            entry.setProcessingOwner(null);
        }
        transactional.insertEntries(entriesLeftBehind, config.getTableName());
    }
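
    /**
     * Fetches up to {@code maxEntries} entries that are ready to be processed. In POLLING mode the
     * owner is null, so entries created by any node are eligible; otherwise only entries bound to
     * this node (via {@code CreatorName.get()}) are returned.
     */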
    private List<T> fetchReadyEntries(final DateTime now, final int maxEntries, final QueueSqlDao<T> queueSqlDao) {
        final String owner = config.getPersistentQueueMode() == PersistentQueueMode.POLLING ? null : CreatorName.get();
        final long ini = System.nanoTime();
        final List<T> result = queueSqlDao.getReadyEntries(now.toDate(), maxEntries, owner, config.getTableName());
        rawGetEntriesTime.update(System.nanoTime() - ini, TimeUnit.NANOSECONDS);
        return result;
    }
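
    /**
     * Claims the candidate entries according to the configured mode: one entry at a time in
     * POLLING mode (several nodes may compete for the same rows), or as a single batch update in
     * STICKY_POLLING mode.
     */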
    private List<T> claimEntries(final DateTime now, final List<T> candidates, final QueueSqlDao<T> queueSqlDao) {
        switch (config.getPersistentQueueMode()) {
            case POLLING:
                return sequentialClaimEntries(now, candidates, queueSqlDao);
            case STICKY_POLLING:
                return batchClaimEntries(now, candidates, queueSqlDao);
            default:
                throw new IllegalStateException("Unsupported PersistentQueueMode " + config.getPersistentQueueMode());
        }
    }
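
    /**
     * Claims all candidates with a single batch UPDATE. If the number of updated rows does not
     * match the number of candidates, the rows are re-read and only those now IN_PROCESSING and
     * owned by this node are returned.
     */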
    private List<T> batchClaimEntries(final DateTime utcNow, final List<T> candidates, final QueueSqlDao<T> queueSqlDao) {
        if (candidates.isEmpty()) {
            return Collections.emptyList();
        }
        final Date now = utcNow.toDate();
        final Date nextAvailable = utcNow.plus(config.getClaimedTime().getMillis()).toDate();
        final String owner = CreatorName.get();
        final List<Long> recordIds = candidates.stream()
                                               .map(input -> input == null ? Long.valueOf(-1L) : input.getRecordId())
                                               .collect(Collectors.toUnmodifiableList());
        final long ini = System.nanoTime();
        final int resultCount = queueSqlDao.claimEntries(recordIds, owner, nextAvailable, config.getTableName());
        rawClaimEntriesTime.update(System.nanoTime() - ini, TimeUnit.NANOSECONDS);
        // We should ALWAYS see the same number, since we are in STICKY_POLLING mode and there is only one thread claiming entries.
        // The two cases below are kept for safety (the code was written when this was multi-threaded), and we log a warning (this will eventually be removed).
        if (resultCount == candidates.size()) {
            log.debug("{} batchClaimEntries claimed (recordIds={}, now={}, nextAvailable={}, owner={}): {}",
                      DB_QUEUE_LOG_ID, recordIds, now, nextAvailable, owner, candidates);
            return candidates;
        } else {
            final List<T> maybeClaimedEntries = queueSqlDao.getEntriesFromIds(recordIds, config.getTableName());
            final StringBuilder stringBuilder = new StringBuilder();
            for (int i = 0; i < maybeClaimedEntries.size(); i++) {
                final T eventEntryModelDao = maybeClaimedEntries.get(i);
                if (i > 0) {
                    stringBuilder.append(",");
                }
                stringBuilder.append("[recordId=").append(eventEntryModelDao.getRecordId())
                             .append(",processingState=").append(eventEntryModelDao.getProcessingState())
                             .append(",processingOwner=").append(eventEntryModelDao.getProcessingOwner())
                             .append(",processingAvailableDate=").append(eventEntryModelDao.getNextAvailableDate())
                             .append("]");
            }
            log.warn("{} batchClaimEntries only claimed partial entries {}/{} (now={}, nextAvailable={}, owner={}): {}",
                     DB_QUEUE_LOG_ID, resultCount, candidates.size(), now, nextAvailable, owner, stringBuilder.toString());
            return maybeClaimedEntries.stream()
                                      .filter(input -> input != null &&
                                                       input.getProcessingState() == PersistentQueueEntryLifecycleState.IN_PROCESSING &&
                                                       owner.equals(input.getProcessingOwner()))
                                      .collect(Collectors.toUnmodifiableList());
        }
    }

    //
    // In non-sticky mode, we don't optimize the claim update because we can't synchronize easily -- we could rely on a global lock,
    // but we are aiming for performance and that would not be the right choice.
    //
    private List<T> sequentialClaimEntries(final DateTime now, final List<T> candidates, final QueueSqlDao<T> queueSqlDao) {
        return candidates.stream()
                         .filter(input -> claimEntry(now, input, queueSqlDao))
                         .collect(Collectors.toUnmodifiableList());
    }
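
    /**
     * Attempts to claim a single entry by marking this node as its processing owner and pushing
     * its next-available date forward by the configured claim time. Returns true if exactly one
     * row was updated, i.e. this node won the claim.
     */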
    private boolean claimEntry(final DateTime now, final T entry, final QueueSqlDao<T> queueSqlDao) {
        final Date nextAvailable = now.plus(config.getClaimedTime().getMillis()).toDate();
        final long ini = System.nanoTime();
        final int claimEntry = queueSqlDao.claimEntry(entry.getRecordId(), CreatorName.get(), nextAvailable, config.getTableName());
        rawClaimEntryTime.update(System.nanoTime() - ini, TimeUnit.NANOSECONDS);
        final boolean claimed = (claimEntry == 1);
        if (claimed) {
            log.debug("{} Claimed entry {}", DB_QUEUE_LOG_ID, entry);
        }
        return claimed;
    }
}