/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.indices.ttl;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.SimpleCollector;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.bulk.TransportBulkAction;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fieldvisitor.FieldsVisitor;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.TTLFieldMapper;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.VersionFieldMapper;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.indices.IndicesService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
/**
* A node-level service that deletes expired docs on the node's primary shards.
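*
* The purge interval is controlled by the dynamic {@code indices.ttl.interval} setting
* (default 60s). A minimal sketch of tuning it at runtime; illustrative only, where
* {@code client} is a hypothetical {@code Client} instance:
*
* <pre>{@code
* client.admin().cluster().prepareUpdateSettings()
*     .setTransientSettings(Settings.builder().put("indices.ttl.interval", "30s").build())
*     .get();
* }</pre>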
*/
public class IndicesTTLService extends AbstractLifecycleComponent {
public static final Setting<TimeValue> INDICES_TTL_INTERVAL_SETTING =
Setting.positiveTimeSetting("indices.ttl.interval", TimeValue.timeValueSeconds(60),
Property.Dynamic, Property.NodeScope);
private final ClusterService clusterService;
private final IndicesService indicesService;
private final TransportBulkAction bulkAction;
private final int bulkSize;
private PurgerThread purgerThread;
@Inject
public IndicesTTLService(Settings settings, ClusterService clusterService, IndicesService indicesService, ClusterSettings clusterSettings, TransportBulkAction bulkAction) {
super(settings);
this.clusterService = clusterService;
this.indicesService = indicesService;
TimeValue interval = INDICES_TTL_INTERVAL_SETTING.get(settings);
this.bulkAction = bulkAction;
this.bulkSize = this.settings.getAsInt("indices.ttl.bulk_size", 10000);
this.purgerThread = new PurgerThread(EsExecutors.threadName(settings, "[ttl_expire]"), interval);
clusterSettings.addSettingsUpdateConsumer(INDICES_TTL_INTERVAL_SETTING, this.purgerThread::resetInterval);
}
@Override
protected void doStart() {
this.purgerThread.start();
}
@Override
protected void doStop() {
try {
this.purgerThread.shutdown();
} catch (InterruptedException e) {
// we intentionally do not restore the interrupt flag; we're about to shut down anyway
}
}
@Override
protected void doClose() {
}
private class PurgerThread extends Thread {
private final AtomicBoolean running = new AtomicBoolean(true);
private final Notifier notifier;
private final CountDownLatch shutdownLatch = new CountDownLatch(1);
PurgerThread(String name, TimeValue interval) {
super(name);
setDaemon(true);
this.notifier = new Notifier(interval);
}
public void shutdown() throws InterruptedException {
if (running.compareAndSet(true, false)) {
notifier.doNotify();
shutdownLatch.await();
}
}
public void resetInterval(TimeValue interval) {
notifier.setTimeout(interval);
}
@Override
public void run() {
try {
while (running.get()) {
try {
List<IndexShard> shardsToPurge = getShardsToPurge();
purgeShards(shardsToPurge);
} catch (Exception e) {
if (running.get()) {
logger.warn("failed to execute ttl purge", e);
}
}
if (running.get()) {
notifier.await();
}
}
} finally {
shutdownLatch.countDown();
}
}
/**
* Returns the shards to purge, i.e. the local started primary shards that have TTL enabled and {@code index.ttl.disable_purge} set to false.
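*
* A shard qualifies only when its index maps at least one type with {@code _ttl} enabled.
* A minimal sketch of creating such an index; illustrative only, pre-6.x mapping syntax,
* where {@code client} is a hypothetical {@code Client} instance:
*
* <pre>{@code
* client.admin().indices().prepareCreate("logs")
*     .addMapping("event", "{\"event\": {\"_ttl\": {\"enabled\": true}}}")
*     .get();
* }</pre>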
*/
private List<IndexShard> getShardsToPurge() {
List<IndexShard> shardsToPurge = new ArrayList<>();
MetaData metaData = clusterService.state().metaData();
for (IndexService indexService : indicesService) {
// check the value of disable_purge for this index
IndexMetaData indexMetaData = metaData.index(indexService.index());
if (indexMetaData == null) {
continue;
}
if (indexService.getIndexSettings().isTTLPurgeDisabled()) {
continue;
}
// check if ttl is enabled for at least one type of this index
boolean hasTTLEnabled = false;
for (String type : indexService.mapperService().types()) {
DocumentMapper documentType = indexService.mapperService().documentMapper(type);
if (documentType.TTLFieldMapper().enabled()) {
hasTTLEnabled = true;
break;
}
}
if (hasTTLEnabled) {
for (IndexShard indexShard : indexService) {
if (indexShard.state() == IndexShardState.STARTED && indexShard.routingEntry().primary() && indexShard.routingEntry().started()) {
shardsToPurge.add(indexShard);
}
}
}
}
return shardsToPurge;
}
public TimeValue getInterval() {
return notifier.getTimeout();
}
}
private void purgeShards(List<IndexShard> shardsToPurge) {
for (IndexShard shardToPurge : shardsToPurge) {
Query query = shardToPurge.mapperService().fullName(TTLFieldMapper.NAME).rangeQuery(null, System.currentTimeMillis(), false,
true, null);
Engine.Searcher searcher = shardToPurge.acquireSearcher("indices_ttl");
try {
logger.debug("[{}][{}] purging shard", shardToPurge.routingEntry().index(), shardToPurge.routingEntry().id());
ExpiredDocsCollector expiredDocsCollector = new ExpiredDocsCollector();
searcher.searcher().search(query, expiredDocsCollector);
List<DocToPurge> docsToPurge = expiredDocsCollector.getDocsToPurge();
BulkRequest bulkRequest = new BulkRequest();
for (DocToPurge docToPurge : docsToPurge) {
bulkRequest.add(new DeleteRequest().index(shardToPurge.routingEntry().getIndexName())
.type(docToPurge.type).id(docToPurge.id).version(docToPurge.version).routing(docToPurge.routing));
bulkRequest = processBulkIfNeeded(bulkRequest, false);
}
processBulkIfNeeded(bulkRequest, true);
} catch (Exception e) {
logger.warn("failed to purge", e);
} finally {
searcher.close();
}
}
}
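// Note: the range query built above matches documents whose _ttl expiry lies in the past.
// Conceptually it is close to the search-side query below -- a sketch, not necessarily the
// exact Lucene query the field mapper produces:
//
//   QueryBuilders.rangeQuery(TTLFieldMapper.NAME).lte(System.currentTimeMillis())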
private static class DocToPurge {
public final String type;
public final String id;
public final long version;
public final String routing;
DocToPurge(String type, String id, long version, String routing) {
this.type = type;
this.id = id;
this.version = version;
this.routing = routing;
}
}
private class ExpiredDocsCollector extends SimpleCollector {
private LeafReaderContext context;
private List<DocToPurge> docsToPurge = new ArrayList<>();
private NumericDocValues versions;
ExpiredDocsCollector() {
}
@Override
public void setScorer(Scorer scorer) {
}
@Override
public boolean needsScores() {
return false;
}
@Override
public void collect(int doc) {
try {
FieldsVisitor fieldsVisitor = new FieldsVisitor(false);
context.reader().document(doc, fieldsVisitor);
Uid uid = fieldsVisitor.uid();
final long version = versions == null ? Versions.NOT_FOUND : versions.get(doc);
docsToPurge.add(new DocToPurge(uid.type(), uid.id(), version, fieldsVisitor.routing()));
} catch (Exception e) {
logger.trace("failed to collect doc", e);
}
}
@Override
public void doSetNextReader(LeafReaderContext context) throws IOException {
this.context = context;
this.versions = context.reader().getNumericDocValues(VersionFieldMapper.NAME);
}
public List<DocToPurge> getDocsToPurge() {
return this.docsToPurge;
}
}
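// ExpiredDocsCollector loads, per expired hit, only the stored _uid and _routing fields
// (FieldsVisitor(false) skips _source) plus the _version doc values. The version feeds the
// versioned DeleteRequest in purgeShards(), so a document that was re-indexed after this
// pass collected it fails the delete with a version conflict instead of being purged.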
private BulkRequest processBulkIfNeeded(BulkRequest bulkRequest, boolean force) {
if ((force && bulkRequest.numberOfActions() > 0) || bulkRequest.numberOfActions() >= bulkSize) {
try {
bulkAction.executeBulk(bulkRequest, new ActionListener<BulkResponse>() {
@Override
public void onResponse(BulkResponse bulkResponse) {
if (bulkResponse.hasFailures()) {
int failedItems = 0;
for (BulkItemResponse response : bulkResponse) {
if (response.isFailed()) failedItems++;
}
if (logger.isTraceEnabled()) {
logger.trace("bulk deletion failures for [{}]/[{}] items, failure message: [{}]", failedItems, bulkResponse.getItems().length, bulkResponse.buildFailureMessage());
} else {
logger.error("bulk deletion failures for [{}]/[{}] items", failedItems, bulkResponse.getItems().length);
}
} else {
logger.trace("bulk deletion took {}ms", bulkResponse.getTookInMillis());
}
}
@Override
public void onFailure(Exception e) {
if (logger.isTraceEnabled()) {
logger.trace("failed to execute bulk", e);
} else {
logger.warn("failed to execute bulk: ", e);
}
}
});
} catch (Exception e) {
logger.warn("failed to process bulk", e);
}
bulkRequest = new BulkRequest();
}
return bulkRequest;
}
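// Deletes are flushed once bulkSize actions accumulate (default 10000, configurable via
// "indices.ttl.bulk_size"), or on force=true when at least one action is pending. Worked
// example: purging 25000 expired docs with the defaults issues two bulks of 10000 from the
// purge loop and one final forced bulk of 5000.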
private static final class Notifier {
private final ReentrantLock lock = new ReentrantLock();
private final Condition condition = lock.newCondition();
private volatile TimeValue timeout;
Notifier(TimeValue timeout) {
assert timeout != null;
this.timeout = timeout;
}
public void await() {
lock.lock();
try {
condition.await(timeout.millis(), TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
// we intentionally do not restore the interrupt flag; we're about to shut down anyway
} finally {
lock.unlock();
}
}
public void setTimeout(TimeValue timeout) {
assert timeout != null;
this.timeout = timeout;
doNotify();
}
public TimeValue getTimeout() {
return timeout;
}
public void doNotify() {
lock.lock();
try {
condition.signalAll();
} finally {
lock.unlock();
}
}
}
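// Notifier implements a resettable timed wait: await() blocks up to the current timeout,
// doNotify() wakes any waiter early, and setTimeout() both swaps the interval and wakes the
// waiter so the new value takes effect immediately. A hypothetical usage sketch:
//
//   Notifier notifier = new Notifier(TimeValue.timeValueSeconds(60));
//   notifier.await();                                   // returns after <= 60s or on doNotify()
//   notifier.setTimeout(TimeValue.timeValueSeconds(5)); // wakes the waiter; next await() uses 5s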
}