
/*
* Druid - a distributed column store.
* Copyright (C) 2012, 2013 Metamarkets Group Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
package io.druid.client;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Function;
import com.google.common.base.Supplier;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Ordering;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.google.inject.Inject;
import com.metamx.common.Pair;
import com.metamx.common.guava.BaseSequence;
import com.metamx.common.guava.LazySequence;
import com.metamx.common.guava.Sequence;
import com.metamx.common.guava.Sequences;
import com.metamx.emitter.EmittingLogger;
import io.druid.client.cache.Cache;
import io.druid.client.cache.CacheConfig;
import io.druid.client.selector.QueryableDruidServer;
import io.druid.client.selector.ServerSelector;
import io.druid.guice.annotations.Smile;
import io.druid.query.BySegmentResultValueClass;
import io.druid.query.CacheStrategy;
import io.druid.query.Query;
import io.druid.query.QueryRunner;
import io.druid.query.QueryToolChest;
import io.druid.query.QueryToolChestWarehouse;
import io.druid.query.Result;
import io.druid.query.SegmentDescriptor;
import io.druid.query.aggregation.MetricManipulatorFns;
import io.druid.query.spec.MultipleSpecificSegmentSpec;
import io.druid.timeline.DataSegment;
import io.druid.timeline.TimelineObjectHolder;
import io.druid.timeline.VersionedIntervalTimeline;
import io.druid.timeline.partition.PartitionChunk;
import org.joda.time.DateTime;
import org.joda.time.Interval;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executors;
/**
 * QueryRunner that runs on the broker: it answers as much of a query as it can
 * from the segment-level result cache, sends the remainder to the servers that
 * are serving the missing segments, and optionally populates the cache with
 * their responses.
 */
public class CachingClusteredClient<T> implements QueryRunner<T>
{
  private static final EmittingLogger log = new EmittingLogger(CachingClusteredClient.class);

  private final QueryToolChestWarehouse warehouse;
  private final TimelineServerView serverView;
  private final Cache cache;
  private final ObjectMapper objectMapper;
  private final CacheConfig cacheConfig;

  @Inject
  public CachingClusteredClient(
      QueryToolChestWarehouse warehouse,
      TimelineServerView serverView,
      Cache cache,
      @Smile ObjectMapper objectMapper,
      CacheConfig cacheConfig
  )
  {
    this.warehouse = warehouse;
    this.serverView = serverView;
    this.cache = cache;
    this.objectMapper = objectMapper;
    this.cacheConfig = cacheConfig;

    // Evict a segment's cached results as soon as the cluster stops serving it.
    serverView.registerSegmentCallback(
        Executors.newFixedThreadPool(
            1, new ThreadFactoryBuilder().setDaemon(true).setNameFormat("CCClient-ServerView-CB-%d").build()
        ),
        new ServerView.BaseSegmentCallback()
        {
          @Override
          public ServerView.CallbackAction segmentRemoved(DruidServer server, DataSegment segment)
          {
            CachingClusteredClient.this.cache.close(segment.getIdentifier());
            return ServerView.CallbackAction.CONTINUE;
          }
        }
    );
  }
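
  /**
   * Runs the query by serving whatever it can from the segment-level cache and
   * sending the remainder to the servers that hold the missing segments, then
   * lazily merging both streams of per-segment results in time order.
   */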
  @Override
  public Sequence<T> run(final Query<T> query)
  {
    final QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
    final CacheStrategy<T, Object, Query<T>> strategy = toolChest.getCacheStrategy(query);

    final Map<DruidServer, List<SegmentDescriptor>> serverSegments = Maps.newTreeMap();

    final List<Pair<DateTime, byte[]>> cachedResults = Lists.newArrayList();
    final Map<String, CachePopulator> cachePopulatorMap = Maps.newHashMap();

    // Caching is only possible when the query type supplies a cache strategy.
    final boolean useCache = query.getContextUseCache(true)
                             && strategy != null
                             && cacheConfig.isUseCache();
    final boolean populateCache = query.getContextPopulateCache(true)
                                  && strategy != null
                                  && cacheConfig.isPopulateCache();
    final boolean isBySegment = query.getContextBySegment(false);

    ImmutableMap.Builder<String, Object> contextBuilder = new ImmutableMap.Builder<>();

    final int priority = query.getContextPriority(0);
    contextBuilder.put("priority", priority);

    if (populateCache) {
      // Ask servers for raw per-segment results so each segment can be cached
      // individually; the cache is populated here on the broker, not downstream.
      contextBuilder.put(CacheConfig.POPULATE_CACHE, false);
      contextBuilder.put("bySegment", true);
    }
    contextBuilder.put("intermediate", true);

    final Query<T> rewrittenQuery = query.withOverriddenContext(contextBuilder.build());

    VersionedIntervalTimeline<String, ServerSelector> timeline = serverView.getTimeline(query.getDataSource());
    if (timeline == null) {
      return Sequences.empty();
    }
    // build set of segments to query
    Set<Pair<ServerSelector, SegmentDescriptor>> segments = Sets.newLinkedHashSet();

    List<TimelineObjectHolder<String, ServerSelector>> serversLookup = Lists.newLinkedList();
    for (Interval interval : rewrittenQuery.getIntervals()) {
      serversLookup.addAll(timeline.lookup(interval));
    }

    // Let tool chest filter out unneeded segments
    final List<TimelineObjectHolder<String, ServerSelector>> filteredServersLookup =
        toolChest.filterSegments(query, serversLookup);

    for (TimelineObjectHolder<String, ServerSelector> holder : filteredServersLookup) {
      for (PartitionChunk<ServerSelector> chunk : holder.getObject()) {
        ServerSelector selector = chunk.getObject();
        final SegmentDescriptor descriptor = new SegmentDescriptor(
            holder.getInterval(), holder.getVersion(), chunk.getChunkNumber()
        );

        segments.add(Pair.of(selector, descriptor));
      }
    }
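
    // The query-level cache key namespaces every per-segment cache entry below;
    // query types without a CacheStrategy cannot be cached at all.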
    final byte[] queryCacheKey;
    if (strategy != null) {
      queryCacheKey = strategy.computeCacheKey(query);
    } else {
      queryCacheKey = null;
    }

    if (queryCacheKey != null) {
      Map<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> cacheKeys = Maps.newHashMap();
      for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
        final Cache.NamedKey segmentCacheKey = CacheUtil.computeSegmentCacheKey(
            segment.lhs.getSegment().getIdentifier(),
            segment.rhs,
            queryCacheKey
        );
        cacheKeys.put(segment, segmentCacheKey);
      }

      // Pull cached segments from cache and remove from set of segments to query
      final Map<Cache.NamedKey, byte[]> cachedValues;
      if (useCache) {
        cachedValues = cache.getBulk(cacheKeys.values());
      } else {
        cachedValues = ImmutableMap.of();
      }

      for (Map.Entry<Pair<ServerSelector, SegmentDescriptor>, Cache.NamedKey> entry : cacheKeys.entrySet()) {
        Pair<ServerSelector, SegmentDescriptor> segment = entry.getKey();
        Cache.NamedKey segmentCacheKey = entry.getValue();
        final Interval segmentQueryInterval = segment.rhs.getInterval();

        final byte[] cachedValue = cachedValues.get(segmentCacheKey);
        if (cachedValue != null) {
          // remove cached segment from set of segments to query
          segments.remove(segment);
          cachedResults.add(Pair.of(segmentQueryInterval.getStart(), cachedValue));
        } else if (populateCache) {
          // Cache miss: remember a populator so fresh results for this segment
          // can be written back to the cache as they stream in.
          final String segmentIdentifier = segment.lhs.getSegment().getIdentifier();
          cachePopulatorMap.put(
              String.format("%s_%s", segmentIdentifier, segmentQueryInterval),
              new CachePopulator(cache, objectMapper, segmentCacheKey)
          );
        }
      }
    }
    // Compile list of all segments not pulled from cache
    for (Pair<ServerSelector, SegmentDescriptor> segment : segments) {
      final QueryableDruidServer queryableDruidServer = segment.lhs.pick();

      if (queryableDruidServer == null) {
        log.error("No servers found for %s?! How can this be?!", segment.rhs);
      } else {
        final DruidServer server = queryableDruidServer.getServer();
        List<SegmentDescriptor> descriptors = serverSegments.get(server);

        if (descriptors == null) {
          descriptors = Lists.newArrayList();
          serverSegments.put(server, descriptors);
        }

        descriptors.add(segment.rhs);
      }
    }
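
    // Defer all cache deserialization and remote calls until the sequence is
    // actually consumed; per-segment results are merged by interval start time.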
    return new LazySequence<>(
        new Supplier<Sequence<T>>()
        {
          @Override
          public Sequence<T> get()
          {
            ArrayList<Pair<DateTime, Sequence<T>>> listOfSequences = Lists.newArrayList();
            addSequencesFromServer(listOfSequences);
            addSequencesFromCache(listOfSequences);

            Collections.sort(
                listOfSequences,
                Ordering.natural().onResultOf(Pair.<DateTime, Sequence<T>>lhsFn())
            );

            final Sequence<Sequence<T>> seq = Sequences.simple(
                Iterables.transform(listOfSequences, Pair.<DateTime, Sequence<T>>rhsFn())
            );
            if (strategy == null) {
              return toolChest.mergeSequences(seq);
            } else {
              return strategy.mergeSequences(seq);
            }
          }

          private void addSequencesFromCache(ArrayList<Pair<DateTime, Sequence<T>>> listOfSequences)
          {
            if (strategy == null) {
              return;
            }

            // The listing was cut off at the next statement; the rest of this
            // method body is reconstructed from the CacheStrategy contract and
            // the imports above.
            final Function<Object, T> pullFromCacheFunction = strategy.pullFromCache();
            final TypeReference<Object> cacheObjectClazz = strategy.getCacheObjectClazz();
            for (Pair<DateTime, byte[]> cachedResultPair : cachedResults) {
              final byte[] cachedResult = cachedResultPair.rhs;
              // Deserialize the cached bytes lazily, one iterator per consumption.
              Sequence<Object> cachedSequence = new BaseSequence<>(
                  new BaseSequence.IteratorMaker<Object, Iterator<Object>>()
                  {
                    @Override
                    public Iterator<Object> make()
                    {
                      try {
                        if (cachedResult.length == 0) {
                          return Iterators.emptyIterator();
                        }
                        return objectMapper.readValues(
                            objectMapper.getFactory().createParser(cachedResult),
                            cacheObjectClazz
                        );
                      }
                      catch (IOException e) {
                        throw Throwables.propagate(e);
                      }
                    }

                    @Override
                    public void cleanup(Iterator<Object> iterFromMake)
                    {
                    }
                  }
              );
              listOfSequences.add(
                  Pair.of(cachedResultPair.lhs, Sequences.map(cachedSequence, pullFromCacheFunction))
              );
            }
          }

          // [Listing truncated in the source page: the original file continues
          //  with addSequencesFromServer(...) and the CachePopulator helper class.]
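
// ----------------------------------------------------------------------------
// Usage sketch (illustrative, not part of the original file; the query and
// result types below are assumptions). A broker obtains this class from Guice
// and uses it purely through the QueryRunner<T> interface:
//
//   QueryRunner<Result<TimeseriesResultValue>> runner = cachingClusteredClient;
//   Sequence<Result<TimeseriesResultValue>> results = runner.run(timeseriesQuery);
//   List<Result<TimeseriesResultValue>> rows = Sequences.toList(
//       results, Lists.<Result<TimeseriesResultValue>>newArrayList()
//   );
//
// Nothing touches the cache or the network until the Sequence is consumed,
// because run(...) returns a LazySequence over the per-segment results.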