/*
* Druid - a distributed column store.
* Copyright (C) 2012, 2013 Metamarkets Group Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
package io.druid.server.coordination;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Function;
import com.google.common.base.Predicates;
import com.google.common.collect.Ordering;
import com.google.inject.Inject;
import com.metamx.common.ISE;
import com.metamx.common.guava.FunctionalIterable;
import com.metamx.emitter.EmittingLogger;
import com.metamx.emitter.service.ServiceEmitter;
import com.metamx.emitter.service.ServiceMetricEvent;
import io.druid.client.CachingQueryRunner;
import io.druid.client.cache.Cache;
import io.druid.client.cache.CacheConfig;
import io.druid.collections.CountingMap;
import io.druid.guice.annotations.Processing;
import io.druid.guice.annotations.Smile;
import io.druid.query.BySegmentQueryRunner;
import io.druid.query.DataSource;
import io.druid.query.FinalizeResultsQueryRunner;
import io.druid.query.MetricsEmittingQueryRunner;
import io.druid.query.NoopQueryRunner;
import io.druid.query.Query;
import io.druid.query.QueryRunner;
import io.druid.query.QueryRunnerFactory;
import io.druid.query.QueryRunnerFactoryConglomerate;
import io.druid.query.QuerySegmentWalker;
import io.druid.query.QueryToolChest;
import io.druid.query.ReferenceCountingSegmentQueryRunner;
import io.druid.query.SegmentDescriptor;
import io.druid.query.TableDataSource;
import io.druid.query.spec.SpecificSegmentQueryRunner;
import io.druid.query.spec.SpecificSegmentSpec;
import io.druid.segment.ReferenceCountingSegment;
import io.druid.segment.Segment;
import io.druid.segment.loading.SegmentLoader;
import io.druid.segment.loading.SegmentLoadingException;
import io.druid.timeline.DataSegment;
import io.druid.timeline.TimelineObjectHolder;
import io.druid.timeline.VersionedIntervalTimeline;
import io.druid.timeline.partition.PartitionChunk;
import io.druid.timeline.partition.PartitionHolder;
import org.joda.time.Interval;
import javax.annotation.Nullable;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutorService;
/**
 * Manages the segments loaded on a Druid node and answers queries against them.
 * Loaded segments are registered in a {@link VersionedIntervalTimeline} per dataSource,
 * and the query runners built here are decorated with caching, metrics emission, and
 * reference counting so a segment cannot be closed while a query still runs against it.
 */
public class ServerManager implements QuerySegmentWalker
{
private static final EmittingLogger log = new EmittingLogger(ServerManager.class);
private final Object lock = new Object();
private final SegmentLoader segmentLoader;
private final QueryRunnerFactoryConglomerate conglomerate;
private final ServiceEmitter emitter;
private final ExecutorService exec;
  private final Map<String, VersionedIntervalTimeline<String, ReferenceCountingSegment>> dataSources;
  private final CountingMap<String> dataSourceSizes = new CountingMap<String>();
  private final CountingMap<String> dataSourceCounts = new CountingMap<String>();
private final Cache cache;
private final ObjectMapper objectMapper;
private final CacheConfig cacheConfig;
@Inject
public ServerManager(
SegmentLoader segmentLoader,
QueryRunnerFactoryConglomerate conglomerate,
ServiceEmitter emitter,
@Processing ExecutorService exec,
@Smile ObjectMapper objectMapper,
Cache cache,
CacheConfig cacheConfig
)
{
this.segmentLoader = segmentLoader;
this.conglomerate = conglomerate;
this.emitter = emitter;
this.exec = exec;
this.cache = cache;
this.objectMapper = objectMapper;
this.dataSources = new HashMap<>();
this.cacheConfig = cacheConfig;
}
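  /**
   * Returns a snapshot of the total byte size of loaded segments, keyed by dataSource.
   */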
  public Map<String, Long> getDataSourceSizes()
{
synchronized (dataSourceSizes) {
return dataSourceSizes.snapshot();
}
}
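  /**
   * Returns a snapshot of the number of loaded segments, keyed by dataSource.
   */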
  public Map<String, Long> getDataSourceCounts()
{
synchronized (dataSourceCounts) {
return dataSourceCounts.snapshot();
}
}
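  /**
   * Checks whether a segment's files are already available locally, without loading it.
   *
   * @param segment segment to check
   *
   * @return true if the segment is present in the local segment cache
   *
   * @throws SegmentLoadingException if the check itself fails
   */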
public boolean isSegmentCached(final DataSegment segment) throws SegmentLoadingException
{
return segmentLoader.isSegmentLoaded(segment);
}
/**
* Load a single segment.
*
* @param segment segment to load
*
* @return true if the segment was newly loaded, false if it was already loaded
*
* @throws SegmentLoadingException if the segment cannot be loaded
*/
public boolean loadSegment(final DataSegment segment) throws SegmentLoadingException
{
final Segment adapter;
try {
adapter = segmentLoader.getSegment(segment);
}
catch (SegmentLoadingException e) {
try {
segmentLoader.cleanup(segment);
}
catch (SegmentLoadingException e1) {
// ignore
}
throw e;
}
if (adapter == null) {
throw new SegmentLoadingException("Null adapter from loadSpec[%s]", segment.getLoadSpec());
}
synchronized (lock) {
String dataSource = segment.getDataSource();
      VersionedIntervalTimeline<String, ReferenceCountingSegment> loadedIntervals = dataSources.get(dataSource);
      if (loadedIntervals == null) {
        loadedIntervals = new VersionedIntervalTimeline<String, ReferenceCountingSegment>(Ordering.natural());
        dataSources.put(dataSource, loadedIntervals);
      }

      PartitionHolder<ReferenceCountingSegment> entry = loadedIntervals.findEntry(
segment.getInterval(),
segment.getVersion()
);
if ((entry != null) && (entry.getChunk(segment.getShardSpec().getPartitionNum()) != null)) {
log.warn("Told to load a adapter for a segment[%s] that already exists", segment.getIdentifier());
return false;
}
loadedIntervals.add(
segment.getInterval(),
segment.getVersion(),
segment.getShardSpec().createChunk(new ReferenceCountingSegment(adapter))
);
synchronized (dataSourceSizes) {
dataSourceSizes.add(dataSource, segment.getSize());
}
synchronized (dataSourceCounts) {
dataSourceCounts.add(dataSource, 1L);
}
return true;
}
}
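  /**
   * Drop a single segment: remove it from the timeline, close the underlying segment once
   * outstanding query references are released, and clean up the local segment files.
   *
   * @param segment segment to drop
   *
   * @throws SegmentLoadingException if cleanup of the segment files fails
   */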
public void dropSegment(final DataSegment segment) throws SegmentLoadingException
{
String dataSource = segment.getDataSource();
synchronized (lock) {
      VersionedIntervalTimeline<String, ReferenceCountingSegment> loadedIntervals = dataSources.get(dataSource);
if (loadedIntervals == null) {
log.info("Told to delete a queryable for a dataSource[%s] that doesn't exist.", dataSource);
return;
}
      PartitionChunk<ReferenceCountingSegment> removed = loadedIntervals.remove(
segment.getInterval(),
segment.getVersion(),
segment.getShardSpec().createChunk((ReferenceCountingSegment) null)
);
ReferenceCountingSegment oldQueryable = (removed == null) ? null : removed.getObject();
if (oldQueryable != null) {
synchronized (dataSourceSizes) {
dataSourceSizes.add(dataSource, -segment.getSize());
}
synchronized (dataSourceCounts) {
dataSourceCounts.add(dataSource, -1L);
}
try {
log.info("Attempting to close segment %s", segment.getIdentifier());
oldQueryable.close();
}
catch (IOException e) {
log.makeAlert(e, "Exception closing segment")
.addData("dataSource", dataSource)
.addData("segmentId", segment.getIdentifier())
.emit();
}
} else {
log.info(
"Told to delete a queryable on dataSource[%s] for interval[%s] and version [%s] that I don't have.",
dataSource,
segment.getInterval(),
segment.getVersion()
);
}
}
segmentLoader.cleanup(segment);
}
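  /**
   * Builds a query runner covering every loaded segment that overlaps the given intervals,
   * resolved against this server's timeline for the query's dataSource.
   */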
@Override
  public <T> QueryRunner<T> getQueryRunnerForIntervals(Query<T> query, Iterable<Interval> intervals)
  {
    final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
    if (factory == null) {
      throw new ISE("Unknown query type[%s].", query.getClass());
    }

    final QueryToolChest<T, Query<T>> toolChest = factory.getToolchest();

    DataSource dataSource = query.getDataSource();
    if (!(dataSource instanceof TableDataSource)) {
      throw new UnsupportedOperationException("data source type '" + dataSource.getClass().getName() + "' unsupported");
    }

    String dataSourceName;
    try {
      dataSourceName = ((TableDataSource) query.getDataSource()).getName();
    }
    catch (ClassCastException e) {
      throw new UnsupportedOperationException("Subqueries are only supported in the broker");
    }

    final VersionedIntervalTimeline<String, ReferenceCountingSegment> timeline = dataSources.get(dataSourceName);

    if (timeline == null) {
      return new NoopQueryRunner<T>();
    }

    FunctionalIterable<QueryRunner<T>> adapters = FunctionalIterable
        .create(intervals)
        .transformCat(
            new Function<Interval, Iterable<TimelineObjectHolder<String, ReferenceCountingSegment>>>()
            {
              @Override
              public Iterable<TimelineObjectHolder<String, ReferenceCountingSegment>> apply(Interval input)
              {
                return timeline.lookup(input);
              }
            }
        )
        .transformCat(
            new Function<TimelineObjectHolder<String, ReferenceCountingSegment>, Iterable<QueryRunner<T>>>()
            {
              @Override
              public Iterable<QueryRunner<T>> apply(
                  @Nullable final TimelineObjectHolder<String, ReferenceCountingSegment> holder
              )
              {
                if (holder == null) {
                  return null;
                }

                return FunctionalIterable
                    .create(holder.getObject())
                    .transform(
                        new Function<PartitionChunk<ReferenceCountingSegment>, QueryRunner<T>>()
                        {
                          @Override
                          public QueryRunner<T> apply(PartitionChunk<ReferenceCountingSegment> input)
                          {
                            return buildAndDecorateQueryRunner(
                                factory,
                                toolChest,
                                input.getObject(),
                                new SegmentDescriptor(
                                    holder.getInterval(),
                                    holder.getVersion(),
                                    input.getChunkNumber()
                                )
                            );
                          }
                        }
                    )
                    .filter(Predicates.<QueryRunner<T>>notNull());
              }
            }
        )
        .filter(
            Predicates.<QueryRunner<T>>notNull()
        );

    return new FinalizeResultsQueryRunner<T>(toolChest.mergeResults(factory.mergeRunners(exec, adapters)), toolChest);
  }
}
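  /**
   * Builds a query runner for an explicit set of segment descriptors, typically produced
   * by the broker when it fans a query out to individual servers.
   */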
@Override
  public <T> QueryRunner<T> getQueryRunnerForSegments(Query<T> query, Iterable<SegmentDescriptor> specs)
  {
    final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
    if (factory == null) {
      log.makeAlert("Unknown query type, [%s]", query.getClass())
         .addData("dataSource", query.getDataSource())
         .emit();
      return new NoopQueryRunner<T>();
    }

    final QueryToolChest<T, Query<T>> toolChest = factory.getToolchest();

    String dataSourceName;
    try {
      dataSourceName = ((TableDataSource) query.getDataSource()).getName();
    }
    catch (ClassCastException e) {
      throw new UnsupportedOperationException("Subqueries are only supported in the broker");
    }

    final VersionedIntervalTimeline<String, ReferenceCountingSegment> timeline = dataSources.get(dataSourceName);

    if (timeline == null) {
      return new NoopQueryRunner<T>();
    }

    FunctionalIterable<QueryRunner<T>> adapters = FunctionalIterable
        .create(specs)
        .transformCat(
            new Function<SegmentDescriptor, Iterable<QueryRunner<T>>>()
            {
              @Override
              @SuppressWarnings("unchecked")
              public Iterable<QueryRunner<T>> apply(SegmentDescriptor input)
              {
                final PartitionHolder<ReferenceCountingSegment> entry = timeline.findEntry(
                    input.getInterval(), input.getVersion()
                );
                if (entry == null) {
                  return null;
                }

                final PartitionChunk<ReferenceCountingSegment> chunk = entry.getChunk(input.getPartitionNumber());
                if (chunk == null) {
                  return null;
                }

                final ReferenceCountingSegment adapter = chunk.getObject();
                return Arrays.<QueryRunner<T>>asList(
                    buildAndDecorateQueryRunner(factory, toolChest, adapter, input)
                );
              }
            }
        )
        .filter(
            Predicates.<QueryRunner<T>>notNull()
        );

    return new FinalizeResultsQueryRunner<T>(toolChest.mergeResults(factory.mergeRunners(exec, adapters)), toolChest);
  }
}
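  /**
   * Wraps the base per-segment runner in the standard decorator chain, innermost to
   * outermost: reference counting (keeps the segment open for the life of the query),
   * result caching, by-segment result wrapping, metrics emission, and retargeting of
   * the query to the specific segment descriptor.
   */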
  private <T> QueryRunner<T> buildAndDecorateQueryRunner(
      final QueryRunnerFactory<T, Query<T>> factory,
      final QueryToolChest<T, Query<T>> toolChest,
      final ReferenceCountingSegment adapter,
      final SegmentDescriptor segmentDescriptor
  )
  {
    SpecificSegmentSpec segmentSpec = new SpecificSegmentSpec(segmentDescriptor);
    return new SpecificSegmentQueryRunner<T>(
        new MetricsEmittingQueryRunner<T>(
            emitter,
            new Function<Query<T>, ServiceMetricEvent.Builder>()
            {
              @Override
              public ServiceMetricEvent.Builder apply(@Nullable final Query<T> input)
              {
                return toolChest.makeMetricBuilder(input);
              }
            },
            new BySegmentQueryRunner<T>(
                adapter.getIdentifier(),
                adapter.getDataInterval().getStart(),
                new CachingQueryRunner<T>(
                    adapter.getIdentifier(),
                    segmentDescriptor,
                    objectMapper,
                    cache,
                    toolChest,
                    new ReferenceCountingSegmentQueryRunner<T>(factory, adapter),
                    cacheConfig
                )
            )
        ).withWaitMeasuredFromNow(),
        segmentSpec
    );
}
}
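
// A minimal usage sketch (not part of the original file). In production these
// dependencies are wired up by Guice through the @Inject constructor above; the
// variable names below are assumptions for illustration only.
//
//   ServerManager serverManager = new ServerManager(
//       segmentLoader,   // SegmentLoader that pulls segment files to local disk
//       conglomerate,    // maps each Query type to its QueryRunnerFactory
//       emitter,         // ServiceEmitter that receives query metrics
//       queryExecutor,   // the @Processing ExecutorService
//       smileMapper,     // the @Smile ObjectMapper, used for cache (de)serialization
//       cache,           // local or remote result-level Cache
//       cacheConfig
//   );
//
//   serverManager.loadSegment(dataSegment);
//   QueryRunner<Result> runner = serverManager.getQueryRunnerForIntervals(query, intervals);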