
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.indexing.seekablestream;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Supplier;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import org.apache.druid.data.input.Committer;
import org.apache.druid.data.input.InputRow;
import org.apache.druid.data.input.impl.InputRowParser;
import org.apache.druid.discovery.DiscoveryDruidNode;
import org.apache.druid.discovery.LookupNodeService;
import org.apache.druid.discovery.NodeType;
import org.apache.druid.indexer.IngestionState;
import org.apache.druid.indexer.TaskStatus;
import org.apache.druid.indexing.common.IngestionStatsAndErrorsTaskReport;
import org.apache.druid.indexing.common.IngestionStatsAndErrorsTaskReportData;
import org.apache.druid.indexing.common.LockGranularity;
import org.apache.druid.indexing.common.TaskLockType;
import org.apache.druid.indexing.common.TaskRealtimeMetricsMonitorBuilder;
import org.apache.druid.indexing.common.TaskReport;
import org.apache.druid.indexing.common.TaskToolbox;
import org.apache.druid.indexing.common.actions.CheckPointDataSourceMetadataAction;
import org.apache.druid.indexing.common.actions.ResetDataSourceMetadataAction;
import org.apache.druid.indexing.common.actions.SegmentLockAcquireAction;
import org.apache.druid.indexing.common.actions.TimeChunkLockAcquireAction;
import org.apache.druid.indexing.common.stats.RowIngestionMeters;
import org.apache.druid.indexing.common.stats.RowIngestionMetersFactory;
import org.apache.druid.indexing.common.task.IndexTaskUtils;
import org.apache.druid.indexing.common.task.RealtimeIndexTask;
import org.apache.druid.indexing.seekablestream.common.OrderedPartitionableRecord;
import org.apache.druid.indexing.seekablestream.common.OrderedSequenceNumber;
import org.apache.druid.indexing.seekablestream.common.RecordSupplier;
import org.apache.druid.indexing.seekablestream.common.StreamPartition;
import org.apache.druid.indexing.seekablestream.supervisor.SeekableStreamSupervisor;
import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.java.util.common.collect.Utils;
import org.apache.druid.java.util.common.parsers.ParseException;
import org.apache.druid.java.util.emitter.EmittingLogger;
import org.apache.druid.segment.indexing.RealtimeIOConfig;
import org.apache.druid.segment.realtime.FireDepartment;
import org.apache.druid.segment.realtime.FireDepartmentMetrics;
import org.apache.druid.segment.realtime.appenderator.Appenderator;
import org.apache.druid.segment.realtime.appenderator.AppenderatorDriverAddResult;
import org.apache.druid.segment.realtime.appenderator.AppenderatorsManager;
import org.apache.druid.segment.realtime.appenderator.SegmentsAndMetadata;
import org.apache.druid.segment.realtime.appenderator.StreamAppenderatorDriver;
import org.apache.druid.segment.realtime.firehose.ChatHandler;
import org.apache.druid.segment.realtime.firehose.ChatHandlerProvider;
import org.apache.druid.server.security.Access;
import org.apache.druid.server.security.Action;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.timeline.DataSegment;
import org.apache.druid.utils.CircularBuffer;
import org.apache.druid.utils.CollectionUtils;
import org.joda.time.DateTime;
import javax.annotation.Nullable;
import javax.servlet.http.HttpServletRequest;
import javax.validation.constraints.NotNull;
import javax.ws.rs.Consumes;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;
/**
* Abstract base runner containing the run logic shared by seekable stream indexing tasks (e.g. Kafka, Kinesis).
*
* @param <PartitionIdType>    Partition Number Type
* @param <SequenceOffsetType> Sequence Number Type
*/
public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOffsetType> implements ChatHandler
{
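// Concrete runners bind the type parameters to their stream's native types: for example, the Kafka runner uses
// <Integer, Long> (partition id, offset) and the Kinesis runner uses <String, String> (shard id, sequence number).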
public enum Status
{
NOT_STARTED,
STARTING,
READING,
PAUSED,
PUBLISHING
}
private static final EmittingLogger log = new EmittingLogger(SeekableStreamIndexTaskRunner.class);
static final String METADATA_NEXT_PARTITIONS = "nextPartitions";
static final String METADATA_PUBLISH_PARTITIONS = "publishPartitions";
private final Map<PartitionIdType, SequenceOffsetType> endOffsets;
// lastReadOffsets are the last offsets that were read and processed.
private final Map<PartitionIdType, SequenceOffsetType> lastReadOffsets = new HashMap<>();
// currOffsets are what should become the start offsets of the next reader, if we stopped reading now. They are
// initialized to the start offsets when the task begins.
private final ConcurrentMap<PartitionIdType, SequenceOffsetType> currOffsets = new ConcurrentHashMap<>();
private final ConcurrentMap<PartitionIdType, SequenceOffsetType> lastPersistedOffsets = new ConcurrentHashMap<>();
// The pause lock and associated conditions are to support coordination between the Jetty threads and the main
// ingestion loop. The goal is to provide callers of the API a guarantee that if pause() returns successfully
// the ingestion loop has been stopped at the returned sequences and will not ingest any more data until resumed. The
// fields are used as follows (every step requires acquiring [pauseLock]):
// Pausing:
// - In pause(), [pauseRequested] is set to true and then execution waits for [status] to change to PAUSED, with the
// condition checked when [hasPaused] is signalled.
// - In possiblyPause() called from the main loop, if [pauseRequested] is true, [status] is set to PAUSED,
// [hasPaused] is signalled, and execution pauses until [pauseRequested] becomes false, either by being set or by
// the [pauseMillis] timeout elapsing. [pauseRequested] is checked when [shouldResume] is signalled.
// Resuming:
// - In resume(), [pauseRequested] is set to false, [shouldResume] is signalled, and execution waits for [status] to
// change to something other than PAUSED, with the condition checked when [shouldResume] is signalled.
// - In possiblyPause(), when [shouldResume] is signalled, if [pauseRequested] has become false the pause loop ends,
// [status] is changed to STARTING and [shouldResume] is signalled.
private final Lock pauseLock = new ReentrantLock();
private final Condition hasPaused = pauseLock.newCondition();
private final Condition shouldResume = pauseLock.newCondition();
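// The handshake described above, reduced to a self-contained illustrative sketch (the class and field names below
// are hypothetical and exist only to show the lock/two-condition pattern; the real logic additionally tracks
// Status and honors a pause timeout):
//
//   class PauseHandshake
//   {
//     private final ReentrantLock lock = new ReentrantLock();
//     private final Condition hasPaused = lock.newCondition();
//     private final Condition shouldResume = lock.newCondition();
//     private boolean pauseRequested = false;
//     private boolean paused = false;
//
//     // HTTP (Jetty) thread: request a pause and wait until the worker acknowledges it.
//     void pause() throws InterruptedException
//     {
//       lock.lock();
//       try {
//         pauseRequested = true;
//         while (!paused) {
//           hasPaused.await();
//         }
//       }
//       finally {
//         lock.unlock();
//       }
//     }
//
//     // HTTP thread: clear the request and wake the worker.
//     void resume()
//     {
//       lock.lock();
//       try {
//         pauseRequested = false;
//         shouldResume.signalAll();
//       }
//       finally {
//         lock.unlock();
//       }
//     }
//
//     // Worker loop, between units of work: block while a pause is requested.
//     void possiblyPause() throws InterruptedException
//     {
//       lock.lock();
//       try {
//         while (pauseRequested) {
//           paused = true;
//           hasPaused.signalAll();
//           shouldResume.await();
//         }
//         paused = false;
//       }
//       finally {
//         lock.unlock();
//       }
//     }
//   }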
protected final AtomicBoolean stopRequested = new AtomicBoolean(false);
private final AtomicBoolean publishOnStop = new AtomicBoolean(false);
// [statusLock] is used to synchronize the Jetty thread calling stopGracefully() with the main run thread. It prevents
// the main run thread from switching into a publishing state while the stopGracefully() thread thinks it's still in
// a pre-publishing state. This is important because stopGracefully() will try to use the [stopRequested] flag to stop
// the main thread where possible, but this flag is not honored once publishing has begun so in this case we must
// interrupt the thread. The lock ensures that if the run thread is about to transition into publishing state, it
// blocks until after stopGracefully() has set [stopRequested] and then does a final check on [stopRequested] before
// transitioning to publishing state.
private final Object statusLock = new Object();
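// Illustrative sketch of the final-check pattern the comment above describes (simplified; the real transition is
// performed by the main run loop):
//
//   synchronized (statusLock) {
//     if (stopRequested.get()) {
//       return;                    // stopGracefully() won the race; never enter the publishing state
//     }
//     status = Status.PUBLISHING;  // from here on, stopGracefully() must interrupt the run thread instead
//   }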
protected final Lock pollRetryLock = new ReentrantLock();
protected final Condition isAwaitingRetry = pollRetryLock.newCondition();
private final SeekableStreamIndexTask<PartitionIdType, SequenceOffsetType> task;
private final SeekableStreamIndexTaskIOConfig<PartitionIdType, SequenceOffsetType> ioConfig;
private final SeekableStreamIndexTaskTuningConfig tuningConfig;
private final InputRowParser<ByteBuffer> parser;
private final AuthorizerMapper authorizerMapper;
private final Optional<ChatHandlerProvider> chatHandlerProvider;
private final CircularBuffer<Throwable> savedParseExceptions;
private final String stream;
private final RowIngestionMeters rowIngestionMeters;
private final AppenderatorsManager appenderatorsManager;
private final Set<String> publishingSequences = Sets.newConcurrentHashSet();
private final List<ListenableFuture<SegmentsAndMetadata>> publishWaitList = new ArrayList<>();
private final List<ListenableFuture<SegmentsAndMetadata>> handOffWaitList = new ArrayList<>();
private final LockGranularity lockGranularityToUse;
private volatile DateTime startTime;
private volatile Status status = Status.NOT_STARTED; // this is only ever set by the task runner thread (runThread)
private volatile TaskToolbox toolbox;
private volatile Thread runThread;
private volatile Appenderator appenderator;
private volatile StreamAppenderatorDriver driver;
private volatile IngestionState ingestionState;
protected volatile boolean pauseRequested = false;
private volatile long nextCheckpointTime;
private volatile CopyOnWriteArrayList<SequenceMetadata<PartitionIdType, SequenceOffsetType>> sequences;
private volatile Throwable backgroundThreadException;
public SeekableStreamIndexTaskRunner(
final SeekableStreamIndexTask<PartitionIdType, SequenceOffsetType> task,
final InputRowParser<ByteBuffer> parser,
final AuthorizerMapper authorizerMapper,
final Optional<ChatHandlerProvider> chatHandlerProvider,
final CircularBuffer<Throwable> savedParseExceptions,
final RowIngestionMetersFactory rowIngestionMetersFactory,
final AppenderatorsManager appenderatorsManager,
final LockGranularity lockGranularityToUse
)
{
Preconditions.checkNotNull(task);
this.task = task;
this.ioConfig = task.getIOConfig();
this.tuningConfig = task.getTuningConfig();
this.parser = parser;
this.authorizerMapper = authorizerMapper;
this.chatHandlerProvider = chatHandlerProvider;
this.savedParseExceptions = savedParseExceptions;
this.stream = ioConfig.getStartSequenceNumbers().getStream();
this.rowIngestionMeters = rowIngestionMetersFactory.createRowIngestionMeters();
this.appenderatorsManager = appenderatorsManager;
this.endOffsets = new ConcurrentHashMap<>(ioConfig.getEndSequenceNumbers().getPartitionSequenceNumberMap());
this.sequences = new CopyOnWriteArrayList<>();
this.ingestionState = IngestionState.NOT_STARTED;
this.lockGranularityToUse = lockGranularityToUse;
resetNextCheckpointTime();
}
public TaskStatus run(TaskToolbox toolbox)
{
try {
return runInternal(toolbox);
}
catch (Exception e) {
log.error(e, "Encountered exception while running task.");
final String errorMsg = Throwables.getStackTraceAsString(e);
toolbox.getTaskReportFileWriter().write(task.getId(), getTaskCompletionReports(errorMsg));
return TaskStatus.failure(
task.getId(),
errorMsg
);
}
}
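/**
 * Returns the partitions whose start sequence number for the given sequence must be treated as exclusive (i.e.
 * already read). For the task's initial start offsets this comes straight from the ioConfig; for later
 * (checkpointed) sequences, every partition is exclusive when the stream uses inclusive end offsets
 * (Kinesis-style) and none are when end offsets are exclusive (Kafka-style).
 */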
private Set<PartitionIdType> computeExclusiveStartPartitionsForSequence(
Map<PartitionIdType, SequenceOffsetType> sequenceStartOffsets
)
{
if (sequenceStartOffsets.equals(ioConfig.getStartSequenceNumbers().getPartitionSequenceNumberMap())) {
return ioConfig.getStartSequenceNumbers().getExclusivePartitions();
} else {
return isEndOffsetExclusive() ? Collections.emptySet() : sequenceStartOffsets.keySet();
}
}
@VisibleForTesting
public void setToolbox(TaskToolbox toolbox)
{
this.toolbox = toolbox;
}
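// Builds the initial sequence list: either restore previously persisted sequences (restoreSequences()), or derive
// them from the supervisor-provided checkpoints (CHECKPOINTS_CTX_KEY). Each adjacent pair of checkpoints becomes a
// closed, checkpointed sequence; the final checkpoint becomes the open sequence that ends at endOffsets.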
@VisibleForTesting
public void initializeSequences() throws IOException
{
if (!restoreSequences()) {
final TreeMap<Integer, Map<PartitionIdType, SequenceOffsetType>> checkpoints = getCheckPointsFromContext(
toolbox,
task.getContextValue(SeekableStreamSupervisor.CHECKPOINTS_CTX_KEY)
);
if (checkpoints != null) {
Iterator<Map.Entry<Integer, Map<PartitionIdType, SequenceOffsetType>>> sequenceOffsets = checkpoints.entrySet()
.iterator();
Map.Entry<Integer, Map<PartitionIdType, SequenceOffsetType>> previous = sequenceOffsets.next();
while (sequenceOffsets.hasNext()) {
Map.Entry<Integer, Map<PartitionIdType, SequenceOffsetType>> current = sequenceOffsets.next();
final Set<PartitionIdType> exclusiveStartPartitions = computeExclusiveStartPartitionsForSequence(
previous.getValue()
);
addSequence(
new SequenceMetadata<>(
previous.getKey(),
StringUtils.format("%s_%s", ioConfig.getBaseSequenceName(), previous.getKey()),
previous.getValue(),
current.getValue(),
true,
exclusiveStartPartitions
)
);
previous = current;
}
final Set<PartitionIdType> exclusiveStartPartitions = computeExclusiveStartPartitionsForSequence(
previous.getValue()
);
addSequence(
new SequenceMetadata<>(
previous.getKey(),
StringUtils.format("%s_%s", ioConfig.getBaseSequenceName(), previous.getKey()),
previous.getValue(),
endOffsets,
false,
exclusiveStartPartitions
)
);
} else {
addSequence(
new SequenceMetadata<>(
0,
StringUtils.format("%s_%s", ioConfig.getBaseSequenceName(), 0),
ioConfig.getStartSequenceNumbers().getPartitionSequenceNumberMap(),
endOffsets,
false,
ioConfig.getStartSequenceNumbers().getExclusivePartitions()
)
);
}
}
log.info("Starting with sequences: %s", sequences);
}
private TaskStatus runInternal(TaskToolbox toolbox) throws Exception
{
log.info("SeekableStream indexing task starting up!");
startTime = DateTimes.nowUtc();
status = Status.STARTING;
setToolbox(toolbox);
initializeSequences();
if (chatHandlerProvider.isPresent()) {
log.info("Found chat handler of class[%s]", chatHandlerProvider.get().getClass().getName());
chatHandlerProvider.get().register(task.getId(), this, false);
} else {
log.warn("No chat handler detected");
}
runThread = Thread.currentThread();
// Set up FireDepartmentMetrics
final FireDepartment fireDepartmentForMetrics = new FireDepartment(
task.getDataSchema(),
new RealtimeIOConfig(null, null),
null
);
FireDepartmentMetrics fireDepartmentMetrics = fireDepartmentForMetrics.getMetrics();
toolbox.getMonitorScheduler()
.addMonitor(TaskRealtimeMetricsMonitorBuilder.build(task, fireDepartmentForMetrics, rowIngestionMeters));
final String lookupTier = task.getContextValue(RealtimeIndexTask.CTX_KEY_LOOKUP_TIER);
final LookupNodeService lookupNodeService = lookupTier == null ?
toolbox.getLookupNodeService() :
new LookupNodeService(lookupTier);
final DiscoveryDruidNode discoveryDruidNode = new DiscoveryDruidNode(
toolbox.getDruidNode(),
NodeType.PEON,
ImmutableMap.of(
toolbox.getDataNodeService().getName(), toolbox.getDataNodeService(),
lookupNodeService.getName(), lookupNodeService
)
);
Throwable caughtExceptionOuter = null;
try (final RecordSupplier<PartitionIdType, SequenceOffsetType> recordSupplier = task.newTaskRecordSupplier()) {
if (appenderatorsManager.shouldTaskMakeNodeAnnouncements()) {
toolbox.getDataSegmentServerAnnouncer().announce();
toolbox.getDruidNodeAnnouncer().announce(discoveryDruidNode);
}
appenderator = task.newAppenderator(fireDepartmentMetrics, toolbox);
driver = task.newDriver(appenderator, toolbox, fireDepartmentMetrics);
// Start up, set up initial sequences.
final Object restoredMetadata = driver.startJob(
segmentId -> {
try {
if (lockGranularityToUse == LockGranularity.SEGMENT) {
return toolbox.getTaskActionClient().submit(
new SegmentLockAcquireAction(
TaskLockType.EXCLUSIVE,
segmentId.getInterval(),
segmentId.getVersion(),
segmentId.getShardSpec().getPartitionNum(),
1000L
)
).isOk();
} else {
return toolbox.getTaskActionClient().submit(
new TimeChunkLockAcquireAction(
TaskLockType.EXCLUSIVE,
segmentId.getInterval(),
1000L
)
) != null;
}
}
catch (IOException e) {
throw new RuntimeException(e);
}
}
);
if (restoredMetadata == null) {
// no persist has happened so far
// so either this is a brand new task or replacement of a failed task
Preconditions.checkState(sequences.get(0).startOffsets.entrySet().stream().allMatch(
partitionOffsetEntry ->
createSequenceNumber(partitionOffsetEntry.getValue()).compareTo(
createSequenceNumber(ioConfig.getStartSequenceNumbers()
.getPartitionSequenceNumberMap()
.get(partitionOffsetEntry.getKey())
)) >= 0
), "Sequence offsets are not compatible with start sequence offsets of task");
currOffsets.putAll(sequences.get(0).startOffsets);
} else {
@SuppressWarnings("unchecked")
final Map<String, Object> restoredMetadataMap = (Map) restoredMetadata;
final SeekableStreamEndSequenceNumbers<PartitionIdType, SequenceOffsetType> restoredNextPartitions =
deserializePartitionsFromMetadata(
toolbox.getObjectMapper(),
restoredMetadataMap.get(METADATA_NEXT_PARTITIONS)
);
currOffsets.putAll(restoredNextPartitions.getPartitionSequenceNumberMap());
// Sanity checks.
if (!restoredNextPartitions.getStream().equals(ioConfig.getStartSequenceNumbers().getStream())) {
throw new ISE(
"WTF?! Restored stream[%s] but expected stream[%s]",
restoredNextPartitions.getStream(),
ioConfig.getStartSequenceNumbers().getStream()
);
}
if (!currOffsets.keySet().equals(ioConfig.getStartSequenceNumbers().getPartitionSequenceNumberMap().keySet())) {
throw new ISE(
"WTF?! Restored partitions[%s] but expected partitions[%s]",
currOffsets.keySet(),
ioConfig.getStartSequenceNumbers().getPartitionSequenceNumberMap().keySet()
);
}
// sequences size can be 0 only when all sequences got published and task stopped before it could finish
// which is super rare
if (sequences.size() == 0 || getLastSequenceMetadata().isCheckpointed()) {
this.endOffsets.putAll(sequences.size() == 0
? currOffsets
: getLastSequenceMetadata().getEndOffsets());
log.info("End sequences changed to [%s]", endOffsets);
}
}
// Filter out partitions with END_OF_SHARD markers since these partitions have already been fully read. This
// should have been done by the supervisor already so this is defensive.
int numPreFilterPartitions = currOffsets.size();
if (currOffsets.entrySet().removeIf(x -> isEndOfShard(x.getValue()))) {
log.info(
"Removed [%d] partitions from assignment which have already been closed",
numPreFilterPartitions - currOffsets.size()
);
}
// Initialize lastReadOffsets immediately after restoring currOffsets. This is only done when end offsets are
// inclusive, because the point of initializing lastReadOffsets here is so we know when to skip the start record.
// When end offsets are exclusive, we never skip the start record.
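// For example, Kafka-style offsets are exclusive (a restored offset is the next record to read), whereas
// Kinesis-style sequence numbers are inclusive (a restored number was already read and must be skipped on resume).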
if (!isEndOffsetExclusive()) {
for (Map.Entry<PartitionIdType, SequenceOffsetType> entry : currOffsets.entrySet()) {
final boolean isAtStart = entry.getValue().equals(
ioConfig.getStartSequenceNumbers().getPartitionSequenceNumberMap().get(entry.getKey())
);
if (!isAtStart || ioConfig.getStartSequenceNumbers().getExclusivePartitions().contains(entry.getKey())) {
lastReadOffsets.put(entry.getKey(), entry.getValue());
}
}
}
// Set up committer.
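// The supplier snapshots currOffsets at the moment a persist is initiated, so the metadata committed with that
// persist (under METADATA_NEXT_PARTITIONS) reflects exactly the offsets covered by the persisted rows.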
final Supplier<Committer> committerSupplier = () -> {
final Map<PartitionIdType, SequenceOffsetType> snapshot = ImmutableMap.copyOf(currOffsets);
lastPersistedOffsets.clear();
lastPersistedOffsets.putAll(snapshot);
return new Committer()
{
@Override
public Object getMetadata()
{
return ImmutableMap.of(METADATA_NEXT_PARTITIONS, new SeekableStreamEndSequenceNumbers<>(stream, snapshot));
}
@Override
public void run()
{
// Do nothing.
}
};
};
// restart publishing of sequences (if any)
maybePersistAndPublishSequences(committerSupplier);
Set<StreamPartition<PartitionIdType>> assignment = assignPartitions(recordSupplier);
possiblyResetDataSourceMetadata(toolbox, recordSupplier, assignment);
seekToStartingSequence(recordSupplier, assignment);
ingestionState = IngestionState.BUILD_SEGMENTS;
// Main loop.
// Could eventually support leader/follower mode (for keeping replicas more in sync)
boolean stillReading = !assignment.isEmpty();
status = Status.READING;
Throwable caughtExceptionInner = null;
try {
while (stillReading) {
if (possiblyPause()) {
// The partition assignments may have changed while paused by a call to setEndOffsets() so reassign
// partitions upon resuming. Don't call "seekToStartingSequence" after "assignPartitions", because there's
// no need to re-seek here. All we're going to be doing is dropping partitions.
assignment = assignPartitions(recordSupplier);
possiblyResetDataSourceMetadata(toolbox, recordSupplier, assignment);
if (assignment.isEmpty()) {
log.info("All partitions have been fully read");
publishOnStop.set(true);
stopRequested.set(true);
}
}
// if stop is requested or task's end sequence is set by call to setEndOffsets method with finish set to true
if (stopRequested.get() || sequences.size() == 0 || getLastSequenceMetadata().isCheckpointed()) {
status = Status.PUBLISHING;
}
if (stopRequested.get()) {
break;
}
if (backgroundThreadException != null) {
throw new RuntimeException(backgroundThreadException);
}
checkPublishAndHandoffFailure();
maybePersistAndPublishSequences(committerSupplier);
// calling getRecords() ensures that exceptions specific to Kafka/Kinesis, like OffsetOutOfRangeException,
// are handled in the subclasses.
List<OrderedPartitionableRecord<PartitionIdType, SequenceOffsetType>> records = getRecords(
recordSupplier,
toolbox
);
// note: getRecords() also updates assignment
stillReading = !assignment.isEmpty();
SequenceMetadata<PartitionIdType, SequenceOffsetType> sequenceToCheckpoint = null;
for (OrderedPartitionableRecord<PartitionIdType, SequenceOffsetType> record : records) {
final boolean shouldProcess = verifyRecordInRange(record.getPartitionId(), record.getSequenceNumber());
log.trace(
"Got stream[%s] partition[%s] sequenceNumber[%s], shouldProcess[%s].",
record.getStream(),
record.getPartitionId(),
record.getSequenceNumber(),
shouldProcess
);
if (shouldProcess) {
try {
final List<byte[]> valueBytess = record.getData();
final List<InputRow> rows;
if (valueBytess == null || valueBytess.isEmpty()) {
rows = Utils.nullableListOf((InputRow) null);
} else {
rows = new ArrayList<>();
for (byte[] valueBytes : valueBytess) {
rows.addAll(parser.parseBatch(ByteBuffer.wrap(valueBytes)));
}
}
boolean isPersistRequired = false;
final SequenceMetadata<PartitionIdType, SequenceOffsetType> sequenceToUse = sequences
.stream()
.filter(sequenceMetadata -> sequenceMetadata.canHandle(this, record))
.findFirst()
.orElse(null);
if (sequenceToUse == null) {
throw new ISE(
"WTH?! cannot find any valid sequence for record with partition [%s] and sequenceNumber [%s]. Current sequences: %s",
record.getPartitionId(),
record.getSequenceNumber(),
sequences
);
}
for (InputRow row : rows) {
if (row != null && task.withinMinMaxRecordTime(row)) {
final AppenderatorDriverAddResult addResult = driver.add(
row,
sequenceToUse.getSequenceName(),
committerSupplier,
true,
// do not allow incremental persists to happen until all the rows from this batch
// of rows are indexed
false
);
if (addResult.isOk()) {
// If the number of rows in the segment exceeds the threshold after adding a row,
// move the segment out from the active segments of BaseAppenderatorDriver to make a new segment.
final boolean isPushRequired = addResult.isPushRequired(
tuningConfig.getPartitionsSpec().getMaxRowsPerSegment(),
tuningConfig.getPartitionsSpec().getMaxTotalRows()
);
if (isPushRequired && !sequenceToUse.isCheckpointed()) {
sequenceToCheckpoint = sequenceToUse;
}
isPersistRequired |= addResult.isPersistRequired();
} else {
// Failure to allocate segment puts determinism at risk, bail out to be safe.
// May want configurable behavior here at some point.
// If we allow continuing, then consider blacklisting the interval for a while to avoid constant checks.
throw new ISE("Could not allocate segment for row with timestamp[%s]", row.getTimestamp());
}
if (addResult.getParseException() != null) {
handleParseException(addResult.getParseException(), record);
} else {
rowIngestionMeters.incrementProcessed();
}
} else {
rowIngestionMeters.incrementThrownAway();
}
}
if (isPersistRequired) {
Futures.addCallback(
driver.persistAsync(committerSupplier.get()),
new FutureCallback<Object>()