org.apache.flink.cep.nfa.NFA Maven / Gradle / Ivy
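The class listed below is internal to Flink CEP; applications normally reach it only through the DataStream-level Pattern API. For orientation, here is a minimal, hedged sketch of that public entry point (the String event type, the sample data and the condition are illustrative assumptions, not part of this file):

import java.util.List;
import java.util.Map;

import org.apache.flink.cep.CEP;
import org.apache.flink.cep.PatternSelectFunction;
import org.apache.flink.cep.PatternStream;
import org.apache.flink.cep.pattern.Pattern;
import org.apache.flink.cep.pattern.conditions.SimpleCondition;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class CepEntryPointSketch {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<String> input = env.fromElements("alpha", "amber", "beta");

        // Illustrative pattern: match any single event that starts with "a".
        Pattern<String, ?> pattern =
                Pattern.<String>begin("first")
                        .where(
                                new SimpleCondition<String>() {
                                    @Override
                                    public boolean filter(String value) {
                                        return value.startsWith("a");
                                    }
                                });

        // CEP.pattern wires the pattern into a CepOperator; that operator compiles the
        // pattern to the NFA shown below and keeps one NFA per key (or a single global
        // NFA for non-keyed input), as described in the class Javadoc.
        PatternStream<String> matches = CEP.pattern(input, pattern);

        matches.select(
                        new PatternSelectFunction<String, String>() {
                            @Override
                            public String select(Map<String, List<String>> match) {
                                return match.get("first").get(0);
                            }
                        })
                .print();

        env.execute("CEP entry-point sketch");
    }
}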
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.cep.nfa;

import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.common.functions.DefaultOpenContext;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.api.common.functions.util.FunctionUtils;
import org.apache.flink.api.common.typeutils.CompositeTypeSerializerSnapshot;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.api.common.typeutils.TypeSerializerSnapshot;
import org.apache.flink.api.common.typeutils.base.StringSerializer;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.cep.nfa.aftermatch.AfterMatchSkipStrategy;
import org.apache.flink.cep.nfa.compiler.NFACompiler;
import org.apache.flink.cep.nfa.sharedbuffer.EventId;
import org.apache.flink.cep.nfa.sharedbuffer.NodeId;
import org.apache.flink.cep.nfa.sharedbuffer.SharedBuffer;
import org.apache.flink.cep.nfa.sharedbuffer.SharedBufferAccessor;
import org.apache.flink.cep.pattern.conditions.IterativeCondition;
import org.apache.flink.cep.time.TimerService;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.memory.DataInputView;
import org.apache.flink.core.memory.DataOutputView;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.CollectionUtil;
import org.apache.flink.util.FlinkRuntimeException;
import org.apache.flink.util.Preconditions;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Queue;
import java.util.Stack;

import static org.apache.flink.cep.nfa.MigrationUtils.deserializeComputationStates;

/**
 * Non-deterministic finite automaton implementation.
 *
 * <p>The {@link org.apache.flink.cep.operator.CepOperator CEP operator} keeps one NFA per key, for
 * keyed input streams, and a single global NFA for non-keyed ones. When an event gets processed, it
 * updates the NFA's internal state machine.
 *
 * <p>An event that belongs to a partially matched sequence is kept in an internal {@link
 * SharedBuffer buffer}, which is a memory-optimized data-structure exactly for this purpose. Events
 * in the buffer are removed when all the matched sequences that contain them are:
 *
 * <ol>
 *   <li>emitted (success)
 *   <li>discarded (patterns containing NOT)
 *   <li>timed-out (windowed patterns)
 * </ol>
 *
 * <p>The implementation is strongly based on the paper "Efficient Pattern Matching over Event
 * Streams".
 *
 * @param <T> Type of the processed events
 * @see <a href="https://people.cs.umass.edu/~yanlei/publications/sase-sigmod08.pdf">
 *     https://people.cs.umass.edu/~yanlei/publications/sase-sigmod08.pdf</a>
 */
public class NFA<T> {

    /**
     * A set of all the valid NFA states, as returned by the {@link NFACompiler NFACompiler}. These
     * are directly derived from the user-specified pattern.
     */
    private final Map<String, State<T>> states;

    /**
     * The lengths of a windowed pattern, as specified using the {@link
     * org.apache.flink.cep.pattern.Pattern#within(Time, WithinType) Pattern.within(Time,
     * WithinType)} method with {@code WithinType.PREVIOUS_AND_CURRENT}.
     */
    private final Map<String, Long> windowTimes;

    /**
     * The length of a windowed pattern, as specified using the {@link
     * org.apache.flink.cep.pattern.Pattern#within(Time) Pattern.within(Time)} method.
     */
    private final long windowTime;

    /**
     * A flag indicating if we want timed-out patterns (in case of windowed patterns) to be emitted
     * ({@code true}), or silently discarded ({@code false}).
     */
    private final boolean handleTimeout;

    public NFA(
            final Collection<State<T>> validStates,
            final Map<String, Long> windowTimes,
            final long windowTime,
            final boolean handleTimeout) {
        this.windowTime = windowTime;
        this.handleTimeout = handleTimeout;
        this.states = loadStates(validStates);
        this.windowTimes = windowTimes;
    }

    private Map<String, State<T>> loadStates(final Collection<State<T>> validStates) {
        Map<String, State<T>> tmp = CollectionUtil.newHashMapWithExpectedSize(4);
        for (State<T> state : validStates) {
            tmp.put(state.getName(), state);
        }
        return Collections.unmodifiableMap(tmp);
    }

    public long getWindowTime() {
        return windowTime;
    }

    @VisibleForTesting
    public Collection<State<T>> getStates() {
        return states.values();
    }

    public NFAState createInitialNFAState() {
        Queue<ComputationState> startingStates = new LinkedList<>();
        for (State<T> state : states.values()) {
            if (state.isStart()) {
                startingStates.add(ComputationState.createStartState(state.getName()));
            }
        }
        return new NFAState(startingStates);
    }

    private State<T> getState(ComputationState state) {
        return states.get(state.getCurrentStateName());
    }

    private boolean isStartState(ComputationState state) {
        State<T> stateObject = getState(state);
        if (stateObject == null) {
            throw new FlinkRuntimeException(
                    "State "
                            + state.getCurrentStateName()
                            + " does not exist in the NFA. NFA has states "
                            + states.values());
        }
        return stateObject.isStart();
    }

    private boolean isStopState(ComputationState state) {
        State<T> stateObject = getState(state);
        if (stateObject == null) {
            throw new FlinkRuntimeException(
                    "State "
                            + state.getCurrentStateName()
                            + " does not exist in the NFA. NFA has states "
                            + states.values());
        }
        return stateObject.isStop();
    }

    private boolean isFinalState(ComputationState state) {
        State<T> stateObject = getState(state);
        if (stateObject == null) {
            throw new FlinkRuntimeException(
                    "State "
                            + state.getCurrentStateName()
                            + " does not exist in the NFA. NFA has states "
                            + states.values());
        }
        return stateObject.isFinal();
    }

    /**
     * Initialization method for the NFA. It is called before any element is passed and thus
     * suitable for one time setup work.
     *
     * @param cepRuntimeContext runtime context of the enclosing operator
     * @param conf The configuration containing the parameters attached to the contract.
     */
    public void open(RuntimeContext cepRuntimeContext, Configuration conf) throws Exception {
        for (State<T> state : getStates()) {
            for (StateTransition<T> transition : state.getStateTransitions()) {
                IterativeCondition<T> condition = transition.getCondition();
                FunctionUtils.setFunctionRuntimeContext(condition, cepRuntimeContext);
                FunctionUtils.openFunction(condition, DefaultOpenContext.INSTANCE);
            }
        }
    }

    /** Tear-down method for the NFA. */
    public void close() throws Exception {
        for (State<T> state : getStates()) {
            for (StateTransition<T> transition : state.getStateTransitions()) {
                IterativeCondition<T> condition = transition.getCondition();
                FunctionUtils.closeFunction(condition);
            }
        }
    }

    /**
     * Processes the next input event. If some of the computations reach a final state then the
     * resulting event sequences are returned. If computations time out and timeout handling is
     * activated, then the timed out event patterns are returned.
     *
     * <p>If computations reach a stop state, the path forward is discarded and the currently
     * constructed path is returned with the element that resulted in the stop state.
     *
     * @param sharedBufferAccessor the accessor to the SharedBuffer object that we need to work upon
     *     while processing
     * @param nfaState The NFAState object that we need to affect while processing
     * @param event The current event to be processed or null if only pruning shall be done
     * @param timestamp The timestamp of the current event
     * @param afterMatchSkipStrategy The skip strategy to use after each match
     * @param timerService gives access to processing time and time characteristic, needed for
     *     condition evaluation
     * @return Tuple of the collection of matched patterns (e.g. the result of computations which
     *     have reached a final state) and the collection of timed out patterns (if timeout handling
     *     is activated)
     * @throws Exception Thrown if the system cannot access the state.
     */
    public Collection<Map<String, List<T>>> process(
            final SharedBufferAccessor<T> sharedBufferAccessor,
            final NFAState nfaState,
            final T event,
            final long timestamp,
            final AfterMatchSkipStrategy afterMatchSkipStrategy,
            final TimerService timerService)
            throws Exception {
        try (EventWrapper eventWrapper = new EventWrapper(event, timestamp, sharedBufferAccessor)) {
            return doProcess(
                    sharedBufferAccessor,
                    nfaState,
                    eventWrapper,
                    afterMatchSkipStrategy,
                    timerService);
        }
    }

    /**
     * Prunes states assuming there will be no events with timestamp <b>lower</b> than the given
     * one. It clears the sharedBuffer and also emits all timed out partial matches.
     *
     * @param sharedBufferAccessor the accessor to the SharedBuffer object that we need to work upon
     *     while processing
     * @param nfaState The NFAState object that we need to affect while processing
     * @param timestamp timestamp that indicates that there will be no more events with lower
     *     timestamp
     * @return all pending matches and timed-out partial matches
     * @throws Exception Thrown if the system cannot access the state.
     */
    public Tuple2<Collection<Map<String, List<T>>>, Collection<Tuple2<Map<String, List<T>>, Long>>>
            advanceTime(
                    final SharedBufferAccessor<T> sharedBufferAccessor,
                    final NFAState nfaState,
                    final long timestamp,
                    final AfterMatchSkipStrategy afterMatchSkipStrategy)
                    throws Exception {
        final List<Map<String, List<T>>> result = new ArrayList<>();
        final Collection<Tuple2<Map<String, List<T>>, Long>> timeoutResult = new ArrayList<>();
        final PriorityQueue<ComputationState> newPartialMatches =
                new PriorityQueue<>(NFAState.COMPUTATION_STATE_COMPARATOR);
        final PriorityQueue<ComputationState> potentialMatches =
                new PriorityQueue<>(NFAState.COMPUTATION_STATE_COMPARATOR);

        for (ComputationState computationState : nfaState.getPartialMatches()) {
            String currentStateName = computationState.getCurrentStateName();
            boolean isTimeoutForPreviousEvent =
                    windowTimes.containsKey(currentStateName)
                            && isStateTimedOut(
                                    computationState,
                                    timestamp,
                                    computationState.getPreviousTimestamp(),
                                    windowTimes.get(currentStateName));
            boolean isTimeoutForFirstEvent =
                    isStateTimedOut(
                            computationState,
                            timestamp,
                            computationState.getStartTimestamp(),
                            windowTime);
            if (isTimeoutForPreviousEvent || isTimeoutForFirstEvent) {
                nfaState.setStateChanged();

                if (getState(computationState).isPending()) {
                    // save pending states for after-match pruning, where those states will be
                    // released
                    potentialMatches.add(computationState);
                    continue;
                }

                if (handleTimeout) {
                    // extract the timed out event pattern
                    Map<String, List<T>> timedOutPattern =
                            sharedBufferAccessor.materializeMatch(
                                    extractCurrentMatches(sharedBufferAccessor, computationState));
                    timeoutResult.add(
                            Tuple2.of(
                                    timedOutPattern,
                                    isTimeoutForPreviousEvent
                                            ? computationState.getPreviousTimestamp()
                                                    + windowTimes.get(
                                                            computationState.getCurrentStateName())
                                            : computationState.getStartTimestamp() + windowTime));
                }

                // release timeout states
                sharedBufferAccessor.releaseNode(
                        computationState.getPreviousBufferEntry(), computationState.getVersion());
            } else {
                newPartialMatches.add(computationState);
            }
        }

        // If a timeout partial match "frees" some completed matches
        // Or if completed not-followed-by matches need pruning
        processMatchesAccordingToSkipStrategy(
                sharedBufferAccessor,
                nfaState,
                afterMatchSkipStrategy,
                potentialMatches,
                newPartialMatches,
                result);

        nfaState.setNewPartialMatches(newPartialMatches);

        sharedBufferAccessor.advanceTime(timestamp);

        return Tuple2.of(result, timeoutResult);
    }

    private boolean isStateTimedOut(
            final ComputationState state,
            final long timestamp,
            final long startTimestamp,
            final long windowTime) {
        return !isStartState(state) && windowTime > 0L && timestamp - startTimestamp >= windowTime;
    }

    private Collection<Map<String, List<T>>> doProcess(
            final SharedBufferAccessor<T> sharedBufferAccessor,
            final NFAState nfaState,
            final EventWrapper event,
            final AfterMatchSkipStrategy afterMatchSkipStrategy,
            final TimerService timerService)
            throws Exception {

        final PriorityQueue<ComputationState> newPartialMatches =
                new PriorityQueue<>(NFAState.COMPUTATION_STATE_COMPARATOR);
        PriorityQueue<ComputationState> potentialMatches =
                new PriorityQueue<>(NFAState.COMPUTATION_STATE_COMPARATOR);

        // iterate over all current computations
        for (ComputationState computationState : nfaState.getPartialMatches()) {
            final Collection<ComputationState> newComputationStates =
                    computeNextStates(sharedBufferAccessor, computationState, event, timerService);

            if (newComputationStates.size() != 1) {
                nfaState.setStateChanged();
            } else if (!newComputationStates.iterator().next().equals(computationState)) {
                nfaState.setStateChanged();
            }

            // delay adding new computation states in case a stop state is reached and we discard
            // the path.
            final Collection<ComputationState> statesToRetain = new ArrayList<>();
            // if stop state reached in this path
            boolean shouldDiscardPath = false;
            for (final ComputationState newComputationState : newComputationStates) {

                if (isStartState(computationState)
                        && newComputationState.getStartTimestamp() > 0) {
                    nfaState.setNewStartPartiailMatch();
                }

                if (isFinalState(newComputationState)) {
                    potentialMatches.add(newComputationState);
                } else if (isStopState(newComputationState)) {
                    // reached stop state. release entry for the stop state
                    shouldDiscardPath = true;
                    sharedBufferAccessor.releaseNode(
                            newComputationState.getPreviousBufferEntry(),
                            newComputationState.getVersion());
                } else {
                    // add new computation state; it will be processed once the next event arrives
                    statesToRetain.add(newComputationState);
                }
            }

            if (shouldDiscardPath) {
                // a stop state was reached in this branch. release branch which results in
                // removing the previous event from the buffer
                for (final ComputationState state : statesToRetain) {
                    sharedBufferAccessor.releaseNode(
                            state.getPreviousBufferEntry(), state.getVersion());
                }
            } else {
                newPartialMatches.addAll(statesToRetain);
            }
        }

        if (!potentialMatches.isEmpty()) {
            nfaState.setStateChanged();
        }

        List<Map<String, List<T>>> result = new ArrayList<>();
        processMatchesAccordingToSkipStrategy(
                sharedBufferAccessor,
                nfaState,
                afterMatchSkipStrategy,
                potentialMatches,
                newPartialMatches,
                result);

        nfaState.setNewPartialMatches(newPartialMatches);

        return result;
    }

    private void processMatchesAccordingToSkipStrategy(
            SharedBufferAccessor<T> sharedBufferAccessor,
            NFAState nfaState,
            AfterMatchSkipStrategy afterMatchSkipStrategy,
            PriorityQueue<ComputationState> potentialMatches,
            PriorityQueue<ComputationState> partialMatches,
            List<Map<String, List<T>>> result)
            throws Exception {

        nfaState.getCompletedMatches().addAll(potentialMatches);

        ComputationState earliestMatch;
        while ((earliestMatch = nfaState.getCompletedMatches().peek()) != null) {

            // Care for ordering when it's not NO_SKIP
            if (afterMatchSkipStrategy.isSkipStrategy()) {
                ComputationState earliestPartialMatch = partialMatches.peek();
                if (earliestPartialMatch != null
                        && !isEarlier(earliestMatch, earliestPartialMatch)) {
                    break;
                }
            }

            nfaState.setStateChanged();
            nfaState.getCompletedMatches().poll();
            List<Map<String, List<EventId>>> matchedResult =
                    sharedBufferAccessor.extractPatterns(
                            earliestMatch.getPreviousBufferEntry(), earliestMatch.getVersion());

            afterMatchSkipStrategy.prune(partialMatches, matchedResult, sharedBufferAccessor);
            afterMatchSkipStrategy.prune(
                    nfaState.getCompletedMatches(), matchedResult, sharedBufferAccessor);

            result.add(sharedBufferAccessor.materializeMatch(matchedResult.get(0)));
            sharedBufferAccessor.releaseNode(
                    earliestMatch.getPreviousBufferEntry(), earliestMatch.getVersion());
        }

        nfaState.getPartialMatches()
                .removeIf(pm -> pm.getStartEventID() != null && !partialMatches.contains(pm));
    }

    private boolean isEarlier(
            ComputationState earliestMatch, ComputationState earliestPartialMatch) {
        return NFAState.COMPUTATION_STATE_COMPARATOR.compare(earliestMatch, earliestPartialMatch)
                <= 0;
    }

    private static <T> boolean isEquivalentState(final State<T> s1, final State<T> s2) {
        return s1.getName().equals(s2.getName());
    }

    /**
     * Class for storing resolved transitions. It counts at insert time the number of branching
     * transitions both for IGNORE and TAKE actions.
     */
    private static class OutgoingEdges<T> {
        private List<StateTransition<T>> edges = new ArrayList<>();

        private final State<T> currentState;

        private int totalTakeBranches = 0;
        private int totalIgnoreBranches = 0;

        OutgoingEdges(final State<T> currentState) {
            this.currentState = currentState;
        }

        void add(StateTransition<T> edge) {

            if (!isSelfIgnore(edge)) {
                if (edge.getAction() == StateTransitionAction.IGNORE) {
                    totalIgnoreBranches++;
                } else if (edge.getAction() == StateTransitionAction.TAKE) {
                    totalTakeBranches++;
                }
            }

            edges.add(edge);
        }

        int getTotalIgnoreBranches() {
            return totalIgnoreBranches;
        }

        int getTotalTakeBranches() {
            return totalTakeBranches;
        }

        List<StateTransition<T>> getEdges() {
            return edges;
        }

        private boolean isSelfIgnore(final StateTransition<T> edge) {
            return isEquivalentState(edge.getTargetState(), currentState)
                    && edge.getAction() == StateTransitionAction.IGNORE;
        }
    }

    /**
     * Helper class that ensures event is registered only once throughout the life of this object
     * and released on close of this object. This allows to wrap whole processing of the event with
     * try-with-resources block.
     */
    private class EventWrapper implements AutoCloseable {

        private final T event;

        private long timestamp;

        private final SharedBufferAccessor<T> sharedBufferAccessor;

        private EventId eventId;

        EventWrapper(T event, long timestamp, SharedBufferAccessor<T> sharedBufferAccessor) {
            this.event = event;
            this.timestamp = timestamp;
            this.sharedBufferAccessor = sharedBufferAccessor;
        }

        EventId getEventId() throws Exception {
            if (eventId == null) {
                this.eventId = sharedBufferAccessor.registerEvent(event, timestamp);
            }
            return eventId;
        }

        T getEvent() {
            return event;
        }

        public long getTimestamp() {
            return timestamp;
        }

        @Override
        public void close() throws Exception {
            if (eventId != null) {
                sharedBufferAccessor.releaseEvent(eventId);
            }
        }
    }

    /**
     * Computes the next computation states based on the given computation state, the current
     * event, its timestamp and the internal state machine. The algorithm is:
     *
     * <ol>
     *   <li>Decide on valid transitions and number of branching paths. See {@link OutgoingEdges}
     *   <li>Perform transitions:
     *       <ol>
     *           <li>IGNORE (links in {@link SharedBuffer} will still point to the previous event)
     *               <ul>
     *                   <li>do not perform for Start State - special case
     *                   <li>if it stays in the same state, increase the current stage for future
     *                       use with the number of outgoing edges
     *                   <li>if after PROCEED, increase the current stage and add a new stage (as we
     *                       change the state)
     *                   <li>lock the entry in {@link SharedBuffer} as it is needed in the created
     *                       branch
     *               </ul>
     *           <li>TAKE (links in {@link SharedBuffer} will point to the current event)
     *               <ul>
     *                   <li>add entry to the shared buffer with version of the current computation
     *                       state
     *                   <li>add stage and then increase with number of takes for the future
     *                       computation states
     *                   <li>peek to the next state if it has PROCEED path to a Final State, if true
     *                       create Final ComputationState to emit results
     *               </ul>
     *       </ol>
     *   <li>Handle the Start State, as it always has to remain
     *   <li>Release the corresponding entries in {@link SharedBuffer}.
     * </ol>
     *
     * @param sharedBufferAccessor The accessor to shared buffer that we need to change
     * @param computationState Current computation state
     * @param event Current event which is processed
     * @param timerService timer service which provides access to time related features
     * @return Collection of computation states which result from the current one
     * @throws Exception Thrown if the system cannot access the state.
     */
    private Collection<ComputationState> computeNextStates(
            final SharedBufferAccessor<T> sharedBufferAccessor,
            final ComputationState computationState,
            final EventWrapper event,
            final TimerService timerService)
            throws Exception {

        final ConditionContext context =
                new ConditionContext(
                        sharedBufferAccessor, computationState, timerService, event.getTimestamp());

        final OutgoingEdges<T> outgoingEdges =
                createDecisionGraph(context, computationState, event.getEvent());

        // Create the computing version based on the previously computed edges
        // We need to defer the creation of computation states until we know how many edges start
        // at this computation state so that we can assign proper version
        final List<StateTransition<T>> edges = outgoingEdges.getEdges();
        int takeBranchesToVisit = Math.max(0, outgoingEdges.getTotalTakeBranches() - 1);
        int ignoreBranchesToVisit = outgoingEdges.getTotalIgnoreBranches();
        int totalTakeToSkip = Math.max(0, outgoingEdges.getTotalTakeBranches() - 1);

        final List<ComputationState> resultingComputationStates = new ArrayList<>();
        for (StateTransition<T> edge : edges) {
            switch (edge.getAction()) {
                case IGNORE:
                    {
                        if (!isStartState(computationState)) {
                            final DeweyNumber version;
                            if (isEquivalentState(
                                    edge.getTargetState(), getState(computationState))) {
                                // Stay in the same state (it can be either looping one or
                                // singleton)
                                final int toIncrease =
                                        calculateIncreasingSelfState(
                                                outgoingEdges.getTotalIgnoreBranches(),
                                                outgoingEdges.getTotalTakeBranches());
                                version = computationState.getVersion().increase(toIncrease);
                            } else {
                                // IGNORE after PROCEED
                                version =
                                        computationState
                                                .getVersion()
                                                .increase(totalTakeToSkip + ignoreBranchesToVisit)
                                                .addStage();
                                ignoreBranchesToVisit--;
                            }

                            addComputationState(
                                    sharedBufferAccessor,
                                    resultingComputationStates,
                                    edge.getTargetState(),
                                    computationState.getPreviousBufferEntry(),
                                    version,
                                    computationState.getStartTimestamp(),
                                    computationState.getPreviousTimestamp(),
                                    computationState.getStartEventID());
                        }
                    }
                    break;
                case TAKE:
                    final State<T> nextState = edge.getTargetState();
                    final State<T> currentState = edge.getSourceState();

                    final NodeId previousEntry = computationState.getPreviousBufferEntry();

                    final DeweyNumber currentVersion =
                            computationState.getVersion().increase(takeBranchesToVisit);
                    final DeweyNumber nextVersion = new DeweyNumber(currentVersion).addStage();
                    takeBranchesToVisit--;

                    final NodeId newEntry =
                            sharedBufferAccessor.put(
                                    currentState.getName(),
                                    event.getEventId(),
                                    previousEntry,
                                    currentVersion);

                    final long startTimestamp;
                    final EventId startEventId;
                    if (isStartState(computationState)) {
                        startTimestamp = event.getTimestamp();
                        startEventId = event.getEventId();
                    } else {
                        startTimestamp = computationState.getStartTimestamp();
                        startEventId = computationState.getStartEventID();
                    }
                    final long previousTimestamp = event.getTimestamp();

                    addComputationState(
                            sharedBufferAccessor,
                            resultingComputationStates,
                            nextState,
                            newEntry,
                            nextVersion,
                            startTimestamp,
                            previousTimestamp,
                            startEventId);

                    // check if newly created state is optional (have a PROCEED path to Final state)
                    final State<T> finalState =
                            findFinalStateAfterProceed(context, nextState, event.getEvent());
                    if (finalState != null) {
                        addComputationState(
                                sharedBufferAccessor,
                                resultingComputationStates,
                                finalState,
                                newEntry,
                                nextVersion,
                                startTimestamp,
                                previousTimestamp,
                                startEventId);
                    }
                    break;
            }
        }

        if (isStartState(computationState)) {
            int totalBranches =
                    calculateIncreasingSelfState(
                            outgoingEdges.getTotalIgnoreBranches(),
                            outgoingEdges.getTotalTakeBranches());

            DeweyNumber startVersion = computationState.getVersion().increase(totalBranches);
            ComputationState startState =
                    ComputationState.createStartState(
                            computationState.getCurrentStateName(), startVersion);
            resultingComputationStates.add(startState);
        }

        if (computationState.getPreviousBufferEntry() != null) {
            // release the shared entry referenced by the current computation state.
            sharedBufferAccessor.releaseNode(
                    computationState.getPreviousBufferEntry(), computationState.getVersion());
        }

        return resultingComputationStates;
    }

    private void addComputationState(
            SharedBufferAccessor<T> sharedBufferAccessor,
            List<ComputationState> computationStates,
            State<T> currentState,
            NodeId previousEntry,
            DeweyNumber version,
            long startTimestamp,
            long previousTimestamp,
            EventId startEventId)
            throws Exception {
        ComputationState computationState =
                ComputationState.createState(
                        currentState.getName(),
                        previousEntry,
                        version,
                        startTimestamp,
                        previousTimestamp,
                        startEventId);
        computationStates.add(computationState);

        sharedBufferAccessor.lockNode(previousEntry, computationState.getVersion());
    }

    private State<T> findFinalStateAfterProceed(
            ConditionContext context, State<T> state, T event) {
        final Stack<State<T>> statesToCheck = new Stack<>();
        statesToCheck.push(state);
        try {
            while (!statesToCheck.isEmpty()) {
                final State<T> currentState = statesToCheck.pop();
                for (StateTransition<T> transition : currentState.getStateTransitions()) {
                    if (transition.getAction() == StateTransitionAction.PROCEED
                            && checkFilterCondition(context, transition.getCondition(), event)) {
                        if (transition.getTargetState().isFinal()) {
                            return transition.getTargetState();
                        } else {
                            statesToCheck.push(transition.getTargetState());
                        }
                    }
                }
            }
        } catch (Exception e) {
            throw new FlinkRuntimeException("Failure happened in filter function.", e);
        }

        return null;
    }

    private int calculateIncreasingSelfState(int ignoreBranches, int takeBranches) {
        return takeBranches == 0 && ignoreBranches == 0
                ? 0
                : ignoreBranches + Math.max(1, takeBranches);
    }

    private OutgoingEdges<T> createDecisionGraph(
            ConditionContext context, ComputationState computationState, T event) {
        State<T> state = getState(computationState);
        final OutgoingEdges<T> outgoingEdges = new OutgoingEdges<>(state);

        final Stack<State<T>> states = new Stack<>();
        states.push(state);

        // First create all outgoing edges, so to be able to reason about the Dewey version
        while (!states.isEmpty()) {
            State<T> currentState = states.pop();
            Collection<StateTransition<T>> stateTransitions = currentState.getStateTransitions();

            // check all state transitions for each state
            for (StateTransition<T> stateTransition : stateTransitions) {
                try {
                    if (checkFilterCondition(context, stateTransition.getCondition(), event)) {
                        // filter condition is true
                        switch (stateTransition.getAction()) {
                            case PROCEED:
                                // simply advance the computation state, but apply the current event
                                // to it
                                // PROCEED is equivalent to an epsilon transition
                                states.push(stateTransition.getTargetState());
                                break;
                            case IGNORE:
                            case TAKE:
                                outgoingEdges.add(stateTransition);
                                break;
                        }
                    }
                } catch (Exception e) {
                    throw new FlinkRuntimeException("Failure happened in filter function.", e);
                }
            }
        }
        return outgoingEdges;
    }

    private boolean checkFilterCondition(
            ConditionContext context, IterativeCondition<T> condition, T event) throws Exception {
        return condition == null || condition.filter(event, context);
    }

    /**
     * Extracts all the sequences of events from the start to the given computation state. An event
     * sequence is returned as a map which contains the events and the names of the states to which
     * the events were mapped.
     *
     * @param sharedBufferAccessor The accessor to {@link SharedBuffer} from which to extract the
     *     matches
     * @param computationState The end computation state of the extracted event sequences
     * @return Collection of event sequences which end in the given computation state
     * @throws Exception Thrown if the system cannot access the state.
     */
    private Map<String, List<EventId>> extractCurrentMatches(
            final SharedBufferAccessor<T> sharedBufferAccessor,
            final ComputationState computationState)
            throws Exception {
        if (computationState.getPreviousBufferEntry() == null) {
            return new HashMap<>();
        }

        List<Map<String, List<EventId>>> paths =
                sharedBufferAccessor.extractPatterns(
                        computationState.getPreviousBufferEntry(), computationState.getVersion());

        if (paths.isEmpty()) {
            return new HashMap<>();
        }
        // for a given computation state, we cannot have more than one matching pattern.
        Preconditions.checkState(paths.size() == 1);

        return paths.get(0);
    }

    /** The context used when evaluating this computation state. */
    private class ConditionContext implements IterativeCondition.Context<T> {

        private final TimerService timerService;

        private final long eventTimestamp;

        /** The current computation state. */
        private ComputationState computationState;

        /**
         * The matched pattern so far. A condition will be evaluated over this pattern. This is
         * evaluated only once, as this is an expensive operation that traverses a path in the
         * {@link SharedBuffer}.
         */
        private Map<String, List<T>> matchedEvents;

        private SharedBufferAccessor<T> sharedBufferAccessor;

        ConditionContext(
                final SharedBufferAccessor<T> sharedBufferAccessor,
                final ComputationState computationState,
                final TimerService timerService,
                final long eventTimestamp) {
            this.computationState = computationState;
            this.sharedBufferAccessor = sharedBufferAccessor;
            this.timerService = timerService;
            this.eventTimestamp = eventTimestamp;
        }

        @Override
        public Iterable<T> getEventsForPattern(final String key) throws Exception {
            Preconditions.checkNotNull(key);

            // the (partially) matched pattern is computed lazily when this method is called.
            // this is to avoid any overheads when using a simple, non-iterative condition.
            if (matchedEvents == null) {
                this.matchedEvents =
                        sharedBufferAccessor.materializeMatch(
                                extractCurrentMatches(sharedBufferAccessor, computationState));
            }

            return new Iterable<T>() {
                @Override
                public Iterator<T> iterator() {
                    List<T> elements = matchedEvents.get(key);
                    return elements == null
                            ? Collections.<T>emptyList().iterator()
                            : elements.iterator();
                }
            };
        }

        @Override
        public long timestamp() {
            return eventTimestamp;
        }

        @Override
        public long currentProcessingTime() {
            return timerService.currentProcessingTime();
        }
    }

    ////////////////////               DEPRECATED/MIGRATION UTILS

    /** Wrapper for migrated state. */
    public static class MigratedNFA<T> {

        private final Queue<ComputationState> computationStates;
        private final org.apache.flink.cep.nfa.SharedBuffer<T> sharedBuffer;

        public org.apache.flink.cep.nfa.SharedBuffer<T> getSharedBuffer() {
            return sharedBuffer;
        }

        public Queue<ComputationState> getComputationStates() {
            return computationStates;
        }

        MigratedNFA(
                final Queue<ComputationState> computationStates,
                final org.apache.flink.cep.nfa.SharedBuffer<T> sharedBuffer) {
            this.sharedBuffer = sharedBuffer;
            this.computationStates = computationStates;
        }
    }

    /** A {@link TypeSerializerSnapshot} for the legacy {@link NFASerializer}. */
    @SuppressWarnings("deprecation")
    public static final class MigratedNFASerializerSnapshot<T>
            extends CompositeTypeSerializerSnapshot<MigratedNFA<T>, NFASerializer<T>> {

        private static final int VERSION = 2;

        public MigratedNFASerializerSnapshot() {}

        MigratedNFASerializerSnapshot(NFASerializer<T> legacyNfaSerializer) {
            super(legacyNfaSerializer);
        }

        @Override
        protected int getCurrentOuterSnapshotVersion() {
            return VERSION;
        }

        @Override
        protected TypeSerializer<?>[] getNestedSerializers(NFASerializer<T> outerSerializer) {
            return new TypeSerializer<?>[] {
                outerSerializer.eventSerializer, outerSerializer.sharedBufferSerializer
            };
        }

        @Override
        protected NFASerializer<T> createOuterSerializerWithNestedSerializers(
                TypeSerializer<?>[] nestedSerializers) {
            @SuppressWarnings("unchecked")
            TypeSerializer<T> eventSerializer = (TypeSerializer<T>) nestedSerializers[0];

            @SuppressWarnings("unchecked")
            TypeSerializer<org.apache.flink.cep.nfa.SharedBuffer<T>> sharedBufferSerializer =
                    (TypeSerializer<org.apache.flink.cep.nfa.SharedBuffer<T>>)
                            nestedSerializers[1];

            return new NFASerializer<>(eventSerializer, sharedBufferSerializer);
        }
    }

    /** Only for backward compatibility with <=1.5. */
    @Deprecated
    public static class NFASerializer<T> extends TypeSerializer<MigratedNFA<T>> {

        private static final long serialVersionUID = 2098282423980597010L;

        private final TypeSerializer<org.apache.flink.cep.nfa.SharedBuffer<T>>
                sharedBufferSerializer;

        private final TypeSerializer<T> eventSerializer;

        public NFASerializer(TypeSerializer<T> typeSerializer) {
            this(
                    typeSerializer,
                    new org.apache.flink.cep.nfa.SharedBuffer.SharedBufferSerializer<>(
                            StringSerializer.INSTANCE, typeSerializer));
        }

        NFASerializer(
                TypeSerializer<T> typeSerializer,
                TypeSerializer<org.apache.flink.cep.nfa.SharedBuffer<T>> sharedBufferSerializer) {
            this.eventSerializer = typeSerializer;
            this.sharedBufferSerializer = sharedBufferSerializer;
        }

        @Override
        public boolean isImmutableType() {
            return false;
        }

        @Override
        public NFASerializer<T> duplicate() {
            return new NFASerializer<>(eventSerializer.duplicate());
        }

        @Override
        public MigratedNFA<T> createInstance() {
            return null;
        }

        @Override
        public MigratedNFA<T> copy(MigratedNFA<T> from) {
            throw new UnsupportedOperationException();
        }

        @Override
        public MigratedNFA<T> copy(MigratedNFA<T> from, MigratedNFA<T> reuse) {
            return copy(from);
        }

        @Override
        public int getLength() {
            return -1;
        }

        @Override
        public void serialize(MigratedNFA<T> record, DataOutputView target) {
            throw new UnsupportedOperationException();
        }

        @Override
        public MigratedNFA<T> deserialize(DataInputView source) throws IOException {
            MigrationUtils.skipSerializedStates(source);
            source.readLong();
            source.readBoolean();

            org.apache.flink.cep.nfa.SharedBuffer<T> sharedBuffer =
                    sharedBufferSerializer.deserialize(source);
            Queue<ComputationState> computationStates =
                    deserializeComputationStates(sharedBuffer, eventSerializer, source);

            return new MigratedNFA<>(computationStates, sharedBuffer);
        }

        @Override
        public MigratedNFA<T> deserialize(MigratedNFA<T> reuse, DataInputView source)
                throws IOException {
            return deserialize(source);
        }

        @Override
        public void copy(DataInputView source, DataOutputView target) {
            throw new UnsupportedOperationException();
        }

        @Override
        public boolean equals(Object obj) {
            return obj == this
                    || (obj != null
                            && obj.getClass().equals(getClass())
                            && sharedBufferSerializer.equals(
                                    ((NFASerializer) obj).sharedBufferSerializer)
                            && eventSerializer.equals(((NFASerializer) obj).eventSerializer));
        }

        @Override
        public int hashCode() {
            return 37 * sharedBufferSerializer.hashCode() + eventSerializer.hashCode();
        }

        @Override
        public MigratedNFASerializerSnapshot<T> snapshotConfiguration() {
            return new MigratedNFASerializerSnapshot<>(this);
        }
    }
}
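For context, the snippet below is a minimal, self-contained sketch (not part of the file above) of how an NFA instance is typically obtained from the user-facing Pattern API via the NFACompiler, mirroring what the CepOperator does internally. The String event type and the conditions are illustrative assumptions.

import org.apache.flink.cep.nfa.NFA;
import org.apache.flink.cep.nfa.compiler.NFACompiler;
import org.apache.flink.cep.pattern.Pattern;
import org.apache.flink.cep.pattern.conditions.SimpleCondition;

public class NfaCompilationSketch {

    public static void main(String[] args) {
        // An illustrative pattern: an event starting with "a" followed by one ending with "z".
        Pattern<String, ?> pattern =
                Pattern.<String>begin("start")
                        .where(
                                new SimpleCondition<String>() {
                                    @Override
                                    public boolean filter(String value) {
                                        return value.startsWith("a");
                                    }
                                })
                        .followedBy("end")
                        .where(
                                new SimpleCondition<String>() {
                                    @Override
                                    public boolean filter(String value) {
                                        return value.endsWith("z");
                                    }
                                });

        // Compile the pattern into an NFA. The boolean flag corresponds to handleTimeout
        // above: whether timed-out partial matches are emitted or silently discarded.
        NFA<String> nfa = NFACompiler.compileFactory(pattern, false).createNFA();

        // At runtime the CepOperator drives this NFA per key, supplying a
        // SharedBufferAccessor, an NFAState and a TimerService to NFA#process
        // and NFA#advanceTime; getStates() is exposed for testing.
        System.out.println("Compiled NFA has " + nfa.getStates().size() + " states.");
    }
}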