net.automatalib.util.automata.ads.BacktrackingSearch Maven / Gradle / Ivy
This artifact provides various common utility operations for analyzing and manipulating
automata and graphs, such as traversal, minimization and copying.
/* Copyright (C) 2013-2019 TU Dortmund
* This file is part of AutomataLib, http://www.automatalib.net/.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.automatalib.util.automata.ads;
import java.util.BitSet;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Map;
import java.util.Optional;
import java.util.Queue;
import java.util.Set;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.stream.Collectors;
import com.google.common.collect.Maps;
import net.automatalib.automata.concepts.StateIDs;
import net.automatalib.automata.transducers.MealyMachine;
import net.automatalib.commons.util.Pair;
import net.automatalib.graphs.ads.ADSNode;
import net.automatalib.graphs.ads.impl.ADSLeafNode;
import net.automatalib.graphs.ads.impl.ADSSymbolNode;
import net.automatalib.words.Alphabet;
import net.automatalib.words.Word;
/**
* A class containing methods for computing adaptive distinguishing sequences (for arbitrary sets of states) by means of
* a backtracking approach.
*
* @author frohme
*/
public final class BacktrackingSearch {
private BacktrackingSearch() {
}
/**
* Computes an ADS by constructing (growing) splitting words for the current set of states and recursively computing
* sub-ADSs for the induced partitions. May yield non-optimal ADSs.
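     * <p>
     * A hypothetical usage sketch (the Mealy machine {@code mealy}, its input alphabet {@code sigma} and the chosen
     * state set are assumptions for illustration; only the call to this method is prescribed by the API):
     * <pre>{@code
     * MealyMachine<S, I, ?, O> mealy = ...; // hypothesis under analysis (assumed given)
     * Alphabet<I> sigma = ...;              // its input alphabet (assumed given)
     *
     * Set<S> block = new HashSet<>(mealy.getStates());
     * Optional<ADSNode<S, I, O>> ads = BacktrackingSearch.compute(mealy, sigma, block);
     * ads.ifPresent(root -> {
     *     // traverse the resulting tree, e.g. via root.getChildren()
     * });
     * }</pre>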
*
* @param automaton
* The automaton for which an ADS should be computed
* @param input
* the input alphabet of the automaton
* @param states
* the set of states which should be distinguished by the computed ADS
     * @param <S>
     *         (hypothesis) state type
     * @param <I>
     *         input alphabet type
     * @param <O>
     *         output alphabet type
*
* @return {@code Optional.empty()} if there exists no ADS that distinguishes the given states, a valid ADS
* otherwise.
*/
    public static <S, I, O> Optional<ADSNode<S, I, O>> compute(final MealyMachine<S, I, ?, O> automaton,
                                                               final Alphabet<I> input,
                                                               final Set<S> states) {
if (states.size() == 1) {
return ADS.compute(automaton, input, states);
}
        final SplitTree<S, I, O> node = new SplitTree<>(states);
node.getMapping().putAll(states.stream().collect(Collectors.toMap(Function.identity(), Function.identity())));
return compute(automaton, input, node);
}
/**
     * See {@link #compute(MealyMachine, Alphabet, Set)}. Internal version that uses the {@link SplitTree}
* representation.
*/
    static <S, I, O> Optional<ADSNode<S, I, O>> compute(final MealyMachine<S, I, ?, O> automaton,
                                                        final Alphabet<I> input,
                                                        final SplitTree<S, I, O> node) {
return compute(automaton, input, node, node.getPartition().size());
}
    private static <S, I, O> Optional<ADSNode<S, I, O>> compute(final MealyMachine<S, I, ?, O> automaton,
                                                                final Alphabet<I> input,
                                                                final SplitTree<S, I, O> node,
                                                                final int originalPartitionSize) {
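        // An upper bound on how much longer a splitting word for the current block may grow; candidate
        // prefixes are only re-enqueued below as long as they stay shorter than this bound.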
final long maximumSplittingWordLength = ADSUtil.computeMaximumSplittingWordLength(automaton.size(),
node.getPartition().size(),
originalPartitionSize);
        final Queue<Word<I>> splittingWordCandidates = new LinkedList<>();
        final StateIDs<S> stateIds = automaton.stateIDs();
        final Set<BitSet> cache = new HashSet<>();
splittingWordCandidates.add(Word.epsilon());
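        // Breadth-first exploration of growing splitting-word candidates: each candidate prefix maps the
        // current block to its successor states, and BitSet-encoded state sets that have already been shown
        // not to admit an ADS are cached so they are not explored again.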
candidateLoop:
while (!splittingWordCandidates.isEmpty()) {
            final Word<I> prefix = splittingWordCandidates.poll();
            final Map<S, S> currentToInitialMapping = node.getPartition()
                                                          .stream()
                                                          .collect(Collectors.toMap(x -> automaton.getSuccessor(x, prefix),
                                                                                    Function.identity()));
final BitSet currentSetAsBitSet = new BitSet();
for (final S s : currentToInitialMapping.keySet()) {
currentSetAsBitSet.set(stateIds.getStateId(s));
}
if (cache.contains(currentSetAsBitSet)) {
continue candidateLoop;
}
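            // Try to extend the current prefix by a single input symbol: the block is partitioned by the
            // outputs produced under that symbol, and symbols under which two states converge are discarded.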
oneSymbolFuture:
for (final I i : input) {
// compute successors
                final Map<O, SplitTree<S, I, O>> successors = new HashMap<>();
                for (final Map.Entry<S, S> entry : currentToInitialMapping.entrySet()) {
final S current = entry.getKey();
final S nextState = automaton.getSuccessor(current, i);
final O nextOutput = automaton.getOutput(current, i);
                    final SplitTree<S, I, O> child;
if (!successors.containsKey(nextOutput)) {
child = new SplitTree<>(new HashSet<>());
successors.put(nextOutput, child);
} else {
child = successors.get(nextOutput);
}
// invalid input
if (!child.getPartition().add(nextState)) {
continue oneSymbolFuture;
}
child.getMapping().put(nextState, node.getMapping().get(entry.getValue()));
}
//splitting word
if (successors.size() > 1) {
                    final Map<O, ADSNode<S, I, O>> results = new HashMap<>();
                    for (final Map.Entry<O, SplitTree<S, I, O>> entry : successors.entrySet()) {
                        final SplitTree<S, I, O> currentNode = entry.getValue();
final BitSet currentNodeAsBitSet = new BitSet();
for (final S s : currentNode.getPartition()) {
currentNodeAsBitSet.set(stateIds.getStateId(s));
}
if (cache.contains(currentNodeAsBitSet)) {
continue oneSymbolFuture;
}
                        final Optional<ADSNode<S, I, O>> succ;
if (currentNode.getPartition().size() > 2) {
succ = BacktrackingSearch.compute(automaton, input, currentNode, originalPartitionSize);
} else {
succ = ADS.compute(automaton, input, currentNode);
}
if (!succ.isPresent()) {
cache.add(currentNodeAsBitSet);
continue oneSymbolFuture;
}
results.put(entry.getKey(), succ.get());
}
// create ADS (if we haven't continued until here)
                    final Pair<ADSNode<S, I, O>, ADSNode<S, I, O>> ads =
                            ADSUtil.buildFromTrace(automaton, prefix.append(i), node.getPartition().iterator().next());
                    final ADSNode<S, I, O> head = ads.getFirst();
                    final ADSNode<S, I, O> tail = ads.getSecond();
                    for (final Map.Entry<O, ADSNode<S, I, O>> entry : results.entrySet()) {
entry.getValue().setParent(tail);
tail.getChildren().put(entry.getKey(), entry.getValue());
}
return Optional.of(head);
} else if (prefix.length() < maximumSplittingWordLength) { // no splitting word
splittingWordCandidates.add(prefix.append(i));
}
}
cache.add(currentSetAsBitSet);
}
return Optional.empty();
}
/**
     * Computes an ADS by iterating over the successor tree in a breadth-first manner, yielding an ADS that is optimal
     * with respect to the passed optimization function.
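     * <p>
     * A hypothetical usage sketch (automaton, alphabet and state set are assumed to be given as in
     * {@link #compute(MealyMachine, Alphabet, Set)}):
     * <pre>{@code
     * Optional<ADSNode<S, I, O>> shortest =
     *         BacktrackingSearch.computeOptimal(mealy, sigma, block, CostAggregator.MIN_LENGTH);
     * Optional<ADSNode<S, I, O>> smallest =
     *         BacktrackingSearch.computeOptimal(mealy, sigma, block, CostAggregator.MIN_SIZE);
     * }</pre>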
*
* @param automaton
* The automaton for which an ADS should be computed
* @param input
* the input alphabet of the automaton
* @param states
* the set of states which should be distinguished by the computed ADS
* @param costAggregator
* the optimization function by which solutions should be pruned
     * @param <S>
     *         (hypothesis) state type
     * @param <I>
     *         input alphabet type
     * @param <O>
     *         output alphabet type
*
* @return {@code Optional.empty()} if there exists no ADS that distinguishes the given states, a valid ADS
* otherwise.
*/
    public static <S, I, O> Optional<ADSNode<S, I, O>> computeOptimal(final MealyMachine<S, I, ?, O> automaton,
                                                                      final Alphabet<I> input,
                                                                      final Set<S> states,
                                                                      final CostAggregator costAggregator) {
if (states.size() == 1) {
return ADS.compute(automaton, input, states);
}
        final Map<Set<S>, Optional<SearchState<S, I, O>>> searchStateCache = new HashMap<>();
        final Set<Set<S>> traceCache = new HashSet<>();
        final Optional<SearchState<S, I, O>> searchState = exploreSearchSpace(automaton,
input,
states,
costAggregator,
searchStateCache,
traceCache,
Integer.MAX_VALUE);
if (!searchState.isPresent()) {
return Optional.empty();
}
        final Map<S, S> initialMapping =
states.stream().collect(Collectors.toMap(Function.identity(), Function.identity()));
return Optional.of(constructADS(automaton, initialMapping, searchState.get()));
}
    private static <S, I, O> Optional<SearchState<S, I, O>> exploreSearchSpace(final MealyMachine<S, I, ?, O> automaton,
                                                                               final Alphabet<I> alphabet,
                                                                               final Set<S> targets,
                                                                               final CostAggregator costAggregator,
                                                                               final Map<Set<S>, Optional<SearchState<S, I, O>>> stateCache,
                                                                               final Set<Set<S>> currentTraceCache,
                                                                               final int costsBound) {
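        // stateCache memoizes the best sub-result per target set across the entire search, whereas
        // currentTraceCache only records the sets visited along the current chain of non-splitting symbols,
        // so that cycles (which can never lead to a split) are detected and abandoned.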
        final Optional<SearchState<S, I, O>> cachedValue = stateCache.get(targets);
if (cachedValue != null) {
return cachedValue;
}
if (currentTraceCache.contains(targets)) {
return Optional.empty();
}
if (targets.size() == 1) {
            final SearchState<S, I, O> resultSS = new SearchState<>();
resultSS.costs = 0;
resultSS.successors = null;
resultSS.symbol = null;
            final Optional<SearchState<S, I, O>> result = Optional.of(resultSS);
stateCache.put(targets, result);
return result;
}
// any further expansion would lead to a worse result, hence stop here.
if (costsBound == 0) {
return Optional.empty();
}
boolean foundValidSuccessor = false;
boolean convergingStates = true;
int bestCosts = costsBound;
        Map<O, SearchState<S, I, O>> bestSuccessor = null;
I bestInputSymbol = null;
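        // Branch-and-bound over all input symbols: remember the cheapest valid refinement found so far and
        // prune every alternative whose aggregated costs already reach that bound.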
alphabetLoop:
for (final I i : alphabet) {
final boolean foundValidSuccessorForInputSymbol;
final int costsForInputSymbol;
            final Map<O, SearchState<S, I, O>> successorsForInputSymbol;
            // compute successors
            final Map<O, Set<S>> successors = new HashMap<>();
for (final S s : targets) {
final S nextState = automaton.getSuccessor(s, i);
final O nextOutput = automaton.getOutput(s, i);
                final Set<S> child;
if (!successors.containsKey(nextOutput)) {
child = new HashSet<>();
successors.put(nextOutput, child);
} else {
child = successors.get(nextOutput);
}
// invalid input
if (!child.add(nextState)) {
continue alphabetLoop;
}
}
convergingStates = false;
if (successors.size() > 1) {
successorsForInputSymbol = Maps.newHashMapWithExpectedSize(successors.size());
int partitionCosts = 0;
                for (final Map.Entry<O, Set<S>> entry : successors.entrySet()) {
                    final Optional<SearchState<S, I, O>> potentialResult = exploreSearchSpace(automaton,
alphabet,
entry.getValue(),
costAggregator,
stateCache,
new HashSet<>(),
bestCosts);
if (!potentialResult.isPresent()) {
continue alphabetLoop;
}
                    final SearchState<S, I, O> subResult = potentialResult.get();
successorsForInputSymbol.put(entry.getKey(), subResult);
partitionCosts = costAggregator.apply(partitionCosts, subResult.costs);
if (partitionCosts >= bestCosts) {
continue alphabetLoop;
}
}
foundValidSuccessorForInputSymbol = true;
costsForInputSymbol = partitionCosts;
} else {
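                // The symbol does not split the current block: follow the single successor set and extend
                // the trace cache so that non-splitting cycles are detected.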
                final Map.Entry<O, Set<S>> entry = successors.entrySet().iterator().next();
                final Set<S> nextTargets = entry.getValue();
                final Set<Set<S>> nextTraceCache = new HashSet<>(currentTraceCache);
nextTraceCache.add(targets);
                final Optional<SearchState<S, I, O>> potentialResult = exploreSearchSpace(automaton,
alphabet,
nextTargets,
costAggregator,
stateCache,
nextTraceCache,
bestCosts);
if (!potentialResult.isPresent()) {
continue alphabetLoop;
}
                final SearchState<S, I, O> subResult = potentialResult.get();
foundValidSuccessorForInputSymbol = true;
costsForInputSymbol = subResult.costs;
successorsForInputSymbol = Collections.singletonMap(entry.getKey(), subResult);
}
// update result
if (foundValidSuccessorForInputSymbol && costsForInputSymbol < bestCosts) {
foundValidSuccessor = true;
bestCosts = costsForInputSymbol;
bestSuccessor = successorsForInputSymbol;
bestInputSymbol = i;
}
}
if (convergingStates) {
stateCache.put(targets, Optional.empty());
return Optional.empty();
}
if (!foundValidSuccessor) {
return Optional.empty();
}
        final SearchState<S, I, O> resultSS = new SearchState<>();
resultSS.costs = bestCosts + 1;
resultSS.successors = bestSuccessor;
resultSS.symbol = bestInputSymbol;
        final Optional<SearchState<S, I, O>> result = Optional.of(resultSS);
stateCache.put(targets, result);
return result;
}
    private static <S, I, O> ADSNode<S, I, O> constructADS(final MealyMachine<S, I, ?, O> automaton,
                                                           final Map<S, S> currentToInitialMapping,
                                                           final SearchState<S, I, O> searchState) {
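        // Recursively turn the search-state tree into an ADS: singleton mappings become leaf nodes; otherwise
        // the recorded input symbol is applied and the current-to-initial mapping is partitioned by output.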
if (currentToInitialMapping.size() == 1) {
return new ADSLeafNode<>(null, currentToInitialMapping.values().iterator().next());
}
final I i = searchState.symbol;
        final Map<O, Map<S, S>> successors = new HashMap<>();
        for (final Map.Entry<S, S> entry : currentToInitialMapping.entrySet()) {
final S current = entry.getKey();
final S nextState = automaton.getSuccessor(current, i);
final O nextOutput = automaton.getOutput(current, i);
            final Map<S, S> nextMapping;
if (!successors.containsKey(nextOutput)) {
nextMapping = new HashMap<>();
successors.put(nextOutput, nextMapping);
} else {
nextMapping = successors.get(nextOutput);
}
// invalid input
if (nextMapping.put(nextState, entry.getValue()) != null) {
throw new IllegalStateException();
}
}
        final ADSNode<S, I, O> result = new ADSSymbolNode<>(null, i);
        for (final Map.Entry<O, Map<S, S>> entry : successors.entrySet()) {
            final O output = entry.getKey();
            final Map<S, S> nextMapping = entry.getValue();
            final ADSNode<S, I, O> successor = constructADS(automaton, nextMapping, searchState.successors.get(output));
result.getChildren().put(output, successor);
successor.setParent(result);
}
return result;
}
/**
     * Utility enum for specifying the optimization criterion when performing an optimal ADS search. See
     * {@link BacktrackingSearch#computeOptimal(MealyMachine, Alphabet, Set, CostAggregator)}.
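     * <p>
     * Informally, {@link #MIN_LENGTH} aggregates the costs of sibling sub-ADSs via their maximum and thus minimizes
     * the depth of the resulting ADS, whereas {@link #MIN_SIZE} aggregates via their sum and thus minimizes the total
     * number of symbol nodes. A small illustration:
     * <pre>{@code
     * CostAggregator.MIN_LENGTH.apply(2, 3); // == 3 (maximum of sub-costs)
     * CostAggregator.MIN_SIZE.apply(2, 3);   // == 5 (sum of sub-costs)
     * }</pre>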
*/
    public enum CostAggregator implements BiFunction<Integer, Integer, Integer> {
MIN_LENGTH() {
@Override
public Integer apply(Integer oldValue, Integer newValue) {
return Math.max(oldValue, newValue);
}
},
MIN_SIZE() {
@Override
public Integer apply(Integer oldValue, Integer newValue) {
return oldValue + newValue;
}
}
}
/**
     * Internal utility class that encapsulates information about a node in the successor tree.
*
     * @param <S>
     *         (hypothesis) state type
     * @param <I>
     *         input alphabet type
     * @param <O>
     *         output alphabet type
*/
    private static class SearchState<S, I, O> {
        private I symbol;
        private Map<O, SearchState<S, I, O>> successors;
private int costs;
}
}