
de.learnlib.algorithms.kv.mealy.KearnsVaziraniMealy Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of learnlib-kearns-vazirani Show documentation
The automata learning algorithm described by Kearns & Vazirani
/* Copyright (C) 2013-2018 TU Dortmund
* This file is part of LearnLib, http://www.learnlib.de/.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.learnlib.algorithms.kv.mealy;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.Objects;
import com.github.misberner.buildergen.annotations.GenerateBuilder;
import de.learnlib.acex.AcexAnalyzer;
import de.learnlib.acex.analyzers.AcexAnalyzers;
import de.learnlib.acex.impl.AbstractBaseCounterexample;
import de.learnlib.algorithms.kv.StateInfo;
import de.learnlib.api.algorithm.LearningAlgorithm.MealyLearner;
import de.learnlib.api.algorithm.feature.ResumableLearner;
import de.learnlib.api.algorithm.feature.SupportsGrowingAlphabet;
import de.learnlib.api.oracle.MembershipOracle;
import de.learnlib.api.query.DefaultQuery;
import de.learnlib.datastructure.discriminationtree.MultiDTree;
import de.learnlib.datastructure.discriminationtree.model.AbstractWordBasedDTNode;
import de.learnlib.datastructure.discriminationtree.model.AbstractWordBasedDiscriminationTree;
import de.learnlib.datastructure.discriminationtree.model.LCAInfo;
import de.learnlib.util.mealy.MealyUtil;
import net.automatalib.automata.transout.MealyMachine;
import net.automatalib.automata.transout.impl.compact.CompactMealy;
import net.automatalib.words.Alphabet;
import net.automatalib.words.Word;
import net.automatalib.words.impl.Alphabets;
/**
 * An adaption of the Kearns/Vazirani algorithm for Mealy machines.
 *
 * @param <I>
 *         input symbol type
 * @param <O>
 *         output symbol type
 *
 * @author Malte Isberner
 */
public class KearnsVaziraniMealy
implements MealyLearner, SupportsGrowingAlphabet, ResumableLearner> {
private Alphabet alphabet;
private final MembershipOracle> oracle;
private final boolean repeatedCounterexampleEvaluation;
private final AcexAnalyzer ceAnalyzer;
protected AbstractWordBasedDiscriminationTree, StateInfo>> discriminationTree;
protected List>> stateInfos = new ArrayList<>();
private CompactMealy hypothesis;
@GenerateBuilder
public KearnsVaziraniMealy(Alphabet alphabet,
MembershipOracle> oracle,
boolean repeatedCounterexampleEvaluation,
AcexAnalyzer counterexampleAnalyzer) {
this.alphabet = alphabet;
this.hypothesis = new CompactMealy<>(alphabet);
this.oracle = oracle;
this.repeatedCounterexampleEvaluation = repeatedCounterexampleEvaluation;
this.discriminationTree = new MultiDTree<>(oracle);
this.ceAnalyzer = counterexampleAnalyzer;
}
@Override
public void startLearning() {
initialize();
}
@Override
public boolean refineHypothesis(DefaultQuery> ceQuery) {
if (hypothesis.size() == 0) {
throw new IllegalStateException("Not initialized");
}
Word input = ceQuery.getInput();
Word output = ceQuery.getOutput();
if (!refineHypothesisSingle(input, output)) {
return false;
}
if (repeatedCounterexampleEvaluation) {
while (refineHypothesisSingle(input, output)) {
}
}
return true;
}
@Override
public MealyMachine, I, ?, O> getHypothesisModel() {
if (hypothesis.size() == 0) {
throw new IllegalStateException("Not started");
}
return hypothesis;
}
private boolean refineHypothesisSingle(Word input, Word output) {
int inputLen = input.length();
if (inputLen < 2) {
return false;
}
int mismatchIdx = MealyUtil.findMismatch(hypothesis, input, output);
if (mismatchIdx == MealyUtil.NO_MISMATCH) {
return false;
}
Word effInput = input.prefix(mismatchIdx + 1);
Word effOutput = output.prefix(mismatchIdx + 1);
KVAbstractCounterexample acex = new KVAbstractCounterexample(effInput, effOutput, oracle);
int idx = ceAnalyzer.analyzeAbstractCounterexample(acex, 0);
Word prefix = effInput.prefix(idx);
StateInfo> srcStateInfo = acex.getStateInfo(idx);
I sym = effInput.getSymbol(idx);
LCAInfo, AbstractWordBasedDTNode, StateInfo>>> lca = acex.getLCA(idx + 1);
assert lca != null;
splitState(srcStateInfo, prefix, sym, lca);
return true;
}
private void splitState(StateInfo> stateInfo,
Word newPrefix,
I sym,
LCAInfo, AbstractWordBasedDTNode, StateInfo>>> separatorInfo) {
int state = stateInfo.id;
// TLongList oldIncoming = stateInfo.fetchIncoming();
List oldIncoming = stateInfo.fetchIncoming(); // TODO: replace with primitive specialization
StateInfo> newStateInfo = createState(newPrefix);
AbstractWordBasedDTNode, StateInfo>> stateLeaf = stateInfo.dtNode;
AbstractWordBasedDTNode, StateInfo>> separator = separatorInfo.leastCommonAncestor;
Word newDiscriminator;
Word oldOut, newOut;
if (separator == null) {
newDiscriminator = Word.fromLetter(sym);
oldOut = separatorInfo.subtree1Label;
newOut = separatorInfo.subtree2Label;
} else {
newDiscriminator = newDiscriminator(sym, separator.getDiscriminator());
O transOut = hypothesis.getOutput(state, sym);
oldOut = newOutcome(transOut, separatorInfo.subtree1Label);
newOut = newOutcome(transOut, separatorInfo.subtree2Label);
}
final AbstractWordBasedDTNode, StateInfo>>.SplitResult sr =
stateLeaf.split(newDiscriminator, oldOut, newOut, newStateInfo);
stateInfo.dtNode = sr.nodeOld;
newStateInfo.dtNode = sr.nodeNew;
initState(newStateInfo);
updateTransitions(oldIncoming, stateLeaf);
}
private Word newOutcome(O transOutput, Word succOutcome) {
return succOutcome.prepend(transOutput);
}
// private void updateTransitions(TLongList transList, DTNode,StateInfo> oldDtTarget) {
private void updateTransitions(List transList,
AbstractWordBasedDTNode, StateInfo>> oldDtTarget) { // TODO: replace with primitive specialization
int numTrans = transList.size();
for (int i = 0; i < numTrans; i++) {
long encodedTrans = transList.get(i);
int sourceState = (int) (encodedTrans >> StateInfo.INTEGER_WORD_WIDTH);
int transIdx = (int) (encodedTrans);
StateInfo> sourceInfo = stateInfos.get(sourceState);
I symbol = alphabet.getSymbol(transIdx);
StateInfo> succInfo = sift(oldDtTarget, sourceInfo.accessSequence.append(symbol));
O output = hypothesis.getTransition(sourceState, transIdx).getOutput();
setTransition(sourceState, transIdx, succInfo, output);
}
}
private Word newDiscriminator(I symbol, Word succDiscriminator) {
return succDiscriminator.prepend(symbol);
}
private StateInfo> createInitialState() {
int state = hypothesis.addIntInitialState();
assert state == stateInfos.size();
StateInfo> stateInfo = new StateInfo<>(state, Word.epsilon());
stateInfos.add(stateInfo);
return stateInfo;
}
private StateInfo> createState(Word prefix) {
int state = hypothesis.addIntState();
assert state == stateInfos.size();
StateInfo> stateInfo = new StateInfo<>(state, prefix);
stateInfos.add(stateInfo);
return stateInfo;
}
private void initialize() {
StateInfo> init = createInitialState();
discriminationTree.getRoot().setData(init);
init.dtNode = discriminationTree.getRoot();
initState(init);
}
private void initState(StateInfo> stateInfo) {
int alphabetSize = alphabet.size();
int state = stateInfo.id;
Word accessSequence = stateInfo.accessSequence;
for (int i = 0; i < alphabetSize; i++) {
I sym = alphabet.getSymbol(i);
O output = oracle.answerQuery(accessSequence, Word.fromLetter(sym)).firstSymbol();
Word transAs = accessSequence.append(sym);
StateInfo> succInfo = sift(transAs);
setTransition(state, i, succInfo, output);
}
}
private void setTransition(int state, int symIdx, StateInfo> succInfo, O output) {
succInfo.addIncoming(state, symIdx);
hypothesis.setTransition(state, symIdx, succInfo.id, output);
}
private StateInfo> sift(Word prefix) {
return sift(discriminationTree.getRoot(), prefix);
}
private StateInfo> sift(AbstractWordBasedDTNode, StateInfo>> start,
Word prefix) {
AbstractWordBasedDTNode, StateInfo>> leaf = discriminationTree.sift(start, prefix);
StateInfo> succStateInfo = leaf.getData();
if (succStateInfo == null) {
// Special case: this is the *first* state with a different output
// for some discriminator
succStateInfo = createState(prefix);
leaf.setData(succStateInfo);
succStateInfo.dtNode = leaf;
initState(succStateInfo);
}
return succStateInfo;
}
@Override
public void addAlphabetSymbol(I symbol) {
if (this.alphabet.containsSymbol(symbol)) {
return;
}
final int inputIdx = this.alphabet.size();
this.hypothesis.addAlphabetSymbol(symbol);
// since we share the alphabet instance with our hypothesis, our alphabet might have already been updated (if it
// was already a GrowableAlphabet)
if (!this.alphabet.containsSymbol(symbol)) {
this.alphabet = Alphabets.withNewSymbol(this.alphabet, symbol);
}
// use new list to prevent concurrent modification exception
for (final StateInfo> si : new ArrayList<>(this.stateInfos)) {
final int state = si.id;
final Word accessSequence = si.accessSequence;
final Word transAs = accessSequence.append(symbol);
final O output = oracle.answerQuery(accessSequence, Word.fromLetter(symbol)).firstSymbol();
final StateInfo> succ = sift(transAs);
setTransition(state, inputIdx, succ, output);
}
}
@Override
public KearnsVaziraniMealyState suspend() {
return new KearnsVaziraniMealyState<>(hypothesis, discriminationTree, stateInfos);
}
@Override
public void resume(final KearnsVaziraniMealyState state) {
this.hypothesis = state.getHypothesis();
this.discriminationTree = state.getDiscriminationTree();
this.discriminationTree.setOracle(oracle);
this.stateInfos = state.getStateInfos();
}
static final class BuilderDefaults {
public static boolean repeatedCounterexampleEvaluation() {
return true;
}
public static AcexAnalyzer counterexampleAnalyzer() {
return AcexAnalyzers.LINEAR_FWD;
}
}
protected class KVAbstractCounterexample extends AbstractBaseCounterexample {
private final Word ceWord;
private final MembershipOracle> oracle;
private final StateInfo>[] states;
private final LCAInfo, AbstractWordBasedDTNode, StateInfo>>>[] lcas;
@SuppressWarnings("unchecked")
public KVAbstractCounterexample(Word ceWord, Word output, MembershipOracle> oracle) {
super(ceWord.length() + 1);
this.ceWord = ceWord;
this.oracle = oracle;
int m = ceWord.length();
this.states = new StateInfo[m + 1];
this.lcas = new LCAInfo[m + 1];
int currState = hypothesis.getIntInitialState();
int i = 0;
states[i++] = stateInfos.get(currState);
for (I sym : ceWord) {
currState = hypothesis.getSuccessor(currState, sym);
states[i++] = stateInfos.get(currState);
}
// Output of last transition separates hypothesis from target
O lastHypOut = hypothesis.getOutput(states[m - 1].id, ceWord.lastSymbol());
lcas[m] = new LCAInfo<>(null, Word.fromLetter(lastHypOut), Word.fromLetter(output.lastSymbol()));
super.setEffect(m, false);
}
public StateInfo> getStateInfo(int idx) {
return states[idx];
}
public LCAInfo, AbstractWordBasedDTNode, StateInfo>>> getLCA(int idx) {
return lcas[idx];
}
@Override
protected Boolean computeEffect(int index) {
Word prefix = ceWord.prefix(index);
StateInfo> info = states[index];
// Save the expected outcomes on the path from the leaf representing the state
// to the root on a stack
AbstractWordBasedDTNode, StateInfo>> node = info.dtNode;
Deque> expect = new ArrayDeque<>();
while (!node.isRoot()) {
expect.push(node.getParentOutcome());
node = node.getParent();
}
AbstractWordBasedDTNode, StateInfo>> currNode = discriminationTree.getRoot();
while (!expect.isEmpty()) {
Word suffix = currNode.getDiscriminator();
Word out = oracle.answerQuery(prefix, suffix);
Word e = expect.pop();
if (!Objects.equals(out, e)) {
lcas[index] = new LCAInfo<>(currNode, e, out);
return false;
}
currNode = currNode.child(out);
}
assert currNode.isLeaf() && expect.isEmpty();
return true;
}
@Override
public boolean checkEffects(Boolean eff1, Boolean eff2) {
return !eff1 || eff2;
}
}
}
© 2015 - 2025 Weber Informatics LLC | Privacy Policy