/**
* Copyright (C) 2015-2016, BMW Car IT GmbH and BMW AG
* Author: Stefan Holder ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.bmw.hmm;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
/**
* Implementation of the Viterbi algorithm for time-inhomogeneous Markov processes,
* meaning that the set of states and state transition probabilities are not necessarily fixed
* for all time steps. The plain Viterbi algorithm for stationary Markov processes is described e.g.
* in Rabiner, Juang, An introduction to Hidden Markov Models, IEEE ASSP Mag., pp 4-16, June 1986.
*
* <p>
* Generally expects logarithmic probabilities as input to prevent arithmetic underflows for
* small probability values.
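*
* <p>
* A minimal sketch of this convention (the variable names below are illustrative and not
* part of this class): callers take natural logarithms of their linear probabilities
* before passing them in.
* <pre>{@code
* // Illustration only: convert a linear emission probability to log space
* // before handing it to this class.
* double emissionLogProbability = Math.log(emissionProbability);
* }</pre>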
*
* <p>
* This algorithm supports storing transition objects in
* {@link #nextStep(Object, Collection, Map, Map, Map)}. For instance, if an HMM is
* used for map matching, this could be routes between road position candidates.
* The transition descriptors of the most likely sequence can be retrieved later in
* {@link SequenceState#transitionDescriptor} and hence do not need to be stored by the
* caller. Since the caller does not know in advance which transitions will occur in the most
* likely sequence, this reduces the number of transitions that need to be kept in memory
* from t*n² to t*n since only one transition descriptor is stored per back pointer,
* where t is the number of time steps and n the number of candidates per time step.
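*
* <p>
* A hedged usage sketch for the map matching case. The domain types RoadPosition,
* GpsMeasurement and RoadPath are hypothetical, and the companion methods
* startWithInitialObservation and computeMostLikelySequence are assumed from the rest of
* this library rather than defined in this excerpt:
* <pre>{@code
* // Sketch only: feed one observation per time step, passing road paths as
* // transition descriptors, then read them back from the most likely sequence.
* ViterbiAlgorithm<RoadPosition, GpsMeasurement, RoadPath> viterbi = new ViterbiAlgorithm<>();
* viterbi.startWithInitialObservation(gps0, candidates0, emissionLogProbabilities0);
* viterbi.nextStep(gps1, candidates1, emissionLogProbabilities1,
*         transitionLogProbabilities1, roadPaths1);
* List<SequenceState<RoadPosition, GpsMeasurement, RoadPath>> seq =
*         viterbi.computeMostLikelySequence();
* RoadPath firstTransition = seq.get(1).transitionDescriptor;
* }</pre>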
*
* <p>
* For long observation sequences, back pointers usually converge to a single path after a
* certain number of time steps. For instance, when matching GPS coordinates to roads, the last
* GPS positions in the trace usually do not affect the first road matches anymore.
* This implementation exploits this fact by letting the Java garbage collector
* take care of unreachable back pointers. If back pointers converge to a single path after a
* constant number of time steps, only O(t) back pointers and transition descriptors need to be
* stored in memory.
*
* @param <S> the state type
* @param <O> the observation type
* @param <D> the transition descriptor type. Pass {@link Object} if transition descriptors are not
* needed.
*/
public class ViterbiAlgorithm<S, O, D> {
/**
* Stores additional information for each candidate.
*/
private static class ExtendedState<S, O, D> {
S state;
/**
* Back pointer to previous state candidate in the most likely sequence.
* Back pointers are chained using plain Java references.
* This allows garbage collection of unreachable back pointers.
*/
ExtendedState<S, O, D> backPointer;
O observation;
D transitionDescriptor;
ExtendedState(S state,
ExtendedState<S, O, D> backPointer,
O observation, D transitionDescriptor) {
this.state = state;
this.backPointer = backPointer;
this.observation = observation;
this.transitionDescriptor = transitionDescriptor;
}
}
private static class ForwardStepResult<S, O, D> {
final Map<S, Double> newMessage;
/**
* Includes back pointers to previous state candidates for retrieving the most likely
* sequence after the forward pass.
*/
final Map<S, ExtendedState<S, O, D>> newExtendedStates;
ForwardStepResult(int numberStates) {
newMessage = new LinkedHashMap<>(Utils.initialHashMapCapacity(numberStates));
newExtendedStates = new LinkedHashMap<>(Utils.initialHashMapCapacity(numberStates));
}
}
/**
* Allows to retrieve the most likely sequence using back pointers.
*/
private Map<S, ExtendedState<S, O, D>> lastExtendedStates;
private Collection<S> prevCandidates;
/**
* For each state s_t of the current time step t, message.get(s_t) contains the log
* probability of the most likely sequence ending in state s_t with given observations
* o_1, ..., o_t.
*
* Formally, this is max log p(s_1, ..., s_t, o_1, ..., o_t) w.r.t. s_1, ..., s_{t-1}.
* Note that to compute the most likely state sequence, it is sufficient and more
* efficient to compute in each time step the joint probability of states and observations
* instead of computing the conditional probability of states given the observations.
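*
* As a sketch of the standard log-space Viterbi recursion this corresponds to (notation as
* above; stated here for illustration, not copied from this file):
* message_t(s_t) = emissionLogProb(s_t, o_t)
*                  + max w.r.t. s_{t-1} of [ message_{t-1}(s_{t-1}) + transitionLogProb(s_{t-1}, s_t) ].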
*/
private Map<S, Double> message;
private boolean isBroken = false;
private List