aima.core.probability.hmm.exact.FixedLagSmoothing Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of aima-core Show documentation
AIMA-Java Core Algorithms from the book Artificial Intelligence a Modern Approach 3rd Ed.
The newest version!
package aima.core.probability.hmm.exact;
import java.util.LinkedList;
import java.util.List;
import aima.core.probability.CategoricalDistribution;
import aima.core.probability.hmm.HiddenMarkovModel;
import aima.core.probability.proposition.AssignmentProposition;
import aima.core.util.math.Matrix;
/**
* Artificial Intelligence A Modern Approach (3rd Edition): page 580.
*
*
*
* function FIXED-LAG-SMOOTHING(et, hmm, d) returns a distribution over Xt-d
* inputs: et, the current evidence from time step t
* hmm, a hidden Markov model with S * S transition matrix T
* d, the length of the lag for smoothing
* persistent: t, the current time, initially 1
* f, the forward message P(Xt | e1:t), initially hmm.PRIOR
* B, the d-step backward transformation matrix, initially the identity matrix
* et-d:t, double-ended list of evidence from t-d to t, initially empty
* local variables: Ot-d, Ot, diagonal matrices containing the sensor model information
*
* add et to the end of et-d:t
* Ot <- diagonal matrix containing P(et | Xt)
* if t > d then
* f <- FORWARD(f, et)
* remove et-d-1 from the beginning of et-d:t
* Ot-d <- diagonal matrix containing P(et-d | Xt-d)
* B <- inverse(O_t-d) * inverse(T) * B * T * O_t
* else B <- BTOt
* t <- t + 1
* if t > d then return NORMALIZE(f * B * 1) else return null
*
*
* Figure 15.6 An algorithm for smoothing with a fixed time lag of d steps,
* implemented as an online algorithm that outputs the new smoothed estimate
* given the observation for a new time step. Notice that the final output
* NORMALIZE(f * B * 1) is just alpha * f_1:t-d * b_t-d+1:t, by Equation
* (15.14).
*
* Note: There appears to be two minor defects in the algorithm outlined
* in the book:
* f <- FORWARD(f, et)
* should be:
* f <- FORWARD(f, et-d)
* as we are returning a smoothed step for t-d and not the current time t.
*
* The update of:
* t <- t + 1
* should occur after the return value is calculated. Otherwise when t == d the
* value returned is based on HMM.prior in the calculation as opposed to a
* correctly calculated forward message. Comments welcome.
*
* @author Ciaran O'Reilly
* @author Ravi Mohan
*
*/
public class FixedLagSmoothing {
	// persistent:
	// t, the current time, initially 1
	private int t = 1;
	// f, the forward message P(X_t | e_1:t),
	// initially hmm.PRIOR
	private Matrix f = null;
	// B, the d-step backward transformation matrix, initially the
	// identity matrix
	private Matrix B = null;
	// e_t-d:t, double-ended list of evidence from t-d to t (stored as the
	// diagonal sensor-model matrices O_i), initially empty
	private final List<Matrix> e_tmd_to_t = new LinkedList<Matrix>();
	// hmm, a hidden Markov model with S * S transition matrix T
	private final HiddenMarkovModel hmm;
	// d, the length of the lag for smoothing
	private final int d;
	// a column vector of 1s; multiplying B by it collapses the backward
	// transformation matrix into the backward message b
	private Matrix unitMessage = null;

	/**
	 * Create a Fixed-Lag-Smoothing implementation, that sets up the required
	 * persistent values.
	 *
	 * @param hmm
	 *            a hidden Markov model with S * S transition matrix T
	 * @param d
	 *            d, the length of the lag for smoothing
	 */
	public FixedLagSmoothing(HiddenMarkovModel hmm, int d) {
		this.hmm = hmm;
		this.d = d;
		initPersistent();
	}

	/**
	 * Algorithm for smoothing with a fixed time lag of d steps, implemented as
	 * an online algorithm that outputs the new smoothed estimate given the
	 * observation for a new time step.
	 *
	 * @param et
	 *            the current evidence from time step t
	 * @return a distribution over X_t-d, or null while t &lt;= d (not enough
	 *         evidence has accumulated yet to smooth d steps back)
	 */
	public CategoricalDistribution fixedLagSmoothing(
			List<AssignmentProposition> et) {
		// local variables: O_t-d, O_t,
		// diagonal matrices containing the sensor model information
		Matrix O_tmd, O_t;
		// add e_t to the end of e_t-d:t
		e_tmd_to_t.add(hmm.getEvidence(et));
		// O_t <- diagonal matrix containing P(e_t | X_t)
		O_t = e_tmd_to_t.get(e_tmd_to_t.size() - 1);
		// if t > d then
		if (t > d) {
			// remove e_t-d-1 from the beginning of e_t-d:t
			e_tmd_to_t.remove(0);
			// O_t-d <- diagonal matrix containing P(e_t-d | X_t-d)
			O_tmd = e_tmd_to_t.get(0);
			// f <- FORWARD(f, e_t-d)
			// (the book's pseudocode advances with e_t here; e_t-d keeps f
			// aligned with the smoothed step t-d being returned)
			f = forward(f, O_tmd);
			// B <- inverse(O_t-d) * inverse(T) * B * T * O_t
			B = O_tmd.inverse().times(hmm.getTransitionModel().inverse())
					.times(B).times(hmm.getTransitionModel()).times(O_t);
		} else {
			// else B <- B * T * O_t
			B = B.times(hmm.getTransitionModel()).times(O_t);
		}
		// if t > d then return NORMALIZE(f * B * 1) else return null
		// (B times the unit column vector yields the backward message b;
		// arrayTimes is the element-wise product f x b)
		CategoricalDistribution rVal = null;
		if (t > d) {
			rVal = hmm
					.convert(hmm.normalize(f.arrayTimes(B.times(unitMessage))));
		}
		// t <- t + 1
		// (deliberately after computing the return value, so that at t == d+1
		// the result uses the correctly advanced forward message, not the
		// prior; see the class comment)
		t = t + 1;
		return rVal;
	}

	/**
	 * The forward equation (15.5) in Matrix form becomes (15.12):
	 *
	 * f_1:t+1 = alpha * O_t+1 * T^T * f_1:t
	 *
	 * @param f1_t
	 *            f_1:t, the current forward message
	 * @param O_tp1
	 *            O_t+1, the diagonal sensor-model matrix for the next
	 *            evidence step
	 * @return f_1:t+1, the normalized next forward message
	 */
	public Matrix forward(Matrix f1_t, Matrix O_tp1) {
		return hmm.normalize(O_tp1.times(hmm.getTransitionModel().transpose()
				.times(f1_t)));
	}

	//
	// PRIVATE METHODS
	//
	private void initPersistent() {
		// t, the current time, initially 1
		t = 1;
		// f, the forward message P(X_t | e_1:t),
		// initially hmm.PRIOR
		f = hmm.getPrior();
		// B, the d-step backward transformation matrix, initially the
		// identity matrix
		B = Matrix.identity(f.getRowDimension(), f.getRowDimension());
		// e_t-d:t, double-ended list of evidence from t-d to t, initially
		// empty
		e_tmd_to_t.clear();
		// unit column vector used to collapse B into a backward message
		unitMessage = hmm.createUnitMessage();
	}
}