/* ---------------------------------------------------------------------
* Numenta Platform for Intelligent Computing (NuPIC)
* Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
* with Numenta, Inc., for a separate license for this software code, the
* following terms and conditions apply:
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero Public License version 3 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Affero Public License for more details.
*
* You should have received a copy of the GNU Affero Public License
* along with this program. If not, see http://www.gnu.org/licenses.
*
* http://numenta.org/licenses/
* ---------------------------------------------------------------------
*/
package org.numenta.nupic.algorithms;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import no.uib.cipr.matrix.sparse.FlexCompRowMatrix;
import org.numenta.nupic.model.Persistable;
import org.numenta.nupic.util.ArrayUtils;
import org.numenta.nupic.util.Deque;
import org.numenta.nupic.util.Tuple;
import gnu.trove.list.TIntList;
import gnu.trove.list.array.TIntArrayList;
/**
* Implementation of a SDR classifier.
*
* The SDR classifier takes the form of a single layer classification network
* that takes SDRs as input and outputs a predicted distribution of classes.
*
* The SDR Classifier accepts a binary input pattern from the
* level below (the "activationPattern") and information from the sensor and
* encoders (the "classification") describing the true (target) input.
*
* The SDR classifier maps input patterns to class labels. There are as many
* output units as the number of class labels or buckets (in the case of scalar
* encoders). The output is a probabilistic distribution over all class labels.
*
* During inference, the output is calculated by first doing a weighted summation
* of all the inputs, and then performing a softmax nonlinear function to get
* the predicted distribution of class labels.
*
* During learning, the connection weights between input units and output units
* are adjusted to maximize the likelihood of the model
*
* The SDR Classifier is a variation of the previous CLAClassifier, which was
* not based on the references below.
*
*
* References:
* Alex Graves. Supervised Sequence Labeling with Recurrent Neural Networks
* PhD Thesis, 2008
* J. S. Bridle. Probabilistic interpretation of feedforward classification
* network outputs, with relationships to statistical pattern recognition.
* In F. Fogleman-Soulie and J.Herault, editors, Neurocomputing: Algorithms,
* Architectures and Applications, pp 227-236, Springer-Verlag, 1990
*
* @author Numenta
* @author Yuwei Cui
* @author David Ray
* @author Andrew Dillon
*/
public class SDRClassifier implements Persistable {
/** Serialization version; the class implements {@link Persistable}. */
private static final long serialVersionUID = 1L;
/** Diagnostic output level; 0 is silent, higher values print more detail. */
int verbosity = 0;
/**
 * The alpha used to adapt the weight matrix during
 * learning. A larger alpha results in faster adaptation to the data.
 */
double alpha = 0.001;
/**
 * Used to track the actual value within each
 * bucket. A lower actValueAlpha results in longer term memory
 * (slower-moving average of the actual values seen per bucket).
 */
double actValueAlpha = 0.3;
/**
 * The bit's learning iteration. This is updated each time store() gets
 * called on this bit.
 * NOTE(review): wording ("bit") appears inherited from CLAClassifier; here it
 * seems to be the classifier-wide learning-iteration counter — confirm.
 */
int learnIteration;
/**
 * This contains the offset between the recordNum (provided by caller) and
 * learnIteration (internal only, always starts at 0).
 * -1 marks "not yet initialized" (set on the first compute call, presumably).
 */
int recordNumMinusLearnIteration = -1;
/**
 * This contains the highest value we've ever seen from the list of active cell indexes
 * from the TM (patternNZ). It is used to pre-allocate fixed size arrays that holds the weights.
 */
int maxInputIdx = 0;
/**
 * This contains the value of the highest bucket index we've ever seen
 * It is used to pre-allocate fixed size arrays that hold the weights of
 * each bucket index during inference.
 */
int maxBucketIdx;
/**
 * The connection weight matrix, presumably keyed by prediction step
 * (one weight matrix per entry in {@code steps}) — TODO confirm against callers.
 * NOTE(review): the generic type parameters were stripped (raw {@code Map}),
 * most likely by the HTML extraction that garbled this file; restored as
 * {@code Map<Integer, FlexCompRowMatrix>}, which matches the otherwise-unused
 * {@link FlexCompRowMatrix} import.
 */
Map<Integer, FlexCompRowMatrix> weightMatrix = new HashMap<>();
/** The sequence of different steps of multi-step predictions. */
TIntList steps = new TIntArrayList();
/**
 * History of the last _maxSteps activation patterns. We need to keep
 * these so that we can associate the current iteration's classification
 * with the activationPattern from N steps ago.
 * NOTE(review): raw type restored to {@code Deque<Tuple>} (the generic
 * parameter appears stripped by extraction); this matches the otherwise-unused
 * {@link Tuple} import — confirm element type against upstream.
 */
Deque<Tuple> patternNZHistory;
/**
* This keeps track of the actual value to use for each bucket index. We
* start with 1 bucket, no actual value so that the first infer has something
* to return
*/
List> actualValues = new ArrayList