
edu.stanford.nlp.classify.AdaptedGaussianPriorObjectiveFunction Maven / Gradle / Ivy


Stanford CoreNLP provides a set of natural language analysis tools which can take raw English language text input and give the base forms of words, their parts of speech, whether they are names of companies, people, etc., normalize dates, times, and numeric quantities, mark up the structure of sentences in terms of phrases and word dependencies, and indicate which noun phrases refer to the same entities. It provides the foundational building blocks for higher level text understanding applications.
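As a quick orientation, here is a minimal sketch of driving that analysis pipeline through the standard StanfordCoreNLP API (the annotator list and the example sentence below are illustrative choices, not taken from this page):

import java.util.Properties;

import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;

public class PipelineSketch {
  public static void main(String[] args) {
    // Illustrative annotator list; pick only the annotators your application needs.
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize,ssplit,pos,lemma,ner");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

    // Annotate a raw text document; results are attached to the Annotation object.
    Annotation document = new Annotation("Stanford University is located in California.");
    pipeline.annotate(document);
  }
}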

package edu.stanford.nlp.classify;

import edu.stanford.nlp.math.ArrayMath;
import java.util.Arrays;


/**
 * Adapt the mean of the Gaussian Prior by shifting the mean to the previously trained weights
 * @author Pi-Chuan Chang
 * @author Sarah Spikes ([email protected]) (Templatization)
 *
 * @param <L> The type of the labels in the Dataset (one can be passed in to the constructor)
 * @param <F> The type of the features in the Dataset
 */

public class AdaptedGaussianPriorObjectiveFunction<L, F> extends LogConditionalObjectiveFunction<L, F> {

  /** The previously trained weights (flattened), used as the mean of the Gaussian prior. */
  double[] weights;

  /**
   * Calculate the conditional likelihood.
   */
  @Override
  protected void calculate(double[] x) {
    if (useSummedConditionalLikelihood) {
      calculateSCL(x);
    } else {
      calculateCL(x);
    }
  }


  /**
   * Calculate the summed conditional likelihood; not supported by this adapted objective.
   */
  private void calculateSCL(double[] x) {
    throw new UnsupportedOperationException();
  }

  /**
   * Calculate the conditional likelihood and its gradient, with the Gaussian prior
   * centered at the previously trained weights.
   */
  private void calculateCL(double[] x) {
    value = 0.0;
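    // Empirical feature counts (negated): the constant part of the gradient,
    // computed once on the first call and cached in derivativeNumerator.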
    if (derivativeNumerator == null) {
      derivativeNumerator = new double[x.length];
      for (int d = 0; d < data.length; d++) {
        int[] features = data[d];
        for (int feature : features) {
          int i = indexOf(feature, labels[d]);
          if (dataWeights == null) {
            derivativeNumerator[i] -= 1;
          } else {
            derivativeNumerator[i] -= dataWeights[d];
          }
        }
      }
    }
    copy(derivative, derivativeNumerator);

    double[] sums = new double[numClasses];
    double[] probs = new double[numClasses];
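    // 'sums' holds the per-class activations (log scores) and 'probs' the model
    // probabilities P(c | d); the loop below adds these expectations to the gradient
    // and accumulates the negative conditional log-likelihood in 'value'.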

    for (int d = 0; d < data.length; d++) {
      int[] features = data[d];
      // activation
      Arrays.fill(sums, 0.0);

      for (int c = 0; c < numClasses; c++) {
        for (int feature : features) {
          int i = indexOf(feature, c);
          sums[c] += x[i];
        }
      }
      double total = ArrayMath.logSum(sums);
      for (int c = 0; c < numClasses; c++) {
        probs[c] = Math.exp(sums[c] - total);
        if (dataWeights != null) {
          probs[c] *= dataWeights[d];
        }
        for (int feature : features) {
          int i = indexOf(feature, c);
          derivative[i] += probs[c];
        }
      }

      double dV = sums[labels[d]] - total;
      if (dataWeights != null) {
        dV *= dataWeights[d];
      }
      value -= dV;
    }
    //Logging.logger(this.getClass()).info("x length="+x.length);
    //Logging.logger(this.getClass()).info("weights length="+weights.length);
    double[] newX = ArrayMath.pairwiseSubtract(x, weights);
    value += prior.compute(newX, derivative);
  }

  /**
   * Calculate the objective for real-valued-feature (RVF) data; not supported by this
   * adapted objective.
   */
  @Override
  protected void rvfcalculate(double[] x) {
    throw new UnsupportedOperationException();
  }

  public AdaptedGaussianPriorObjectiveFunction(GeneralDataset<L, F> dataset, LogPrior prior, double[][] weights) {
    super(dataset, prior);
    this.weights = to1D(weights);
  }

  public double[] to1D(double[][] x2) {
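    // Flatten the [feature][class] weight matrix into the 1-D layout used by the
    // superclass (the same indexOf(feature, class) indexing used in calculateCL).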
    double[] x = new double[numFeatures*numClasses];
    for (int i = 0; i < numFeatures; i++) {
      for (int j = 0; j < numClasses; j++) {
        x[indexOf(i, j)] = x2[i][j];
      }
    }
    return x;
  }
}
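
For context, here is a minimal sketch of how this objective might be wired up for domain adaptation. The dataset, prior, and minimizer calls follow the usual CoreNLP classify/optimization APIs; the zero oldWeights placeholder, the feature and label strings, and the tolerance value are illustrative assumptions rather than anything prescribed by this class.

import java.util.Arrays;

import edu.stanford.nlp.classify.AdaptedGaussianPriorObjectiveFunction;
import edu.stanford.nlp.classify.Dataset;
import edu.stanford.nlp.classify.LogPrior;
import edu.stanford.nlp.ling.BasicDatum;
import edu.stanford.nlp.optimization.QNMinimizer;

public class AdaptationSketch {
  public static void main(String[] args) {
    // In-domain training data; features and labels are plain Strings here.
    Dataset<String, String> dataset = new Dataset<>();
    dataset.add(new BasicDatum<>(Arrays.asList("feat1", "feat2"), "POSITIVE"));
    dataset.add(new BasicDatum<>(Arrays.asList("feat3"), "NEGATIVE"));

    // Weights of a classifier already trained on out-of-domain data, indexed
    // [feature][class]; a zero placeholder here, and assumed to line up with this
    // dataset's feature and label indices.
    double[][] oldWeights = new double[dataset.numFeatures()][dataset.numClasses()];

    // Default quadratic (Gaussian) prior; with this objective it is centered at oldWeights.
    LogPrior prior = new LogPrior();

    AdaptedGaussianPriorObjectiveFunction<String, String> objective =
        new AdaptedGaussianPriorObjectiveFunction<>(dataset, prior, oldWeights);

    // Minimize with L-BFGS, starting from the all-zero point supplied by the objective.
    QNMinimizer minimizer = new QNMinimizer();
    double[] adaptedWeights = minimizer.minimize(objective, 1e-4, objective.initial());
    System.out.println("Learned " + adaptedWeights.length + " adapted weights");
  }
}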



