
edu.stanford.nlp.coref.statistical.DatasetBuilder


Stanford CoreNLP provides a set of natural language analysis tools that take raw English text as input and give the base forms of words, their parts of speech, and whether they are names of companies, people, etc.; normalize dates, times, and numeric quantities; mark up the structure of sentences in terms of phrases and word dependencies; and indicate which noun phrases refer to the same entities. It provides the foundational building blocks for higher-level text understanding applications.
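
As a rough orientation (and not part of the DatasetBuilder source below), the toolkit is normally driven through the StanfordCoreNLP pipeline. The annotator list, example text, class name, and printed fields in this sketch are illustrative choices rather than anything prescribed by this artifact:

import java.util.Properties;

import edu.stanford.nlp.pipeline.CoreDocument;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;

public class CoreNlpExample {
  public static void main(String[] args) {
    // Annotators for tokenization, sentence splitting, POS tags, lemmas, NER,
    // dependency parsing, and coreference resolution.
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize,ssplit,pos,lemma,ner,depparse,coref");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

    CoreDocument doc = new CoreDocument(
        "Stanford University is located in California. It was founded in 1885.");
    pipeline.annotate(doc);

    // Print each token of the first sentence together with its part-of-speech tag.
    doc.sentences().get(0).tokens()
        .forEach(tok -> System.out.println(tok.word() + "\t" + tok.tag()));
  }
}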

package edu.stanford.nlp.coref.statistical;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.stream.Collectors;

import edu.stanford.nlp.coref.CorefDocumentProcessor;
import edu.stanford.nlp.coref.CorefUtils;
import edu.stanford.nlp.coref.data.Document;
import edu.stanford.nlp.io.IOUtils;
import edu.stanford.nlp.util.Pair;

/**
 * Produces train/dev/test sets for training coreference models, with optional sampling.
 * @author Kevin Clark
 */
public class DatasetBuilder implements CorefDocumentProcessor {
  private final int maxExamplesPerDocument;
  private final double minClassImbalancedPerDocument;
  private final Map<Integer, Map<Pair<Integer, Integer>, Boolean>> mentionPairs;
  private final Random random;

  public DatasetBuilder() {
    this(0, Integer.MAX_VALUE);
  }

  public DatasetBuilder(double minClassImbalancedPerDocument, int maxExamplesPerDocument) {
    this.maxExamplesPerDocument = maxExamplesPerDocument;
    this.minClassImbalancedPerDocument = minClassImbalancedPerDocument;
    mentionPairs = new HashMap<>();
    random = new Random(0);
  }

  @Override
  public void process(int id, Document document) {
    Map<Pair<Integer, Integer>, Boolean> labeledPairs =
        CorefUtils.getLabeledMentionPairs(document);

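    // Down-sample negative (non-coreferent) pairs so that positives make up at least
    // minClassImbalancedPerDocument of this document's examples.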
    long numP = labeledPairs.keySet().stream().filter(m -> labeledPairs.get(m)).count();
    List<Pair<Integer, Integer>> negative = labeledPairs.keySet().stream()
        .filter(m -> !labeledPairs.get(m))
        .collect(Collectors.toList());
    int numN = negative.size();
    if (numP / (float) (numP + numN) < minClassImbalancedPerDocument) {
      numN = (int) (numP / minClassImbalancedPerDocument - numP);
      Collections.shuffle(negative);
      for (int i = numN; i < negative.size(); i++) {
        labeledPairs.remove(negative.get(i));
      }
    }

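    // Group candidate antecedents (pair.first) by the anaphoric mention (pair.second).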
    Map<Integer, List<Integer>> mentionToCandidateAntecedents = new HashMap<>();
    for (Pair<Integer, Integer> pair : labeledPairs.keySet()) {
      List<Integer> candidateAntecedents = mentionToCandidateAntecedents.get(pair.second);
      if (candidateAntecedents == null) {
          candidateAntecedents = new ArrayList<>();
          mentionToCandidateAntecedents.put(pair.second, candidateAntecedents);
      }
      candidateAntecedents.add(pair.first);
    }

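    // Enforce maxExamplesPerDocument by repeatedly dropping every pair for a randomly chosen mention.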
    List<Integer> mentions = new ArrayList<>(mentionToCandidateAntecedents.keySet());
    while (labeledPairs.size() > maxExamplesPerDocument) {
      int mention = mentions.remove(random.nextInt(mentions.size()));
      for (int candidateAntecedent : mentionToCandidateAntecedents.get(mention)) {
        labeledPairs.remove(new Pair<>(candidateAntecedent, mention));
      }
    }

    mentionPairs.put(id, labeledPairs);
  }

  @Override
  public void finish() throws Exception {
    IOUtils.writeObjectToFile(mentionPairs, StatisticalCorefTrainer.datasetFile);
  }
}
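
For orientation only, a minimal driver for this class might look like the sketch below. The docs list and the buildDataset method are hypothetical placeholders for whatever upstream reader supplies edu.stanford.nlp.coref.data.Document instances, and the sampling parameters are arbitrary example values:

import java.util.List;

import edu.stanford.nlp.coref.data.Document;
import edu.stanford.nlp.coref.statistical.DatasetBuilder;

public class DatasetBuilderDemo {
  // Assumption: `docs` comes from an upstream document reader that is not shown here.
  public static void buildDataset(List<Document> docs) throws Exception {
    // Example settings: require positives to be at least 5% of pairs, cap 5000 pairs per document.
    DatasetBuilder builder = new DatasetBuilder(0.05, 5000);
    for (int i = 0; i < docs.size(); i++) {
      builder.process(i, docs.get(i));  // sub-sample and record mention pairs for one document
    }
    builder.finish();  // serialize the collected pairs to StatisticalCorefTrainer.datasetFile
  }
}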



