edu.stanford.nlp.coref.neural.NeuralCorefAlgorithm Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of stanford-corenlp Show documentation
Stanford CoreNLP provides a set of natural language analysis tools which can take raw English language text input and give the base forms of words, their parts of speech, whether they are names of companies, people, etc., normalize dates, times, and numeric quantities, mark up the structure of sentences in terms of phrases and word dependencies, and indicate which noun phrases refer to the same entities. It provides the foundational building blocks for higher level text understanding applications.
package edu.stanford.nlp.coref.neural;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import edu.stanford.nlp.coref.CorefAlgorithm;
import edu.stanford.nlp.coref.CorefProperties;
import edu.stanford.nlp.coref.CorefUtils;
import edu.stanford.nlp.coref.data.Dictionaries;
import edu.stanford.nlp.coref.data.Document;
import edu.stanford.nlp.coref.data.Mention;
import edu.stanford.nlp.io.IOUtils;
import edu.stanford.nlp.stats.ClassicCounter;
import edu.stanford.nlp.stats.Counter;
import edu.stanford.nlp.util.Pair;
import edu.stanford.nlp.util.logging.Redwood;
import org.ejml.simple.SimpleMatrix;
/**
* Neural mention-ranking coreference model as described in
*
* Kevin Clark and Christopher D. Manning. 2016.
*
* Deep Reinforcement Learning for Mention-Ranking Coreference Models.
* In Empirical Methods on Natural Language Processing.
*
* Training code is implemented in python and is available at
* https://github.com/clarkkev/deep-coref.
* @author Kevin Clark
*/
public class NeuralCorefAlgorithm implements CorefAlgorithm {
private static Redwood.RedwoodChannels log = Redwood.channels(NeuralCorefAlgorithm.class);
private final double greedyness;
private final int maxMentionDistance;
private final int maxMentionDistanceWithStringMatch;
private final CategoricalFeatureExtractor featureExtractor;
private final EmbeddingExtractor embeddingExtractor;
private final NeuralCorefModel model;
public NeuralCorefAlgorithm(Properties props, Dictionaries dictionaries) {
greedyness = NeuralCorefProperties.greedyness(props);
maxMentionDistance = CorefProperties.maxMentionDistance(props);
maxMentionDistanceWithStringMatch = CorefProperties.maxMentionDistanceWithStringMatch(props);
model = IOUtils.readObjectAnnouncingTimingFromURLOrClasspathOrFileSystem(
log, "Loading coref model", NeuralCorefProperties.modelPath(props));
embeddingExtractor = new EmbeddingExtractor(CorefProperties.conll(props),
IOUtils.readObjectAnnouncingTimingFromURLOrClasspathOrFileSystem(
log, "Loading coref embeddings", NeuralCorefProperties.pretrainedEmbeddingsPath(props)),
model.getWordEmbeddings());
featureExtractor = new CategoricalFeatureExtractor(props, dictionaries);
}
@Override
public void runCoref(Document document) {
List sortedMentions = CorefUtils.getSortedMentions(document);
Map> mentionsByHeadIndex = new HashMap<>();
for (Mention m : sortedMentions) {
List withIndex = mentionsByHeadIndex.get(m.headIndex);
if (withIndex == null) {
withIndex = new ArrayList<>();
mentionsByHeadIndex.put(m.headIndex, withIndex);
}
withIndex.add(m);
}
SimpleMatrix documentEmbedding = embeddingExtractor.getDocumentEmbedding(document);
Map antecedentEmbeddings = new HashMap<>();
Map anaphorEmbeddings = new HashMap<>();
Counter anaphoricityScores = new ClassicCounter<>();
for (Mention m : sortedMentions) {
SimpleMatrix mentionEmbedding = embeddingExtractor.getMentionEmbeddings(m, documentEmbedding);
antecedentEmbeddings.put(m.mentionID, model.getAntecedentEmbedding(mentionEmbedding));
anaphorEmbeddings.put(m.mentionID, model.getAnaphorEmbedding(mentionEmbedding));
anaphoricityScores.incrementCount(m.mentionID,
model.getAnaphoricityScore(mentionEmbedding,
featureExtractor.getAnaphoricityFeatures(m, document, mentionsByHeadIndex)));
}
Map> mentionToCandidateAntecedents = CorefUtils.heuristicFilter(sortedMentions,
maxMentionDistance, maxMentionDistanceWithStringMatch);
for (Map.Entry> e : mentionToCandidateAntecedents.entrySet()) {
double bestScore = anaphoricityScores.getCount(e.getKey()) - 50 * (greedyness - 0.5);
int m = e.getKey();
Integer antecedent = null;
for (int ca : e.getValue()) {
double score = model.getPairwiseScore(antecedentEmbeddings.get(ca),
anaphorEmbeddings.get(m), featureExtractor.getPairFeatures(
new Pair<>(ca, m), document, mentionsByHeadIndex));
if (score > bestScore) {
bestScore = score;
antecedent = ca;
}
}
if (antecedent != null) {
CorefUtils.mergeCoreferenceClusters(new Pair<>(antecedent, m), document);
}
}
}
}