All downloads are free. The search and download functionality uses the official Maven repository.

edu.stanford.nlp.ling.tokensregex.MultiCoreMapNodePattern Maven / Gradle / Ivy

Go to download

Stanford CoreNLP provides a set of natural language analysis tools which can take raw English language text input and give the base forms of words, their parts of speech, whether they are names of companies, people, etc., normalize dates, times, and numeric quantities, mark up the structure of sentences in terms of phrases and word dependencies, and indicate which noun phrases refer to the same entities. It provides the foundational building blocks for higher level text understanding applications.

There is a newer version: 4.5.7
Show newest version
package edu.stanford.nlp.ling.tokensregex;

import edu.stanford.nlp.pipeline.ChunkAnnotationUtils;
import edu.stanford.nlp.pipeline.CoreMapAttributeAggregator;
import edu.stanford.nlp.util.CoreMap;
import edu.stanford.nlp.util.Interval;

import java.util.*;

/**
 * Pattern for matching across multiple core maps.
 *
 * 

* This class allows for string matches across tokens. It is not implemented efficiently * (it basically creates a big pretend token and tries to do string match on that) * so can be expensive to use. Whenever possible, SequencePattern should be used instead. *

* * @author Angel Chang */ public class MultiCoreMapNodePattern extends MultiNodePattern { Map aggregators = CoreMapAttributeAggregator.getDefaultAggregators(); NodePattern nodePattern; public MultiCoreMapNodePattern() {} public MultiCoreMapNodePattern(NodePattern nodePattern) { this.nodePattern = nodePattern; } public MultiCoreMapNodePattern(NodePattern nodePattern, Map aggregators) { this.nodePattern = nodePattern; this.aggregators = aggregators; } protected Collection> match(List nodes, int start) { List> matched = new ArrayList<>(); int minEnd = start + minNodes; int maxEnd = nodes.size(); if (maxNodes >= 0 && maxNodes + start < nodes.size()) { maxEnd = maxNodes + start; } for (int end = minEnd; end <= maxEnd; end++) { CoreMap chunk = ChunkAnnotationUtils.getMergedChunk(nodes, start, end, aggregators, null); if (nodePattern.match(chunk)) { matched.add(Interval.toInterval(start, end)); } } return matched; } public static class StringSequenceAnnotationPattern extends MultiNodePattern { Class textKey; PhraseTable phraseTable; public StringSequenceAnnotationPattern(Class textKey, Set> targets, boolean ignoreCase) { this.textKey = textKey; phraseTable = new PhraseTable(false, ignoreCase, false); for (List target:targets) { phraseTable.addPhrase(target); if (maxNodes < 0 || target.size() > maxNodes) maxNodes = target.size(); } } public StringSequenceAnnotationPattern(Class textKey, Set> targets) { this(textKey, targets, false); } public StringSequenceAnnotationPattern(Class textKey, Map, Object> targets, boolean ignoreCase) { this.textKey = textKey; phraseTable = new PhraseTable(false, ignoreCase, false); for (List target:targets.keySet()) { phraseTable.addPhrase(target, null, targets.get(target)); if (maxNodes < 0 || target.size() > maxNodes) maxNodes = target.size(); } } public StringSequenceAnnotationPattern(Class textKey, Map, Object> targets) { this(textKey, targets, false); } protected Collection> match(List nodes, int start) { PhraseTable.WordList words = new 
PhraseTable.TokenList(nodes, textKey); List matches = phraseTable.findMatches(words, start, nodes.size(), false); Collection> intervals = new ArrayList<>(matches.size()); for (PhraseTable.PhraseMatch match:matches) { intervals.add(match.getInterval()); } return intervals; } public String toString() { return ":" + phraseTable; } } }




© 2015 - 2024 Weber Informatics LLC | Privacy Policy