
org.cleartk.srl.ArgumentIdentifier

/** 
 * Copyright (c) 2007-2008, Regents of the University of Colorado 
 * All rights reserved.
 * 
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * 
 * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
 * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
 * Neither the name of the University of Colorado at Boulder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
 * 
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE. 
 */
package org.cleartk.srl;

import java.io.File;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.logging.Logger;

import org.apache.uima.UimaContext;
import org.apache.uima.analysis_engine.AnalysisEngineDescription;
import org.apache.uima.analysis_engine.AnalysisEngineProcessException;
import org.apache.uima.jcas.JCas;
import org.apache.uima.resource.ResourceInitializationException;
import org.cleartk.classifier.CleartkAnnotator;
import org.cleartk.classifier.DataWriterFactory;
import org.cleartk.classifier.Feature;
import org.cleartk.classifier.Instance;
import org.cleartk.classifier.feature.extractor.CleartkExtractor;
import org.cleartk.classifier.feature.extractor.CleartkExtractor.FirstCovered;
import org.cleartk.classifier.feature.extractor.CleartkExtractor.LastCovered;
import org.cleartk.classifier.feature.extractor.CleartkExtractorException;
import org.cleartk.classifier.feature.extractor.annotationpair.AnnotationPairFeatureExtractor;
import org.cleartk.classifier.feature.extractor.annotationpair.MatchingAnnotationPairExtractor;
import org.cleartk.classifier.feature.extractor.annotationpair.NamingAnnotationPairFeatureExtractor;
import org.cleartk.classifier.feature.extractor.annotationpair.RelativePositionExtractor;
import org.cleartk.classifier.feature.extractor.simple.CoveredTextExtractor;
import org.cleartk.classifier.feature.extractor.simple.MatchingAnnotationExtractor;
import org.cleartk.classifier.feature.extractor.simple.NamingExtractor;
import org.cleartk.classifier.feature.extractor.simple.SimpleFeatureExtractor;
import org.cleartk.classifier.jar.DirectoryDataWriterFactory;
import org.cleartk.classifier.jar.GenericJarClassifierFactory;
import org.cleartk.srl.feature.NamedEntityExtractor;
import org.cleartk.srl.feature.NodeTypeExtractor;
import org.cleartk.srl.feature.POSExtractor;
import org.cleartk.srl.feature.StemExtractor;
import org.cleartk.srl.type.Argument;
import org.cleartk.srl.type.Predicate;
import org.cleartk.srl.type.SemanticArgument;
import org.cleartk.syntax.constituent.type.TopTreebankNode;
import org.cleartk.syntax.constituent.type.TreebankNode;
import org.cleartk.syntax.feature.HeadWordExtractor;
import org.cleartk.syntax.feature.SubCategorizationExtractor;
import org.cleartk.syntax.feature.SyntacticPathExtractor;
import org.cleartk.token.type.Sentence;
import org.cleartk.token.type.Token;
import org.cleartk.util.AnnotationUtil;
import org.cleartk.util.UIMAUtil;
import org.uimafit.factory.AnalysisEngineFactory;
import org.uimafit.util.JCasUtil;

/**
 * <br>
 * Copyright (c) 2007-2008, Regents of the University of Colorado <br>
 * All rights reserved.
 * <p>
 * 
 * ArgumentIdentifier can work in 3 modes:
 * <ul>
 * <li>training mode: take in fully annotated PropBank-style data and generate training data for
 * detection of arguments</li>
 * 
 * <li>filter mode: take in fully annotated PropBank-style data and a model, then add annotations
 * for falsely detected arguments, and remove annotations for missed arguments; this is to
 * facilitate training of AnnotationClassifier</li>
 * 
 * <li>annotation mode: take in unlabeled Treebank-style data and annotate all detected arguments
 * (no labeling is done by this annotator)</li>
 * </ul>
 * 
 * @author Philipp Wetzler
 * @author Philip Ogren
 * @deprecated the SRL code is going away due to neglect and incompleteness
 */
@Deprecated
public class ArgumentIdentifier extends CleartkAnnotator<Boolean> {

  public static AnalysisEngineDescription getWriterDescription(
      Class<? extends DataWriterFactory<Boolean>> dataWriterFactoryClass,
      File outputDirectory) throws ResourceInitializationException {
    return AnalysisEngineFactory.createPrimitiveDescription(
        ArgumentIdentifier.class,
        CleartkAnnotator.PARAM_DATA_WRITER_FACTORY_CLASS_NAME,
        dataWriterFactoryClass.getName(),
        DirectoryDataWriterFactory.PARAM_OUTPUT_DIRECTORY,
        outputDirectory.toString());
  }

  public static AnalysisEngineDescription getClassifierDescription(File classifierJar)
      throws ResourceInitializationException {
    return AnalysisEngineFactory.createPrimitiveDescription(
        ArgumentIdentifier.class,
        GenericJarClassifierFactory.PARAM_CLASSIFIER_JAR_PATH,
        classifierJar.toString());
  }

  @Override
  public void initialize(UimaContext context) throws ResourceInitializationException {
    super.initialize(context);

    SimpleFeatureExtractor defaultTokenExtractorSet = new MatchingAnnotationExtractor(
        Token.class,
        new CoveredTextExtractor(),
        new StemExtractor(),
        new POSExtractor());

    this.perPredicateExtractor = new NamingExtractor(
        "Predicate",
        new MatchingAnnotationExtractor(Token.class, defaultTokenExtractorSet),
        new MatchingAnnotationExtractor(TreebankNode.class, new SubCategorizationExtractor()));

    this.perConstituentExtractor = new NamingExtractor(
        "Constituent",
        new NodeTypeExtractor(),
        // new TypePathExtractor(TreebankNode.class, "nodeTags"),
        new HeadWordExtractor(defaultTokenExtractorSet),
        new CleartkExtractor(Token.class, defaultTokenExtractorSet, new FirstCovered(1)),
        new CleartkExtractor(Token.class, defaultTokenExtractorSet, new LastCovered(1)),
        new NamedEntityExtractor());

    this.perPredicatAndConstituentExtractor = new NamingAnnotationPairFeatureExtractor(
        "PredicateAndConstituent",
        new MatchingAnnotationPairExtractor(
            TreebankNode.class,
            TreebankNode.class,
            new SyntacticPathExtractor(new NodeTypeExtractor()),
            new RelativePositionExtractor()));
  }

  @Override
  public void process(JCas jCas) throws AnalysisEngineProcessException {
    /*
     * Iterate over sentences in document
     */
    Collection<Sentence> sentences = JCasUtil.select(jCas, Sentence.class);

    nSentences = 0;
    nPredicates = 0;
    nConstituents = 0;

    for (Sentence sentence : sentences) {
      processSentence(jCas, sentence);
    }

    logger.info(String.format(
        "processed %d sentences, %d predicates, ~%d constituents per predicate",
        nSentences,
        nPredicates,
        nPredicates == 0 ? 0 : nConstituents / nPredicates));
  }

  void processSentence(JCas jCas, Sentence sentence) throws AnalysisEngineProcessException {
    nSentences += 1;

    if (sentence.getCoveredText().length() > 40)
      logger.fine(String.format(
          "process sentence \"%s ...\"",
          sentence.getCoveredText().substring(0, 39)));
    else
      logger.fine(String.format("process sentence \"%s\"", sentence.getCoveredText()));

    /*
     * Pre-compute sentence level data: sentenceConstituents: list of all constituents in sentence
     */
    TopTreebankNode top = AnnotationUtil.selectFirstMatching(jCas, TopTreebankNode.class, sentence);
    if (top == null) {
      CleartkExtractorException.noAnnotationInWindow(TopTreebankNode.class, sentence);
    }
    List<TreebankNode> sentenceConstituents = new ArrayList<TreebankNode>(200);
    collectConstituents(top, sentenceConstituents);

    /*
     * Compute constituent features for all constituents in sentence
     */
    List<List<Feature>> sentenceConstituentFeatures = new ArrayList<List<Feature>>(
        sentenceConstituents.size());
    for (TreebankNode constituent : sentenceConstituents) {
      sentenceConstituentFeatures.add(perConstituentExtractor.extract(jCas, constituent));
    }

    /*
     * Iterate over predicates in sentence
     */
    List<Predicate> predicates = JCasUtil.selectCovered(jCas, Predicate.class, sentence);
    for (Predicate predicate : predicates) {
      processPredicate(jCas, predicate, sentenceConstituents, sentenceConstituentFeatures);
    }
  }

  public void processPredicate(
      JCas jCas,
      Predicate predicate,
      List<TreebankNode> sentenceConstituents,
      List<List<Feature>> sentenceConstituentFeatures) throws AnalysisEngineProcessException {
    nPredicates += 1;

    /*
     * Compute predicate features
     */
    List<Feature> predicateFeatures = new ArrayList<Feature>(12);
    predicateFeatures.addAll(perPredicateExtractor.extract(jCas, predicate.getAnnotation()));

    /*
     * Iterate over constituents in sentence
     */
    for (int i = 0; i < sentenceConstituents.size(); i++) {
      nConstituents += 1;

      TreebankNode constituent = sentenceConstituents.get(i);
      Instance<Boolean> instance = new Instance<Boolean>();

      /*
       * Compute predicate-constituent features
       */
      instance.addAll(perPredicatAndConstituentExtractor.extract(
          jCas,
          constituent,
          predicate.getAnnotation()));

      /*
       * Add constituent features
       */
      instance.addAll(sentenceConstituentFeatures.get(i));

      /*
       * Add predicate features
       */
      instance.addAll(predicateFeatures);

      if (isTraining()) {
        instance.setOutcome(false);
        for (int j = 0; j < predicate.getArguments().size(); j++) {
          Argument arg = predicate.getArguments(j);
          if (arg.getAnnotation().equals(constituent)) {
            instance.setOutcome(true);
            break;
          }
        }
      }

      if (this.isTraining()) {
        this.dataWriter.write(instance);
      } else {
        boolean isArgument = this.classifier.classify(instance.getFeatures());
        if (isArgument) {
          SemanticArgument arg = new SemanticArgument(jCas);
          arg.setAnnotation(constituent);
          arg.setBegin(constituent.getBegin());
          arg.setEnd(constituent.getEnd());
          arg.setLabel("?");
          arg.addToIndexes();

          List<Argument> args = UIMAUtil.toList(predicate.getArguments(), Argument.class);
          args.add(arg);
          predicate.setArguments(UIMAUtil.toFSArray(jCas, args));
        }
      }
    }
  }

  /**
   * Recursively build a list of constituents under a TreebankNode.
   * 
   * @param top
   *          the root of the parse tree to operate on; top itself will also be added, unless it is
   *          of type TopTreebankNode
   * @param constituents
   *          list of nodes to add to
   */
  protected void collectConstituents(TreebankNode top, List<TreebankNode> constituents) {
    if (!(top instanceof TopTreebankNode))
      constituents.add(top);

    if (top.getChildren() == null)
      return;

    int numberOfChildren = top.getChildren().size();
    for (int i = 0; i < numberOfChildren; i++) {
      collectConstituents(top.getChildren(i), constituents);
    }
  }

  private SimpleFeatureExtractor perPredicateExtractor;

  private SimpleFeatureExtractor perConstituentExtractor;

  private AnnotationPairFeatureExtractor perPredicatAndConstituentExtractor;

  private int nSentences;

  private int nPredicates;

  private int nConstituents;

  private Logger logger = Logger.getLogger(this.getClass().getName());
}
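
A minimal usage sketch (not part of the original source) showing how the two factory methods
above might be wired into a uimaFIT pipeline, corresponding to the training and annotation
modes described in the class Javadoc. The collection reader and the DataWriterFactory
implementation (MyBooleanDataWriterFactory) are placeholders for whatever reader and ClearTK
data writer a project actually uses; only getWriterDescription, getClassifierDescription and
SimplePipeline.runPipeline are taken from existing APIs.

import java.io.File;

import org.apache.uima.analysis_engine.AnalysisEngineDescription;
import org.apache.uima.collection.CollectionReader;
import org.uimafit.pipeline.SimplePipeline;

public class ArgumentIdentifierExample {

  public static void main(String[] args) throws Exception {
    File modelDir = new File("target/argument-identifier");

    // Training mode: the reader must supply Sentence, Token, TreebankNode and Predicate
    // annotations (e.g. from PropBank-style data); the engine writes training instances.
    CollectionReader reader = createReader(); // placeholder, project-specific
    AnalysisEngineDescription writer = ArgumentIdentifier.getWriterDescription(
        MyBooleanDataWriterFactory.class,     // placeholder DataWriterFactory<Boolean>
        modelDir);
    SimplePipeline.runPipeline(reader, writer);

    // Annotation mode: after training and packaging a classifier jar, apply it to new text.
    AnalysisEngineDescription annotator = ArgumentIdentifier.getClassifierDescription(
        new File(modelDir, "model.jar"));
    SimplePipeline.runPipeline(createReader(), annotator);
  }

  private static CollectionReader createReader() {
    throw new UnsupportedOperationException("placeholder: build a project-specific reader");
  }
}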



