// opennlp.tools.util.eval.FMeasure
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package opennlp.tools.util.eval;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
* The {@link FMeasure} is a utility class for {@link Evaluator evaluators}
* which measures precision, recall and the resulting f-measure.
*
* Evaluation results are the arithmetic mean of the precision
* scores calculated for each reference sample and
* the arithmetic mean of the recall scores calculated for
* each reference sample.
*/
public final class FMeasure {
/**
* |selected| = true positives + false positives
* the count of selected (or retrieved) items.
*/
private long selected;
/**
* |target| = true positives + false negatives
* the count of target (or correct) items.
*/
private long target;
/**
* Holds the number of true positives found.
*/
private long truePositive;
/**
 * Computes precision as {@code truePositive / selected}.
 *
 * @return The arithmetic mean of the precision scores calculated
 *         for each evaluated sample, or {@code 0} if nothing was selected.
 */
public double getPrecisionScore() {
  if (selected <= 0) {
    // no retrieved items yet — precision is undefined, report 0
    return 0;
  }
  return (double) truePositive / (double) selected;
}
/**
 * Computes recall as {@code truePositive / target}.
 *
 * @return The arithmetic mean of the recall scores calculated
 *         for each evaluated sample, or {@code 0} if there are no targets.
 */
public double getRecallScore() {
  if (target <= 0) {
    // no reference items yet — recall is undefined, report 0
    return 0;
  }
  return (double) truePositive / (double) target;
}
/**
 * Computes the balanced f-measure:
 * {@code f-measure = 2 * precision * recall / (precision + recall)}.
 *
 * @return The f-measure, or {@code -1} if {@code precision + recall <= 0}
 *         (i.e. the harmonic mean is undefined).
 */
public double getFMeasure() {
  // Hoist the scores into locals: the original recomputed each of
  // getPrecisionScore()/getRecallScore() three times per call.
  final double precision = getPrecisionScore();
  final double recall = getRecallScore();
  if (precision + recall > 0) {
    return 2 * (precision * recall) / (precision + recall);
  }
  // cannot divide by zero, return error code
  return -1;
}
/**
 * Accumulates evaluation counts for one sample: the number of true
 * positives, the number of predictions (selected) and the number of
 * references (target).
 *
 * @param references the provided references
 * @param predictions the predicted spans
 */
public void updateScores(final Object[] references, final Object[] predictions) {
  final int matches = countTruePositives(references, predictions);
  truePositive += matches;
  selected += predictions.length;
  target += references.length;
}
/**
 * Folds the counts of another {@link FMeasure} into this one, so the
 * combined scores reflect both evaluations.
 *
 * @param measure The {@link FMeasure} to merge.
 */
public void mergeInto(final FMeasure measure) {
  // each counter accumulates independently
  truePositive += measure.truePositive;
  selected += measure.selected;
  target += measure.target;
}
/**
 * @return A human-readable {@link String} representation listing
 *         precision, recall and f-measure, one per line.
 */
@Override
public String toString() {
  // StringBuilder.append(double) uses Double.toString, matching the
  // output of plain string concatenation.
  final StringBuilder result = new StringBuilder();
  result.append("Precision: ").append(getPrecisionScore()).append('\n');
  result.append("Recall: ").append(getRecallScore()).append('\n');
  result.append("F-Measure: ").append(getFMeasure());
  return result.toString();
}
/**
* This method counts the number of objects which are equal and occur in the
* references and predictions arrays.
* Matched items are removed from the prediction list.
*
* @param references
* The gold standard.
* @param predictions
* The predictions.
* @return The number of true positives.
*/
static int countTruePositives(final Object[] references, final Object[] predictions) {
List