
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.search.similarities;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.index.FieldInvertState;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.CollectionStatistics;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermStatistics;
import org.apache.lucene.util.SmallFloat;
/**
* Implementation of {@link Similarity} with the Vector Space Model.
*
* Expert: Scoring API.
*
* TFIDFSimilarity defines the components of Lucene scoring.
* Overriding the computation of these components is a convenient
* way to alter Lucene scoring.
*
* Suggested reading:
* Introduction To Information Retrieval, Chapter 6.
*
* The following describes how Lucene scoring evolves from
* underlying information retrieval models to an (efficient) implementation.
* We first give a brief overview of the VSM score,
* then derive from it Lucene's Conceptual Scoring Formula,
* from which, finally, evolves Lucene's Practical Scoring Function
* (the latter is connected directly to Lucene classes and methods).
*
*
* Lucene combines the
* Boolean Model (BM) of Information Retrieval
* with the
* Vector Space Model (VSM) of Information Retrieval:
* documents "approved" by BM are scored by VSM.
*
*
* In VSM, documents and queries are represented as
* weighted vectors in a multi-dimensional space,
* where each distinct index term is a dimension,
* and weights are
* Tf-idf values.
*
* VSM does not require weights to be Tf-idf values,
* but Tf-idf values are believed to produce search results of high quality,
* and so Lucene uses Tf-idf.
* Tf and Idf are described in more detail below,
* but for now, for completeness, let's just say that
* for a given term t and document (or query) x,
* Tf(t,x) varies with the number of occurrences of term t in x
* (when one increases so does the other) and
* idf(t) similarly varies with the inverse of the
* number of index documents containing term t.
*
*
* The VSM score of document d for query q is the
* Cosine Similarity
* of the weighted query vectors V(q) and V(d):
*
*                                V(q) · V(d)
*   cosine-similarity(q,d)  =  –––––––––––––––
*                              |V(q)| |V(d)|
*
*   (VSM Score)
*
* Where V(q) · V(d) is the
* dot product
* of the weighted vectors,
* and |V(q)| and |V(d)| are their
* Euclidean norms.
*
* Note: the above equation can be viewed as the dot product of
* the normalized weighted vectors, in the sense that dividing
* V(q) by its Euclidean norm is normalizing it to a unit vector.
*
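* As an illustration only (not part of Lucene's API; the helper below is
* hypothetical), the cosine similarity of two sparse term-weight vectors
* could be computed like this:
*
* <pre>{@code
* // Cosine similarity of two sparse term -> weight vectors.
* static double cosineSimilarity(Map<String, Double> vq, Map<String, Double> vd) {
*   double dot = 0, normQ = 0, normD = 0;
*   for (Map.Entry<String, Double> e : vq.entrySet()) {
*     Double w = vd.get(e.getKey());
*     if (w != null) {
*       dot += e.getValue() * w;             // V(q) . V(d), over shared dimensions
*     }
*     normQ += e.getValue() * e.getValue();  // |V(q)|^2
*   }
*   for (double w : vd.values()) {
*     normD += w * w;                        // |V(d)|^2
*   }
*   return dot / (Math.sqrt(normQ) * Math.sqrt(normD));
* }
* }</pre>
*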
*
* Lucene refines the VSM score for both search quality and usability:
*
* - Normalizing V(d) to the unit vector is known to be problematic in that
* it removes all document length information.
* For some documents removing this info is probably ok,
* e.g. a document made by duplicating a certain paragraph 10 times,
* especially if that paragraph is made of distinct terms.
* But for a document which contains no duplicated paragraphs,
* this might be wrong.
* To avoid this problem, a different document length normalization
* factor is used, which normalizes to a vector equal to or larger
* than the unit vector: doc-len-norm(d).
*
*
* - At indexing, users can specify that certain documents are more
* important than others, by assigning a document boost.
* For this, the score of each document is also multiplied by its boost value
* doc-boost(d).
*
*
* - Lucene is field based, hence each query term applies to a single
* field; document length normalization is by the length of that field,
* and in addition to document boost there are also document field boosts.
*
*
* - The same field can be added to a document during indexing several times,
* and so the boost of that field is the multiplication of the boosts of
* the separate additions (or parts) of that field within the document.
*
*
* - At search time users can specify boosts to each query, sub-query, and
* each query term, hence the contribution of a query term to the score of
* a document is multiplied by the boost of that query term query-boost(q).
*
*
* - A document may match a multi-term query without containing all
* the terms of that query (this is allowed for some query types).
*
*
*
* Under the simplifying assumption of a single field in the index,
* we get Lucene's Conceptual scoring formula:
*
*   score(q,d)  =  query-boost(q) · ( V(q) · V(d) / |V(q)| ) · doc-len-norm(d) · doc-boost(d)
*
*   (Lucene Conceptual Scoring Formula)
*
* The conceptual formula is a simplification in the sense that (1) terms and documents
* are fielded and (2) boosts are usually per query term rather than per query.
*
*
* We now describe how Lucene implements this conceptual scoring formula, and
* derive from it Lucene's Practical Scoring Function.
*
*
* For efficient score computation some scoring components
* are computed and aggregated in advance:
*
*
* - Query-boost for the query (actually for each query term)
* is known when search starts.
*
*
* - The query Euclidean norm |V(q)| can be computed when search starts,
* as it is independent of the document being scored.
* From a search optimization perspective, it is a valid question
* why bother to normalize the query at all, because all
* scored documents will be multiplied by the same |V(q)|,
* and hence document ranks (their order by score) will not
* be affected by this normalization.
* There is a good reason to keep this normalization:
*
* - Recall that
* Cosine Similarity can be used to find how similar
* two documents are. One can use Lucene for, e.g.,
* clustering, and use a document as a query to compute
* its similarity to other documents.
* In this use case it is important that the score of document d3
* for query d1 is comparable to the score of document d3
* for query d2. In other words, scores of a document for two
* distinct queries should be comparable.
* There are other applications that may require this.
* And this is exactly what normalizing the query vector V(q)
* provides: comparability (to a certain extent) of two or more queries.
*
*
*
*
* - Document length norm doc-len-norm(d) and document
* boost doc-boost(d) are known at indexing time.
* They are computed in advance and their product
* is saved as a single value in the index: norm(d).
* (In the equations below, norm(t in d) means norm(field(t) in doc d)
* where field(t) is the field associated with term t.)
*
*
*
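* For a concrete sense of scale (using ClassicSimilarity's default, where
* doc-len-norm(d) = 1/sqrt(length)): a field with 4 terms gets a length norm
* of 1/sqrt(4) = 0.5, while a field with 100 terms gets 1/sqrt(100) = 0.1,
* so terms in the shorter field weigh five times as much, all else being equal.
*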
* Lucene's Practical Scoring Function is derived from the above.
* Its components map directly onto the components
* of the conceptual formula:
*
*   score(q,d)  =  ∑ over (t in q) of:  tf(t in d) · idf(t)² · t.getBoost() · norm(t,d)
*
*   (Lucene Practical Scoring Function)
*
* where
*
* -
*
* tf(t in d)
* correlates to the term's frequency,
* defined as the number of times term t appears in the currently scored document d.
* Documents that have more occurrences of a given term receive a higher score.
* Note that tf(t in q) is assumed to be 1 and therefore it does not appear in this equation.
* However, if a query contains the same term twice, there will be
* two term-queries with that same term, and hence the computation would still be correct (although
* not very efficient).
* The default computation for tf(t in d) in
* {@link org.apache.lucene.search.similarities.ClassicSimilarity#tf(float) ClassicSimilarity} is:
*
*   {@link org.apache.lucene.search.similarities.ClassicSimilarity#tf(float) tf(t in d)}  =  sqrt(frequency)
*
* (Both this tf default and the idf default below are reproduced in the
* sketch at the end of this list.)
*
* -
*
* idf(t) stands for Inverse Document Frequency. This value
* correlates to the inverse of docFreq
* (the number of documents in which the term t appears).
* This means rarer terms give higher contribution to the total score.
* idf(t) appears for t in both the query and the document,
* hence it is squared in the equation.
* The default computation for idf(t) in
* {@link org.apache.lucene.search.similarities.ClassicSimilarity#idf(long, long) ClassicSimilarity} is:
*
*   {@link org.apache.lucene.search.similarities.ClassicSimilarity#idf(long, long) idf(t)}  =  1 + log( (docCount+1) / (docFreq+1) )
*
* -
*
* t.getBoost()
* is a search time boost of term t in the query q as
* specified in the query text
* (see query syntax),
* or as set by wrapping with
* {@link org.apache.lucene.search.BoostQuery#BoostQuery(org.apache.lucene.search.Query, float) BoostQuery}.
* Notice that there is really no direct API for accessing the boost of one term in a multi-term query;
* rather, a multi-term query is represented as multiple
* {@link org.apache.lucene.search.TermQuery TermQuery} objects,
* and so the boost of a term in the query is accessible by calling the sub-query's
* {@link org.apache.lucene.search.BoostQuery#getBoost() getBoost()}.
*
*
*
* -
*
* norm(t,d) is an index-time boost factor that solely
* depends on the number of tokens of this field in the document, so
* that shorter fields contribute more to the score.
*
*
*
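* As a minimal sketch (for illustration only; the class name is hypothetical),
* a subclass plugging the ClassicSimilarity defaults quoted above into this
* formula could look like:
*
* <pre>{@code
* public class MyTFIDFSimilarity extends TFIDFSimilarity {
*   public float tf(float freq) {
*     return (float) Math.sqrt(freq);             // tf(t in d) = sqrt(frequency)
*   }
*   public float idf(long docFreq, long docCount) {
*     // idf(t) = 1 + log((docCount+1)/(docFreq+1))
*     return (float) (Math.log((docCount + 1) / (double) (docFreq + 1)) + 1.0);
*   }
*   public float lengthNorm(int length) {
*     return (float) (1.0 / Math.sqrt(length));   // doc-len-norm(d) = 1/sqrt(length)
*   }
* }
* }</pre>
*
* Such a similarity is installed at both index time, via
* {@link org.apache.lucene.index.IndexWriterConfig#setSimilarity(Similarity)},
* and search time, via {@link IndexSearcher#setSimilarity(Similarity)}.
*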
* @see org.apache.lucene.index.IndexWriterConfig#setSimilarity(Similarity)
* @see IndexSearcher#setSimilarity(Similarity)
*/
public abstract class TFIDFSimilarity extends Similarity {
/**
* Sole constructor. (For invocation by subclass
* constructors, typically implicit.)
*/
public TFIDFSimilarity() {}
/**
* True if overlap tokens (tokens with a position increment of zero) are
* discounted from the document's length.
*/
protected boolean discountOverlaps = true;
/** Determines whether overlap tokens (tokens with a
* position increment of 0, such as those injected by a synonym filter) are ignored when computing
* norm. By default this is true, meaning overlap
* tokens do not count when computing norms.
*
* @lucene.experimental
*
* @see #computeNorm
*/
public void setDiscountOverlaps(boolean v) {
discountOverlaps = v;
}
/**
* Returns true if overlap tokens are discounted from the document's length.
* @see #setDiscountOverlaps
*/
public boolean getDiscountOverlaps() {
return discountOverlaps;
}
/** Computes a score factor based on a term or phrase's frequency in a
* document. This value is multiplied by the {@link #idf(long, long)}
* factor for each term in the query and these products are then summed to
* form the initial score for a document.
*
* Terms and phrases repeated in a document indicate the topic of the
* document, so implementations of this method usually return larger values
* when freq is large, and smaller values when freq is small.
*
* @param freq the frequency of a term within a document
* @return a score factor based on a term's within-document frequency
*/
public abstract float tf(float freq);
/**
* Computes a score factor for a simple term and returns an explanation
* for that score factor.
*
*
* The default implementation uses:
*
*
* idf(docFreq, docCount);
*
*
* Note that {@link CollectionStatistics#docCount()} is used instead of
* {@link org.apache.lucene.index.IndexReader#numDocs() IndexReader#numDocs()} because
* {@link TermStatistics#docFreq()} is also used, and when the latter
* is inaccurate, so is {@link CollectionStatistics#docCount()}, and in the same direction.
* In addition, {@link CollectionStatistics#docCount()} does not skew when fields are sparse.
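*
* For example (numbers invented for illustration): if only 100 of 1,000,000
* documents contain a given field, computing idf against numDocs() would make
* every term of that field look extremely rare, whereas docCount() measures
* rarity relative to the 100 documents that actually carry the field.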
*
* @param collectionStats collection-level statistics
* @param termStats term-level statistics for the term
* @return an Explanation object that includes both an idf score factor
* and an explanation for the term.
*/
public Explanation idfExplain(CollectionStatistics collectionStats, TermStatistics termStats) {
final long df = termStats.docFreq();
final long docCount = collectionStats.docCount();
final float idf = idf(df, docCount);
return Explanation.match(idf, "idf(docFreq, docCount)",
Explanation.match(df, "docFreq, number of documents containing term"),
Explanation.match(docCount, "docCount, total number of documents with field"));
}
/**
* Computes a score factor for a phrase.
*
*
* The default implementation sums the idf factor for
* each term in the phrase.
*
* @param collectionStats collection-level statistics
* @param termStats term-level statistics for the terms in the phrase
* @return an Explanation object that includes both an idf
* score factor for the phrase and an explanation
* for each term.
*/
public Explanation idfExplain(CollectionStatistics collectionStats, TermStatistics termStats[]) {
double idf = 0d; // sum into a double before casting into a float
List<Explanation> subs = new ArrayList<>();
for (final TermStatistics stat : termStats) {
Explanation idfExplain = idfExplain(collectionStats, stat);
subs.add(idfExplain);
idf += idfExplain.getValue().floatValue();
}
return Explanation.match((float) idf, "idf(), sum of:", subs);
}
/** Computes a score factor based on a term's document frequency (the number
* of documents which contain the term). This value is multiplied by the
* {@link #tf(float)} factor for each term in the query and these products are
* then summed to form the initial score for a document.
*
* Terms that occur in fewer documents are better indicators of topic, so
* implementations of this method usually return larger values for rare terms,
* and smaller values for common terms.
*
* @param docFreq the number of documents which contain the term
* @param docCount the total number of documents in the collection
* @return a score factor based on the term's document frequency
*/
public abstract float idf(long docFreq, long docCount);
/**
* Compute an index-time normalization value for this field instance.
*
* @param length the number of terms in the field, optionally {@link #setDiscountOverlaps(boolean) discounting overlaps}
* @return a length normalization value
*/
public abstract float lengthNorm(int length);
@Override
public final long computeNorm(FieldInvertState state) {
final int numTerms;
// Fields indexed with DOCS only (index major version >= 8) norm on the unique term count.
if (state.getIndexOptions() == IndexOptions.DOCS && state.getIndexCreatedVersionMajor() >= 8) {
numTerms = state.getUniqueTermCount();
} else if (discountOverlaps) {
// Discount overlap tokens (position increment 0) from the field length.
numTerms = state.getLength() - state.getNumOverlap();
} else {
numTerms = state.getLength();
}
// Encode the length lossily into a single byte for storage in the index.
return SmallFloat.intToByte4(numTerms);
}
@Override
public final SimScorer scorer(float boost, CollectionStatistics collectionStats, TermStatistics... termStats) {
final Explanation idf = termStats.length == 1
? idfExplain(collectionStats, termStats[0])
: idfExplain(collectionStats, termStats);
// Pre-decode every possible encoded norm byte into its length norm so that
// per-document scoring reduces to a table lookup.
float[] normTable = new float[256];
for (int i = 1; i < 256; ++i) {
int length = SmallFloat.byte4ToInt((byte) i);
float norm = lengthNorm(length);
normTable[i] = norm;
}
normTable[0] = 1f / normTable[255];
return new TFIDFScorer(boost, idf, normTable);
}
/** SimScorer for the TF-IDF model. The only collection-level statistic of
* interest to this model is idf. */
class TFIDFScorer extends SimScorer {
/** The idf and its explanation */
private final Explanation idf;
private final float boost;
private final float queryWeight;
final float[] normTable;
public TFIDFScorer(float boost, Explanation idf, float[] normTable) {
// TODO: Validate?
this.idf = idf;
this.boost = boost;
this.queryWeight = boost * idf.getValue().floatValue();
this.normTable = normTable;
}
@Override
public float score(float freq, long norm) {
final float raw = tf(freq) * queryWeight; // compute tf(f)*weight
float normValue = normTable[(int) (norm & 0xFF)];
return raw * normValue; // normalize for field
}
@Override
public Explanation explain(Explanation freq, long norm) {
return explainScore(freq, norm, normTable);
}
private Explanation explainScore(Explanation freq, long encodedNorm, float[] normTable) {
List<Explanation> subs = new ArrayList<>();
if (boost != 1F) {
subs.add(Explanation.match(boost, "boost"));
}
subs.add(idf);
Explanation tf = Explanation.match(tf(freq.getValue().floatValue()), "tf(freq="+freq.getValue()+"), with freq of:", freq);
subs.add(tf);
float norm = normTable[(int) (encodedNorm & 0xFF)];
Explanation fieldNorm = Explanation.match(norm, "fieldNorm");
subs.add(fieldNorm);
return Explanation.match(
queryWeight * tf.getValue().floatValue() * norm,
"score(freq="+freq.getValue()+"), product of:",
subs);
}
}
}