
/*===========================================================================
Copyright (C) 2008-2009 by the Okapi Framework contributors
-----------------------------------------------------------------------------
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===========================================================================*/
package net.sf.okapi.lib.search.lucene.analysis;
import java.util.regex.Pattern;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.miscellaneous.LengthFilter;
import org.apache.lucene.analysis.ngram.NGramTokenFilter;
import org.apache.lucene.analysis.pattern.PatternReplaceFilter;
/**
 * An {@link Analyzer} that treats the entire input as a single token,
 * collapses runs of whitespace to a single space, lower-cases the result,
 * and emits fixed-length character n-grams.
 *
 * @author HaslamJD
 * @author HARGRAVEJE
 */
public final class NgramAnalyzer extends Analyzer {
    /** Initial term buffer size handed to the {@link KeywordTokenizer}. */
    public static final int MAX_INPUT_SIZE = 4096;

    private final int ngramLength;

    public NgramAnalyzer(int ngramLength) {
        if (ngramLength <= 0) {
            throw new IllegalArgumentException("'ngramLength' must be greater than 0");
        }
        this.ngramLength = ngramLength;
    }

    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
        /* Lucene equivalent of a Solr analysis chain built from keyword
           tokenization, whitespace normalization, lower-casing, n-gram
           generation, and length filtering: the whole input becomes one token,
           runs of whitespace collapse to a single space, the token is
           lower-cased and split into ngramLength-sized grams (the original
           token is preserved, so inputs shorter than ngramLength still yield a
           term), and anything longer than ngramLength is dropped. */
        final Tokenizer source = new KeywordTokenizer(MAX_INPUT_SIZE);
        TokenStream result = new LengthFilter(
                new NGramTokenFilter(
                        new LowerCaseFilter(
                                new PatternReplaceFilter(source, Pattern.compile("\\s+"), " ", true)),
                        ngramLength, ngramLength, true),
                1, ngramLength);
        return new TokenStreamComponents(source, result);
    }
}
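
A minimal usage sketch, not part of the Okapi source: it runs a string through NgramAnalyzer and prints every emitted trigram via Lucene's CharTermAttribute. The class name NgramAnalyzerDemo and the field name "field" are illustrative, and this assumes Lucene 7.4 or later on the classpath, since the four-argument NGramTokenFilter constructor used above first appeared in that release.

import java.io.IOException;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class NgramAnalyzerDemo {
    public static void main(String[] args) throws IOException {
        // ngramLength = 3: trigrams over the normalized input "the quick fox".
        try (NgramAnalyzer analyzer = new NgramAnalyzer(3);
                TokenStream stream = analyzer.tokenStream("field", "The  quick\tFOX")) {
            CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
            stream.reset();                          // required before incrementToken()
            while (stream.incrementToken()) {
                System.out.println(term.toString()); // "the", "he ", "e q", " qu", ...
            }
            stream.end();
        }
    }
}

Because the final LengthFilter accepts tokens from length 1 up to ngramLength, a preserved original shorter than ngramLength still comes through, while the full-length original token is filtered out.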