/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.tests.index;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.WeakHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.SegmentReader;
import org.apache.lucene.index.StoredFields;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.internal.tests.IndexWriterAccess;
import org.apache.lucene.internal.tests.SegmentReaderAccess;
import org.apache.lucene.internal.tests.TestSecrets;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.tests.analysis.MockAnalyzer;
import org.apache.lucene.tests.store.BaseDirectoryWrapper;
import org.apache.lucene.tests.util.FailOnNonBulkMergesInfoStream;
import org.apache.lucene.tests.util.LineFileDocs;
import org.apache.lucene.tests.util.LuceneTestCase;
import org.apache.lucene.tests.util.TestUtil;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.NamedThreadFactory;
import org.apache.lucene.util.PrintStreamInfoStream;
import org.apache.lucene.util.SuppressForbidden;
// TODO
// - mix in forceMerge, addIndexes
// - randomly mix in non-congruent docs
/** Utility class that spawns multiple indexing and searching threads. */
public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCase {
private static final IndexWriterAccess INDEX_WRITER_ACCESS = TestSecrets.getIndexWriterAccess();
private static final SegmentReaderAccess SEGMENT_READER_ACCESS =
TestSecrets.getSegmentReaderAccess();
protected final AtomicBoolean failed = new AtomicBoolean();
protected final AtomicInteger addCount = new AtomicInteger();
protected final AtomicInteger delCount = new AtomicInteger();
protected final AtomicInteger packCount = new AtomicInteger();
protected Directory dir;
protected IndexWriter writer;
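/**
 * Tracks one block of documents that was added or updated atomically under a
 * shared packID, so the test can later verify that the whole block is deleted
 * (or kept) together.
 */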
private static class SubDocs {
public final String packID;
public final List<String> subIDs;
public boolean deleted;
public SubDocs(String packID, List<String> subIDs) {
this.packID = packID;
this.subIDs = subIDs;
}
}
// Called per-search
protected abstract IndexSearcher getCurrentSearcher() throws Exception;
protected abstract IndexSearcher getFinalSearcher() throws Exception;
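// Called to release a searcher obtained from getCurrentSearcher(); no-op by default.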
protected void releaseSearcher(IndexSearcher s) throws Exception {}
// Called once to run searching
protected abstract void doSearching(ExecutorService es, int maxIterations) throws Exception;
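// Hook allowing subclasses to wrap the test directory; returns it unchanged by default.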
protected Directory getDirectory(Directory in) {
return in;
}
protected void updateDocuments(Term id, List<? extends Iterable<? extends IndexableField>> docs)
throws Exception {
writer.updateDocuments(id, docs);
}
protected void addDocuments(Term id, List<? extends Iterable<? extends IndexableField>> docs)
throws Exception {
writer.addDocuments(docs);
}
protected void addDocument(Term id, Iterable<? extends IndexableField> doc) throws Exception {
writer.addDocument(doc);
}
protected void updateDocument(Term term, Iterable<? extends IndexableField> doc)
throws Exception {
writer.updateDocument(term, doc);
}
protected void deleteDocuments(Term term) throws Exception {
writer.deleteDocuments(term);
}
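// Hook invoked by each indexing thread once its indexing loop has finished.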
protected void doAfterIndexingThreadDone() {}
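/**
 * Spawns numThreads indexing threads that randomly add, update and delete both
 * single documents and multi-document blocks (keyed by packID), recording every
 * deleted ID in delIDs/delPackIDs so the caller can verify the final index.
 */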
private Thread[] launchIndexingThreads(
final LineFileDocs docs,
int numThreads,
final int maxIterations,
final Set<String> delIDs,
final Set<String> delPackIDs,
final List<SubDocs> allSubDocs) {
final Thread[] threads = new Thread[numThreads];
for (int thread = 0; thread < numThreads; thread++) {
threads[thread] =
new Thread() {
@SuppressForbidden(reason = "Thread sleep")
@Override
public void run() {
// TODO: would be better if this were cross-thread, so that we make sure one thread
// deleting another's added docs works:
final List<String> toDeleteIDs = new ArrayList<>();
final List<SubDocs> toDeleteSubDocs = new ArrayList<>();
int iterations = 0;
while (++iterations < maxIterations && !failed.get()) {
try {
// Occasional longish pause if running
// nightly
if (LuceneTestCase.TEST_NIGHTLY && random().nextInt(6) == 3) {
if (VERBOSE) {
System.out.println(Thread.currentThread().getName() + ": now long sleep");
}
Thread.sleep(TestUtil.nextInt(random(), 50, 500));
}
// Rate limit ingest rate:
if (random().nextInt(7) == 5) {
Thread.sleep(TestUtil.nextInt(random(), 1, 10));
if (VERBOSE) {
System.out.println(Thread.currentThread().getName() + ": done sleep");
}
}
Document doc = docs.nextDoc();
if (doc == null) {
break;
}
// Maybe add randomly named field
final String addedField;
if (random().nextBoolean()) {
addedField = "extra" + random().nextInt(40);
doc.add(newTextField(addedField, "a random field", Field.Store.YES));
} else {
addedField = null;
}
if (random().nextBoolean()) {
if (random().nextBoolean()) {
// Add/update doc block:
final String packID;
final SubDocs delSubDocs;
if (toDeleteSubDocs.size() > 0 && random().nextBoolean()) {
delSubDocs = toDeleteSubDocs.get(random().nextInt(toDeleteSubDocs.size()));
assert !delSubDocs.deleted;
toDeleteSubDocs.remove(delSubDocs);
// Update doc block, replacing prior packID
packID = delSubDocs.packID;
} else {
delSubDocs = null;
// Add doc block, using new packID
packID = packCount.getAndIncrement() + "";
}
final Field packIDField = newStringField("packID", packID, Field.Store.YES);
final List<String> docIDs = new ArrayList<>();
final SubDocs subDocs = new SubDocs(packID, docIDs);
final List<Document> docsList = new ArrayList<>();
allSubDocs.add(subDocs);
doc.add(packIDField);
docsList.add(TestUtil.cloneDocument(doc));
docIDs.add(doc.get("docid"));
final int maxDocCount = TestUtil.nextInt(random(), 1, 10);
while (docsList.size() < maxDocCount) {
doc = docs.nextDoc();
if (doc == null) {
break;
}
docsList.add(TestUtil.cloneDocument(doc));
docIDs.add(doc.get("docid"));
}
addCount.addAndGet(docsList.size());
final Term packIDTerm = new Term("packID", packID);
if (delSubDocs != null) {
delSubDocs.deleted = true;
delIDs.addAll(delSubDocs.subIDs);
delCount.addAndGet(delSubDocs.subIDs.size());
if (VERBOSE) {
System.out.println(
Thread.currentThread().getName()
+ ": update pack packID="
+ delSubDocs.packID
+ " count="
+ docsList.size()
+ " docs="
+ docIDs);
}
updateDocuments(packIDTerm, docsList);
} else {
if (VERBOSE) {
System.out.println(
Thread.currentThread().getName()
+ ": add pack packID="
+ packID
+ " count="
+ docsList.size()
+ " docs="
+ docIDs);
}
addDocuments(packIDTerm, docsList);
}
doc.removeField("packID");
if (random().nextInt(5) == 2) {
if (VERBOSE) {
System.out.println(
Thread.currentThread().getName() + ": buffer del id:" + packID);
}
toDeleteSubDocs.add(subDocs);
}
} else {
// Add single doc
final String docid = doc.get("docid");
if (VERBOSE) {
System.out.println(
Thread.currentThread().getName() + ": add doc docid:" + docid);
}
addDocument(new Term("docid", docid), doc);
addCount.getAndIncrement();
if (random().nextInt(5) == 3) {
if (VERBOSE) {
System.out.println(
Thread.currentThread().getName()
+ ": buffer del id:"
+ doc.get("docid"));
}
toDeleteIDs.add(docid);
}
}
} else {
// Update single doc, but we never re-use
// an ID so the delete will never
// actually happen:
if (VERBOSE) {
System.out.println(
Thread.currentThread().getName() + ": update doc id:" + doc.get("docid"));
}
final String docid = doc.get("docid");
updateDocument(new Term("docid", docid), doc);
addCount.getAndIncrement();
if (random().nextInt(5) == 3) {
if (VERBOSE) {
System.out.println(
Thread.currentThread().getName()
+ ": buffer del id:"
+ doc.get("docid"));
}
toDeleteIDs.add(docid);
}
}
if (random().nextInt(30) == 17) {
if (VERBOSE) {
System.out.println(
Thread.currentThread().getName()
+ ": apply "
+ toDeleteIDs.size()
+ " deletes");
}
for (String id : toDeleteIDs) {
if (VERBOSE) {
System.out.println(
Thread.currentThread().getName() + ": del term=id:" + id);
}
deleteDocuments(new Term("docid", id));
}
final int count = delCount.addAndGet(toDeleteIDs.size());
if (VERBOSE) {
System.out.println(
Thread.currentThread().getName() + ": tot " + count + " deletes");
}
delIDs.addAll(toDeleteIDs);
toDeleteIDs.clear();
for (SubDocs subDocs : toDeleteSubDocs) {
assert !subDocs.deleted;
delPackIDs.add(subDocs.packID);
deleteDocuments(new Term("packID", subDocs.packID));
subDocs.deleted = true;
if (VERBOSE) {
System.out.println(
Thread.currentThread().getName()
+ ": del subs: "
+ subDocs.subIDs
+ " packID="
+ subDocs.packID);
}
delIDs.addAll(subDocs.subIDs);
delCount.addAndGet(subDocs.subIDs.size());
}
toDeleteSubDocs.clear();
}
if (addedField != null) {
doc.removeField(addedField);
}
} catch (Throwable t) {
System.out.println(Thread.currentThread().getName() + ": hit exc");
t.printStackTrace();
failed.set(true);
throw new RuntimeException(t);
}
}
if (VERBOSE) {
System.out.println(Thread.currentThread().getName() + ": indexing done");
}
doAfterIndexingThreadDone();
}
};
threads[thread].start();
}
return threads;
}
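/**
 * Spawns a few search threads that repeatedly acquire the current searcher,
 * verify segment diagnostics and merged-segment warming, and run term queries
 * against a sample of the "body" terms until maxIterations is reached.
 */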
protected void runSearchThreads(final int maxIterations) throws Exception {
final int numThreads = TEST_NIGHTLY ? TestUtil.nextInt(random(), 1, 5) : 2;
final Thread[] searchThreads = new Thread[numThreads];
final AtomicLong totHits = new AtomicLong();
// silly starting guess:
final AtomicInteger totTermCount = new AtomicInteger(100);
// TODO: we should enrich this to do more interesting searches
for (int thread = 0; thread < searchThreads.length; thread++) {
searchThreads[thread] =
new Thread() {
@Override
public void run() {
if (VERBOSE) {
System.out.println(Thread.currentThread().getName() + ": launch search thread");
}
int iterations = 0;
while (++iterations < maxIterations && !failed.get()) {
try {
final IndexSearcher s = getCurrentSearcher();
try {
// Verify 1) IW is correctly setting
// diagnostics, and 2) segment warming for
// merged segments is actually happening:
for (final LeafReaderContext sub : s.getIndexReader().leaves()) {
SegmentReader segReader = (SegmentReader) sub.reader();
Map<String, String> diagnostics =
segReader.getSegmentInfo().info.getDiagnostics();
assertNotNull(diagnostics);
String source = diagnostics.get("source");
assertNotNull(source);
if (source.equals("merge")) {
assertTrue(
"sub reader "
+ sub
+ " wasn't warmed: warmed="
+ warmed
+ " diagnostics="
+ diagnostics
+ " si="
+ segReader.getSegmentInfo(),
!assertMergedSegmentsWarmed
|| warmed.containsKey(SEGMENT_READER_ACCESS.getCore(segReader)));
}
}
if (s.getIndexReader().numDocs() > 0) {
smokeTestSearcher(s);
Terms terms = MultiTerms.getTerms(s.getIndexReader(), "body");
if (terms == null) {
continue;
}
TermsEnum termsEnum = terms.iterator();
int seenTermCount = 0;
int shift;
int trigger;
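// Sample roughly 30 terms per pass: once totTermCount is known, search every
// (totTermCount/30)-th term, with a random shift so each pass hits a
// different subset of terms.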
if (totTermCount.get() < 30) {
shift = 0;
trigger = 1;
} else {
trigger = totTermCount.get() / 30;
shift = random().nextInt(trigger);
}
int iters = 0;
while (++iters < maxIterations) {
BytesRef term = termsEnum.next();
if (term == null) {
totTermCount.set(seenTermCount);
break;
}
seenTermCount++;
// search 30 terms
if ((seenTermCount + shift) % trigger == 0) {
// if (VERBOSE) {
// System.out.println(Thread.currentThread().getName() + " now search
// body:" + term.utf8ToString());
// }
totHits.addAndGet(runQuery(s, new TermQuery(new Term("body", term))));
}
}
// if (VERBOSE) {
// System.out.println(Thread.currentThread().getName() + ": search done");
// }
}
} finally {
releaseSearcher(s);
}
} catch (Throwable t) {
System.out.println(Thread.currentThread().getName() + ": hit exc");
failed.set(true);
t.printStackTrace(System.out);
throw new RuntimeException(t);
}
}
}
};
searchThreads[thread].start();
}
for (Thread thread : searchThreads) {
thread.join();
}
if (VERBOSE) {
System.out.println("TEST: DONE search: totHits=" + totHits);
}
}
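// Hook invoked once the IndexWriter exists; subclasses typically open their
// searcher infrastructure (e.g. a SearcherManager) here.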
protected void doAfterWriter(ExecutorService es) throws Exception {}
protected void doClose() throws Exception {}
protected boolean assertMergedSegmentsWarmed = true;
private final Map<Object, Boolean> warmed = Collections.synchronizedMap(new WeakHashMap<>());
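For illustration, here is a minimal sketch of how a concrete subclass might wire these
hooks together using a SearcherManager, in the style of Lucene's own NRT tests. The
class name MinimalNRTTest and its test method are hypothetical, and runTest(String)
refers to the test driver defined in the remainder of this class (truncated above).

import java.util.concurrent.ExecutorService;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.SearcherFactory;
import org.apache.lucene.search.SearcherManager;

// Hypothetical example subclass; not part of the Lucene sources.
public class MinimalNRTTest extends ThreadedIndexingAndSearchingTestCase {
  private SearcherManager mgr; // manages near-real-time searchers over the base class's writer

  @Override
  protected void doAfterWriter(ExecutorService es) throws Exception {
    // Open an NRT SearcherManager on the IndexWriter created by the base class.
    mgr = new SearcherManager(writer, new SearcherFactory());
  }

  @Override
  protected IndexSearcher getCurrentSearcher() throws Exception {
    mgr.maybeRefresh(); // pick up recently indexed documents
    return mgr.acquire();
  }

  @Override
  protected void releaseSearcher(IndexSearcher s) throws Exception {
    mgr.release(s);
  }

  @Override
  protected IndexSearcher getFinalSearcher() throws Exception {
    mgr.maybeRefreshBlocking(); // ensure the very last changes are visible
    return mgr.acquire();
  }

  @Override
  protected void doSearching(ExecutorService es, int maxIterations) throws Exception {
    runSearchThreads(maxIterations);
  }

  @Override
  protected void doClose() throws Exception {
    mgr.close();
  }

  public void testThreadedIndexingAndSearching() throws Exception {
    runTest("MinimalNRTTest"); // drive the indexing and searching threads
  }
}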