package io.ebean;
import org.jspecify.annotations.NullMarked;
import org.jspecify.annotations.Nullable;
import io.ebean.docstore.DocQueryContext;
import io.ebean.docstore.RawDoc;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
import java.util.function.Predicate;
/**
* Document storage operations.
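*
* A DocumentStore is typically obtained from the database. A minimal sketch
* (assuming a {@code Product} entity mapped with {@code @DocStore}):
*
* {@code
*
*   DocumentStore documentStore = database.docStore();
*
*   // update the index for all Product documents
*   documentStore.indexAll(Product.class);
*
* }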
*/
@NullMarked
public interface DocumentStore {
/**
* Update the associated document store using the result of the query.
*
* This will execute the query against the database creating a document for each
* bean graph and sending it to the document store.
*
* Note that the select and fetch paths of the query are set for you to match the
* document structure needed based on {@code @DocStore} and {@code @DocStoreEmbedded},
* so this query only needs to supply the predicates.
*
* This query will be executed using {@code findEach} so it is safe to use a query
* that will fetch a lot of beans. The default {@code bulkBatchSize} is used.
*
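* For example (a sketch assuming a {@code Product} entity with a {@code whenModified}
* property and a {@code lastRun} timestamp variable):
*
* {@code
*
*   Query<Product> query = database.find(Product.class)
*     .where().gt("whenModified", lastRun)
*     .query();
*
*   // index the matching documents in the document store
*   documentStore.indexByQuery(query);
*
* }
*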
* @param query The query that selects objects to send to the document store.
*/
void indexByQuery(Query<?> query);
/**
* Update the associated document store index using the result of the query additionally specifying a
* bulkBatchSize to use for sending the messages to ElasticSearch.
*
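* For example (a sketch; the batch size of 1000 is arbitrary):
*
* {@code
*
*   documentStore.indexByQuery(query, 1000);
*
* }
*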
* @param query The query that selects objects to send to the document store.
* @param bulkBatchSize The batch size to use when bulk sending to the document store.
*/
void indexByQuery(Query<?> query, int bulkBatchSize);
/**
* Update the document store for all beans of this type.
*
* This is the same as indexByQuery where the query has no predicates and so fetches all rows.
*
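* For example (assuming a {@code Product} entity mapped with {@code @DocStore}):
*
* {@code
*
*   documentStore.indexAll(Product.class);
*
* }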
*/
void indexAll(Class<?> beanType);
/**
* Return the bean by fetching its content from the document store.
* If the document is not found null is returned.
*
* Typically this is called indirectly by findOne() on the query.
*
* {@code
*
*   Customer customer =
*     database.find(Customer.class)
*       .setUseDocStore(true)
*       .setId(42)
*       .findOne();
*
* }
*/
@Nullable
<T> T find(DocQueryContext<T> request);
/**
* Execute the find list query. This request is prepared to execute secondary queries.
*
* Typically this is called indirectly by findList() on the query that has setUseDocStore(true).
*
* {@code
*
*   List<Customer> newCustomers =
*     database.find(Customer.class)
*       .setUseDocStore(true)
*       .where().eq("status", Customer.Status.NEW)
*       .findList();
*
* }
*/
<T> List<T> findList(DocQueryContext<T> request);
/**
* Execute the query against the document store returning the paged list.
*
* The query should have {@code firstRow} or {@code maxRows} set prior to calling this method.
*
* Typically this is called indirectly by findPagedList() on the query that has setUseDocStore(true).
*
* {@code
*
*   PagedList<Customer> newCustomers =
*     database.find(Customer.class)
*       .setUseDocStore(true)
*       .where().eq("status", Customer.Status.NEW)
*       .setMaxRows(50)
*       .findPagedList();
*
* }
*/
<T> PagedList<T> findPagedList(DocQueryContext<T> request);
/**
* Execute the query against the document store with the expectation of a large set of results
* that are processed in a scrolling resultSet fashion.
*
* For example, with the ElasticSearch doc store this uses SCROLL.
*
* Typically this is called indirectly by findEach() on the query that has setUseDocStore(true).
*
* {@code
*
*   database.find(Order.class)
*     .setUseDocStore(true)
*     .where()... // perhaps add predicates
*     .findEach((Order order) -> {
*       // process the bean ...
*     });
*
* }
*/
<T> void findEach(DocQueryContext<T> query, Consumer<T> consumer);
/**
* Execute the query against the document store with the expectation of a large set of results
* that are processed in a scrolling resultSet fashion.
*
* Unlike findEach() this provides the opportunity to stop iterating through the large query.
*
* For example, with the ElasticSearch doc store this uses SCROLL.
*
* Typically this is called indirectly by findEachWhile() on the query that has setUseDocStore(true).
*
* {@code
*
*   database.find(Order.class)
*     .setUseDocStore(true)
*     .where()... // perhaps add predicates
*     .findEachWhile(new Predicate<Order>() {
*       @Override
*       public boolean test(Order bean) {
*         // process the bean
*
*         // return true to continue, false to stop
*         // boolean shouldContinue = ...
*         return shouldContinue;
*       }
*     });
*
* }
*/
<T> void findEachWhile(DocQueryContext<T> query, Predicate<T> consumer);
/**
* Find each processing raw documents.
*
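* For example (a sketch; the index name is illustrative and {@code rawJsonQuery} is
* an assumed variable holding the raw query JSON):
*
* {@code
*
*   documentStore.findEach("product_v1", rawJsonQuery, doc -> {
*     // process each RawDoc ...
*   });
*
* }
*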
* @param indexNameType The full index name and type
* @param rawQuery The query to execute
* @param consumer Consumer to process each document
*/
void findEach(String indexNameType, String rawQuery, Consumer<RawDoc> consumer);
/**
* Find each processing raw documents stopping when the predicate returns false.
*
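* For example (a sketch; {@code rawJsonQuery} is an assumed variable holding the
* raw query JSON):
*
* {@code
*
*   documentStore.findEachWhile("product_v1", rawJsonQuery, doc -> {
*     // process the RawDoc ...
*     boolean continueProcessing = true; // return false to stop iterating
*     return continueProcessing;
*   });
*
* }
*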
* @param indexNameType The full index name and type
* @param rawQuery The query to execute
* @param consumer Consumer to process each document until false is returned
*/
void findEachWhile(String indexNameType, String rawQuery, Predicate<RawDoc> consumer);
/**
* Process the queue entries sending updates to the document store or queuing them for later processing.
*/
long process(List<DocStoreQueueEntry> queueEntries) throws IOException;
/**
* Drop the index from the document store (similar to DDL drop table).
* {@code
*
*   DocumentStore documentStore = database.docStore();
*
*   documentStore.dropIndex("product_copy");
*
* }
*/
void dropIndex(String indexName);
/**
* Create an index given a mapping file as a resource in the classPath (similar to DDL create table).
* {@code
*
*   DocumentStore documentStore = database.docStore();
*
*   // uses product_copy.mapping.json resource
*   // ... to define mappings for the index
*
*   documentStore.createIndex("product_copy", null);
*
* }
*
* @param indexName the name of the new index
* @param alias the alias of the index
*/
void createIndex(String indexName, String alias);
/**
* Modify the settings on an index.
*
* For example, this can be used to set the ElasticSearch {@code refresh_interval}
* on an index before a bulk update.
*
* {@code
*
*   // refresh_interval -1 ... disable refresh while bulk loading
*
*   Map<String,Object> settings = new LinkedHashMap<>();
*   settings.put("refresh_interval", "-1");
*
*   documentStore.indexSettings("product", settings);
*
* }
* {@code
*
*   // refresh_interval 1s ... restore after bulk loading
*
*   Map<String,Object> settings = new LinkedHashMap<>();
*   settings.put("refresh_interval", "1s");
*
*   documentStore.indexSettings("product", settings);
*
* }
*
* @param indexName the name of the index to update settings on
* @param settings the settings to set on the index
*/
void indexSettings(String indexName, Map<String,Object> settings);
/**
* Copy the index to a new index.
*
* This copy process does not use the database but instead will copy from the source index to a destination index.
*
* {@code
*
*   long copyCount = documentStore.copyIndex(Product.class, "product_copy");
*
* }
*
* @param beanType The bean type of the source index
* @param newIndex The name of the index to copy to
* @return the number of documents copied to the new index
*/
long copyIndex(Class<?> beanType, String newIndex);
/**
* Copy entries from an index to a new index but limiting to documents that have been
* modified since the sinceEpochMillis time.
*
* To support this the document needs to have a {@code @WhenModified} property.
*
* {@code
*
*   long copyCount = documentStore.copyIndex(Product.class, "product_copy", sinceMillis);
*
* }
*
* @param beanType The bean type of the source index
* @param newIndex The name of the index to copy to
* @param sinceEpochMillis Copy only documents modified after this epoch millis time
* @return the number of documents copied to the new index
*/
long copyIndex(Class<?> beanType, String newIndex, long sinceEpochMillis);
/**
* Copy from a source index to a new index taking only the documents
* matching the given query.
* {@code
*
*   // predicates to select the source documents to copy
*   Query<Product> query = database.find(Product.class)
*     .where()
*       .ge("whenModified", new Timestamp(since))
*       .ge("name", "A")
*       .lt("name", "D")
*       .query();
*
*   // copy from the source index to the "product_copy" index
*   long copyCount = documentStore.copyIndex(query, "product_copy", 1000);
*
* }
*
* @param query The query to select the source documents to copy
* @param newIndex The target index to copy the documents to
* @param bulkBatchSize The ElasticSearch bulk batch size, if 0 uses the default.
* @return The number of documents copied to the new index.
*/
long copyIndex(Query<?> query, String newIndex, int bulkBatchSize);
}