package com.avaje.ebean;

import com.avaje.ebeanservice.docstore.api.DocQueryRequest;
import org.jetbrains.annotations.Nullable;

import java.io.IOException;
import java.util.List;

/**
 * Document storage operations.
 */
public interface DocumentStore {

  /**
   * Update the associated document store using the result of the query.
   * <p>
   * This will execute the query against the database creating a document for each
   * bean graph and sending this to the document store.
   * </p>
   * <p>
   * Note that the select and fetch paths of the query are set for you to match the
   * document structure needed based on @DocStore and @DocStoreEmbedded
   * so all this query requires is the predicates.
   * </p>
   * <p>
   * This query will be executed using findEach so it is safe to use a query
   * that will fetch a lot of beans. The default bulkBatchSize is used.
   * </p>
   *
   * @param query The query that selects objects to send to the document store.
   */
  <T> void indexByQuery(Query<T> query);

  /**
   * Update the associated document store index using the result of the query, additionally specifying a
   * bulkBatchSize to use for sending the messages to ElasticSearch.
   *
   * @param query         The query that selects objects to send to the document store.
   * @param bulkBatchSize The batch size to use when bulk sending to the document store.
   */
  <T> void indexByQuery(Query<T> query, int bulkBatchSize);
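
  // Usage sketch: re-index the documents selected by a query, optionally with an
  // explicit bulk batch size. Assumes an EbeanServer named "server" and a @DocStore
  // entity bean Product; "whenModified" and lastSyncTime are illustrative only.
  //
  //   DocumentStore documentStore = server.docStore();
  //
  //   // only the predicates are needed; select/fetch paths are set automatically
  //   Query<Product> recentlyChanged = server.find(Product.class)
  //     .where().gt("whenModified", lastSyncTime)
  //     .query();
  //
  //   documentStore.indexByQuery(recentlyChanged);        // default bulk batch size
  //   documentStore.indexByQuery(recentlyChanged, 1000);  // bulk batch size of 1000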

  /**
   * Update the document store for all beans of this type.
   * <p>
   * This is the same as indexByQuery where the query has no predicates and so fetches all rows.
   * </p>
   */
  void indexAll(Class<?> beanType);
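
  // Usage sketch: rebuild the document store index for every row of a bean type.
  // Assumes an EbeanServer named "server" and a @DocStore entity bean Product.
  //
  //   server.docStore().indexAll(Product.class);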

  /**
   * Return the bean by fetching its content from the document store.
   * If the document is not found null is returned.
   * <p>
   * Typically this is called indirectly by findUnique() on the query.
   * </p>
   * <pre>{@code
   *
   * Customer customer =
   *   server.find(Customer.class)
   *     .setUseDocStore(true)
   *     .setId(42)
   *     .findUnique();
   *
   * }</pre>
   */
  @Nullable
  <T> T find(DocQueryRequest<T> request);

  /**
   * Execute the find list query. This request is prepared to execute secondary queries.
   * <p>
   * Typically this is called indirectly by findList() on the query that has setUseDocStore(true).
   * </p>
   * <pre>{@code
   *
   * List<Customer> newCustomers =
   *  server.find(Customer.class)
   *    .setUseDocStore(true)
   *    .where().eq("status", Customer.Status.NEW)
   *    .findList();
   *
   * }</pre>
   */
  <T> List<T> findList(DocQueryRequest<T> request);

  /**
   * Execute the query against the document store returning the paged list.
   * <p>
   * The query should have firstRow or maxRows set prior to calling this method.
   * </p>
   * <p>
   * Typically this is called indirectly by findPagedList() on the query that has setUseDocStore(true).
   * </p>
   * <pre>{@code
   *
   * PagedList<Customer> newCustomers =
   *  server.find(Customer.class)
   *    .setUseDocStore(true)
   *    .where().eq("status", Customer.Status.NEW)
   *    .setMaxRows(50)
   *    .findPagedList();
   *
   * }</pre>
   */
  <T> PagedList<T> findPagedList(DocQueryRequest<T> request);

  /**
   * Execute the query against the document store with the expectation of a large set of results
   * that are processed in a scrolling resultSet fashion.
   * <p>
   * For example, with the ElasticSearch doc store this uses SCROLL.
   * </p>
   * <p>
   * Typically this is called indirectly by findEach() on the query that has setUseDocStore(true).
   * </p>
   * <pre>{@code
   *
   *  server.find(Order.class)
   *    .setUseDocStore(true)
   *    .where()... // perhaps add predicates
   *    .findEach(new QueryEachConsumer<Order>() {
   *      @Override
   *      public void accept(Order bean) {
   *        // process the bean
   *      }
   *    });
   *
   * }</pre>
   */
  <T> void findEach(DocQueryRequest<T> query, QueryEachConsumer<T> consumer);

  /**
   * Execute the query against the document store with the expectation of a large set of results
   * that are processed in a scrolling resultSet fashion.
   * <p>
   * Unlike findEach() this provides the opportunity to stop iterating through the large query.
   * </p>
   * <p>
   * For example, with the ElasticSearch doc store this uses SCROLL.
   * </p>
   * <p>
   * Typically this is called indirectly by findEachWhile() on the query that has setUseDocStore(true).
   * </p>
   * <pre>{@code
   *
   *  server.find(Order.class)
   *    .setUseDocStore(true)
   *    .where()... // perhaps add predicates
   *    .findEachWhile(new QueryEachWhileConsumer<Order>() {
   *      @Override
   *      public boolean accept(Order bean) {
   *        // process the bean
   *
   *        // return true to continue, false to stop
   *        // boolean shouldContinue = ...
   *        return shouldContinue;
   *      }
   *    });
   *
   * }</pre>
   */
  <T> void findEachWhile(DocQueryRequest<T> query, QueryEachWhileConsumer<T> consumer);

  /**
   * Process the queue entries sending updates to the document store or queuing them for later processing.
   */
  long process(List queueEntries) throws IOException;

  /**
   * Drop the index from the document store (similar to DDL drop table).
   * <pre>{@code
   *
   *   DocumentStore documentStore = server.docStore();
   *
   *   documentStore.dropIndex("product_copy");
   *
   * }</pre>
   */
  void dropIndex(String indexName);

  /**
   * Create an index given a mapping file as a resource in the classPath (similar to DDL create table).
   * <pre>{@code
   *
   *   DocumentStore documentStore = server.docStore();
   *
   *   // uses product_copy.mapping.json resource
   *   // ... to define mappings for the index
   *
   *   documentStore.createIndex("product_copy", null);
   *
   * }</pre>
   *
   * @param indexName the name of the new index
   * @param alias     the alias of the index
   */
  void createIndex(String indexName, String alias);

  /**
   * Copy the index to a new index.
   * <p>
   * This copy process does not use the database but instead will copy from the source index to a destination index.
   * </p>
   * <pre>{@code
   *
   *  long copyCount = documentStore.copyIndex(Product.class, "product_copy");
   *
   * }</pre>
   *
   * @param beanType The bean type of the source index
   * @param newIndex The name of the index to copy to
   * @return the number of documents copied to the new index
   */
  long copyIndex(Class<?> beanType, String newIndex);

  /**
   * Copy entries from an index to a new index but limited to documents that have been
   * modified since the sinceEpochMillis time.
   * <p>
   * To support this the document needs to have a @WhenModified property.
   * </p>
   * <pre>{@code
   *
   *  long copyCount = documentStore.copyIndex(Product.class, "product_copy", sinceMillis);
   *
   * }</pre>
   *
   * @param beanType The bean type of the source index
   * @param newIndex The name of the index to copy to
   * @return the number of documents copied to the new index
   */
  long copyIndex(Class<?> beanType, String newIndex, long sinceEpochMillis);

  /**
   * Copy from a source index to a new index taking only the documents
   * matching the given query.
   * <pre>{@code
   *
   *  // predicates to select the source documents to copy
   *  Query<Product> query = server.find(Product.class)
   *    .where()
   *      .ge("whenModified", new Timestamp(since))
   *      .ge("name", "A")
   *      .lt("name", "D")
   *      .query();
   *
   *  // copy from the source index to "product_copy" index
   *  long copyCount = documentStore.copyIndex(query, "product_copy", 1000);
   *
   * }</pre>
   *
   * @param query         The query to select the source documents to copy
   * @param newIndex      The target index to copy the documents to
   * @param bulkBatchSize The ElasticSearch bulk batch size, if 0 uses the default.
   * @return The number of documents copied to the new index.
   */
  long copyIndex(Query<?> query, String newIndex, int bulkBatchSize);
}



