package com.datorama.oss.timbermill;
import com.amazonaws.auth.AWS4Signer;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.util.IOUtils;
import com.datorama.oss.timbermill.common.ElasticsearchUtil;
import com.datorama.oss.timbermill.common.KamonConstants;
import com.datorama.oss.timbermill.common.ZonedDateTimeConverter;
import com.datorama.oss.timbermill.common.persistence.DbBulkRequest;
import com.datorama.oss.timbermill.common.persistence.PersistenceHandler;
import com.datorama.oss.timbermill.common.persistence.IndexRetryManager;
import com.datorama.oss.timbermill.unit.Task;
import com.datorama.oss.timbermill.unit.TaskStatus;
import com.evanlennick.retry4j.CallExecutorBuilder;
import com.evanlennick.retry4j.Status;
import com.evanlennick.retry4j.config.RetryConfig;
import com.evanlennick.retry4j.config.RetryConfigBuilder;
import com.evanlennick.retry4j.exception.RetriesExhaustedException;
import com.google.common.base.Stopwatch;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.gson.*;
import com.google.gson.internal.LazilyParsedNumber;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.HttpHost;
import org.apache.http.HttpRequestInterceptor;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.*;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.*;
import org.elasticsearch.client.core.CountRequest;
import org.elasticsearch.client.core.CountResponse;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.client.indices.PutIndexTemplateRequest;
import org.elasticsearch.client.indices.rollover.RolloverRequest;
import org.elasticsearch.client.indices.rollover.RolloverResponse;
import org.elasticsearch.cluster.metadata.AliasMetadata;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.*;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.InternalOrder;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.bucket.terms.ParsedStringTerms;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.slice.SliceBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
import java.io.IOException;
import java.io.InputStream;
import java.net.SocketTimeoutException;
import java.time.ZonedDateTime;
import java.time.temporal.ChronoUnit;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import static com.datorama.oss.timbermill.common.ElasticsearchUtil.*;
import static org.elasticsearch.action.update.UpdateHelper.ContextFields.CTX;
import static org.elasticsearch.common.Strings.EMPTY_ARRAY;
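/**
 * Timbermill's Elasticsearch client. Wraps a {@link RestHighLevelClient} and handles
 * bulk indexing with retries, sliced scrolling, and index rollover/retention settings.
 */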
public class ElasticsearchClient {
public static final String TYPE = "_doc";
public static final String TIMBERMILL_SCRIPT = "timbermill-script";
public static final Gson GSON = new GsonBuilder().registerTypeAdapter(ZonedDateTime.class, new ZonedDateTimeConverter()).create();
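// Matches tasks whose status is still one of the PARTIAL_* values.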
private static final TermsQueryBuilder PARTIALS_QUERY = new TermsQueryBuilder("status", TaskStatus.PARTIAL_ERROR, TaskStatus.PARTIAL_INFO_ONLY, TaskStatus.PARTIAL_SUCCESS);
private static final String[] ALL_TASK_FIELDS = {"*"};
private static final String[] PARENT_FIELDS_TO_FETCH = {"name", "parentId", "primaryId", "parentsPath", "orphan", "_index", CTX + ".*"};
private static final Logger LOG = LoggerFactory.getLogger(ElasticsearchClient.class);
private static final String TTL_FIELD = "meta.dateToDelete";
private static final String META_TASK_BEGIN = "meta.taskBegin";
protected final RestHighLevelClient client;
private final int indexBulkSize;
private final ExecutorService executorService;
private final int numberOfShards;
private final int maxSlices;
private final RetryConfig retryConfig;
private long maxIndexAge;
private long maxIndexSizeInGB;
private long maxIndexDocs;
private final int numOfElasticSearchActionsTries;
private IndexRetryManager retryManager;
private Bulker bulker;
private int searchMaxSize;
private final int scrollLimitation;
private final int scrollTimeoutSeconds;
private final int fetchByIdsPartitions;
private AtomicInteger concurrentScrolls = new AtomicInteger(0);
private final int expiredMaxIndicesTodeleteInParallel;
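/**
 * Validates the given configuration, builds the underlying REST client (optionally with
 * AWS request signing or basic authentication), sets up retry handling, and bootstraps
 * the Elasticsearch resources Timbermill depends on.
 */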
public ElasticsearchClient(String elasticUrl, int indexBulkSize, int indexingThreads, String awsRegion, String elasticUser, String elasticPassword, long maxIndexAge,
long maxIndexSizeInGB, long maxIndexDocs, int numOfElasticSearchActionsTries, int maxBulkIndexFetches, int searchMaxSize, PersistenceHandler persistenceHandler, int numberOfShards, int numberOfReplicas,
int maxTotalFields, Bulker bulker, int scrollLimitation, int scrollTimeoutSeconds, int fetchByIdsPartitions, int expiredMaxIndicesTodeleteInParallel) {
validateProperties(indexBulkSize, indexingThreads, maxIndexAge, maxIndexSizeInGB, maxIndexDocs, numOfElasticSearchActionsTries, numOfElasticSearchActionsTries, scrollLimitation,
scrollTimeoutSeconds, fetchByIdsPartitions, numberOfShards, expiredMaxIndicesTodeleteInParallel);
this.indexBulkSize = indexBulkSize;
this.searchMaxSize = searchMaxSize;
this.maxIndexAge = maxIndexAge;
this.maxIndexSizeInGB = maxIndexSizeInGB;
this.maxIndexDocs = maxIndexDocs;
this.numOfElasticSearchActionsTries = numOfElasticSearchActionsTries;
this.executorService = Executors.newFixedThreadPool(indexingThreads);
this.scrollLimitation = scrollLimitation;
this.scrollTimeoutSeconds = scrollTimeoutSeconds;
this.fetchByIdsPartitions = fetchByIdsPartitions;
this.numberOfShards = numberOfShards;
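// Sliced scroll needs at least two slices, so fall back to two when there is a single shard; otherwise use one slice per shard.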
this.maxSlices = numberOfShards <= 1 ? 2 : numberOfShards;
this.expiredMaxIndicesTodeleteInParallel = expiredMaxIndicesTodeleteInParallel;
HttpHost httpHost = HttpHost.create(elasticUrl);
LOG.info("Connecting to Elasticsearch at url {}", httpHost.toURI());
RestClientBuilder builder = RestClient.builder(httpHost);
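// When an AWS region is given, sign outgoing requests with SigV4 (service name "es") so the client can authenticate against Amazon Elasticsearch Service.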
if (!StringUtils.isEmpty(awsRegion)){
LOG.info("Trying to connect to AWS Elasticsearch");
AWS4Signer signer = new AWS4Signer();
String serviceName = "es";
signer.setServiceName(serviceName);
signer.setRegionName(awsRegion);
HttpRequestInterceptor interceptor = new AWSRequestSigningApacheInterceptor(serviceName, signer, new DefaultAWSCredentialsProviderChain());
builder.setHttpClientConfigCallback(httpClientBuilder -> httpClientBuilder.addInterceptorLast(interceptor));
}
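// When a user is configured, authenticate with basic credentials. Note that setHttpClientConfigCallback replaces any callback registered above, so basic auth and AWS signing are effectively mutually exclusive here.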
if (!StringUtils.isEmpty(elasticUser)){
LOG.info("Connection to Elasticsearch using user {}", elasticUser);
final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials(elasticUser, elasticPassword));
builder.setHttpClientConfigCallback(httpClientBuilder -> httpClientBuilder
.setDefaultCredentialsProvider(credentialsProvider));
}
client = new RestHighLevelClient(builder);
if (bulker == null){
bulker = new Bulker(client);
}
this.bulker = bulker;
this.retryManager = new IndexRetryManager(numOfElasticSearchActionsTries, maxBulkIndexFetches, persistenceHandler, bulker);
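// Retry failed Elasticsearch actions with exponential backoff, starting at a one-second delay, up to the configured number of tries.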
retryConfig = new RetryConfigBuilder()
.withMaxNumberOfTries(numOfElasticSearchActionsTries)
.retryOnAnyException()
.withDelayBetweenTries(1, ChronoUnit.SECONDS)
.withExponentialBackoff()
.build();
bootstrapElasticsearch(numberOfShards, numberOfReplicas, maxTotalFields);
}
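/**
 * Fails fast, throwing a RuntimeException when any configuration property is outside its valid range.
 */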
private void validateProperties(int indexBulkSize, int indexingThreads, long maxIndexAge, long maxIndexSizeInGB, long maxIndexDocs, int numOfMergedTasksTries, int numOfElasticSearchActionsTries,
int scrollLimitation, int scrollTimeoutSeconds, int fetchByIdsPartitions, int numberOfShards, int expiredMaxIndicesToDeleteInParallel) {
if (indexBulkSize < 1) {
throw new RuntimeException("Index bulk size property should be larger than 0");
}
if (indexingThreads < 1) {
throw new RuntimeException("Indexing threads property should be larger than 0");
}
if (maxIndexAge < 1) {
throw new RuntimeException("Index max age property should be larger than 0");
}
if (maxIndexSizeInGB < 1) {
throw new RuntimeException("Index max size property should be larger than 0");
}
if (maxIndexDocs < 1) {
throw new RuntimeException("Index max docs property should be larger than 0");
}
if (numOfMergedTasksTries < 0) {
throw new RuntimeException("Max merge tasks retries property should not be below 0");
}
if (numOfElasticSearchActionsTries < 0) {
throw new RuntimeException("Max elasticsearch actions tries property should not be below 0");
}
if (scrollLimitation < 0) {
throw new RuntimeException("Elasticsearch scroll limitation property should not be below 0");
}
if (scrollTimeoutSeconds < 1) {
throw new RuntimeException("Elasticsearch scroll timeout limitation property should not be below 1");
}
if (fetchByIdsPartitions < 1) {
throw new RuntimeException("Fetch By Ids Partitions property should not be below 1");
}
if (numberOfShards < 1) {
throw new RuntimeException("Number of shards property should not be below 1");
}
if (expiredMaxIndicesToDeleteInParallel < 1) {
throw new RuntimeException("Max Expired Indices To Delete In Parallel property should not be below 1");
}
}
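/**
 * Fetches a single task by its id across all Timbermill indices; returns null when no such task exists.
 */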
public Task getTaskById(String taskId){
Map<String, Task> tasksByIds = getTasksByIds(Sets.newHashSet(taskId), "Test", ElasticsearchClient.ALL_TASK_FIELDS,
org.elasticsearch.common.Strings.EMPTY_ARRAY, TIMBERMILL_INDEX_WILDCARD);
return tasksByIds.get(taskId);
}
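/**
 * Fetches every task document matching the given id using a sliced scroll; unlike
 * getTaskById, this can return multiple results for the same id across indices.
 */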
public List<Task> getMultipleTasksById(String taskId) {
IdsQueryBuilder idsQueryBuilder = QueryBuilders.idsQuery().addIds(taskId);
Map<String, List<Task>> map = Maps.newHashMap();
List<Future<Map<String, List<Task>>>> futures = runScrollInSlices(idsQueryBuilder, "Test", EMPTY_ARRAY, ALL_TASK_FIELDS, TIMBERMILL_INDEX_WILDCARD);
for (Future