/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.hadoop.rest;
import org.codehaus.jackson.JsonParser;
import org.codehaus.jackson.map.DeserializationConfig;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.SerializationConfig;
import org.elasticsearch.hadoop.EsHadoopIllegalStateException;
import org.elasticsearch.hadoop.cfg.ConfigurationOptions;
import org.elasticsearch.hadoop.cfg.Settings;
import org.elasticsearch.hadoop.rest.Request.Method;
import org.elasticsearch.hadoop.rest.query.QueryBuilder;
import org.elasticsearch.hadoop.rest.stats.Stats;
import org.elasticsearch.hadoop.rest.stats.StatsAware;
import org.elasticsearch.hadoop.serialization.ParsingUtils;
import org.elasticsearch.hadoop.serialization.dto.NodeInfo;
import org.elasticsearch.hadoop.serialization.json.JacksonJsonGenerator;
import org.elasticsearch.hadoop.serialization.json.JacksonJsonParser;
import org.elasticsearch.hadoop.serialization.json.JsonFactory;
import org.elasticsearch.hadoop.serialization.json.ObjectReader;
import org.elasticsearch.hadoop.util.ByteSequence;
import org.elasticsearch.hadoop.util.BytesArray;
import org.elasticsearch.hadoop.util.EsMajorVersion;
import org.elasticsearch.hadoop.util.FastByteArrayOutputStream;
import org.elasticsearch.hadoop.util.IOUtils;
import org.elasticsearch.hadoop.util.ObjectUtils;
import org.elasticsearch.hadoop.util.StringUtils;
import org.elasticsearch.hadoop.util.TrackingBytesArray;
import org.elasticsearch.hadoop.util.unit.TimeValue;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Map.Entry;
import static org.elasticsearch.hadoop.rest.Request.Method.DELETE;
import static org.elasticsearch.hadoop.rest.Request.Method.GET;
import static org.elasticsearch.hadoop.rest.Request.Method.HEAD;
import static org.elasticsearch.hadoop.rest.Request.Method.POST;
import static org.elasticsearch.hadoop.rest.Request.Method.PUT;
public class RestClient implements Closeable, StatsAware {

    private final static int MAX_BULK_ERROR_MESSAGES = 5;

    private NetworkClient network;
    private final ObjectMapper mapper;
    private final TimeValue scrollKeepAlive;
    private final boolean indexReadMissingAsEmpty;
    private final HttpRetryPolicy retryPolicy;
    final EsMajorVersion internalVersion;
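    // shared Jackson mapper with annotation processing disabled for both reading and writing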
    {
        mapper = new ObjectMapper();
        mapper.configure(DeserializationConfig.Feature.USE_ANNOTATIONS, false);
        mapper.configure(SerializationConfig.Feature.USE_ANNOTATIONS, false);
    }
    private final Stats stats = new Stats();

    public enum HEALTH {
        RED, YELLOW, GREEN
    }
    public RestClient(Settings settings) {
        network = new NetworkClient(settings);

        scrollKeepAlive = TimeValue.timeValueMillis(settings.getScrollKeepAlive());
        indexReadMissingAsEmpty = settings.getIndexReadMissingAsEmpty();

        String retryPolicyName = settings.getBatchWriteRetryPolicy();
        if (ConfigurationOptions.ES_BATCH_WRITE_RETRY_POLICY_SIMPLE.equals(retryPolicyName)) {
            retryPolicyName = SimpleHttpRetryPolicy.class.getName();
        }
        else if (ConfigurationOptions.ES_BATCH_WRITE_RETRY_POLICY_NONE.equals(retryPolicyName)) {
            retryPolicyName = NoHttpRetryPolicy.class.getName();
        }
        retryPolicy = ObjectUtils.instantiate(retryPolicyName, settings);

        // Assume that the elasticsearch major version is the latest if the version is not already present in the settings
        internalVersion = settings.getInternalVersionOrLatest();
    }
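    // discovers the cluster nodes through the _nodes/http API, keeping only nodes
    // that expose HTTP and, if requested, only dedicated client nodes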
    public List<NodeInfo> getHttpNodes(boolean clientNodeOnly) {
        Map<String, Map<String, Object>> nodesData = get("_nodes/http", "nodes");
        List<NodeInfo> nodes = new ArrayList<NodeInfo>();

        for (Entry<String, Map<String, Object>> entry : nodesData.entrySet()) {
            NodeInfo node = new NodeInfo(entry.getKey(), entry.getValue());
            if (node.hasHttp() && (!clientNodeOnly || node.isClient())) {
                nodes.add(node);
            }
        }
        return nodes;
    }
    public List<NodeInfo> getHttpClientNodes() {
        return getHttpNodes(true);
    }
    public List<NodeInfo> getHttpDataNodes() {
        List<NodeInfo> nodes = getHttpNodes(false);

        Iterator<NodeInfo> it = nodes.iterator();
        while (it.hasNext()) {
            NodeInfo node = it.next();
            if (!node.isData()) {
                it.remove();
            }
        }
        return nodes;
    }
    public List<NodeInfo> getHttpIngestNodes() {
        List<NodeInfo> nodes = getHttpNodes(false);

        Iterator<NodeInfo> it = nodes.iterator();
        while (it.hasNext()) {
            NodeInfo nodeInfo = it.next();
            if (!nodeInfo.isIngest()) {
                it.remove();
            }
        }
        return nodes;
    }
    public <T> T get(String q, String string) {
        return parseContent(execute(GET, q), string);
    }
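    // reads the response body into a Map and returns either the whole map or, when a
    // field name is given, just that entry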
@SuppressWarnings("unchecked")
private T parseContent(InputStream content, String string) {
Map map = Collections.emptyMap();
try {
// create parser manually to lower Jackson requirements
JsonParser jsonParser = mapper.getJsonFactory().createJsonParser(content);
try {
map = mapper.readValue(jsonParser, Map.class);
} finally {
countStreamStats(content);
}
} catch (IOException ex) {
throw new EsHadoopParsingException(ex);
}
return (T) (string != null ? map.get(string) : map);
}
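    // writes the given bulk payload, re-sending whatever entries are still tracked in
    // 'data' for as long as the retry policy allows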
    public BulkResponse bulk(Resource resource, TrackingBytesArray data) {
        Retry retry = retryPolicy.init();
        BulkResponse processedResponse;

        boolean isRetry = false;

        do {
            // NB: dynamically get the stats since the transport can change
            long start = network.transportStats().netTotalTime;
            Response response = execute(PUT, resource.bulk(), data);
            long spent = network.transportStats().netTotalTime - start;

            stats.bulkTotal++;
            stats.docsSent += data.entries();
            stats.bulkTotalTime += spent;
            // bytes will be counted by the transport layer
            if (isRetry) {
                stats.docsRetried += data.entries();
                stats.bytesRetried += data.length();
                stats.bulkRetries++;
                stats.bulkRetriesTotalTime += spent;
            }

            isRetry = true;

            processedResponse = processBulkResponse(response, data);
        } while (data.length() > 0 && retry.retry(processedResponse.getHttpStatus()));

        return processedResponse;
    }
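    // inspects the bulk response: when no per-item errors are present the whole batch is
    // acknowledged, otherwise accepted entries are pruned from 'data' and a small sample
    // of error messages is kept for reporting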
@SuppressWarnings("rawtypes")
BulkResponse processBulkResponse(Response response, TrackingBytesArray data) {
InputStream content = response.body();
try {
ObjectReader r = JsonFactory.objectReader(mapper, Map.class);
JsonParser parser = mapper.getJsonFactory().createJsonParser(content);
try {
if (ParsingUtils.seek(new JacksonJsonParser(parser), "items") == null) {
// recorded bytes are ack here
stats.bytesAccepted += data.length();
stats.docsAccepted += data.entries();
return BulkResponse.ok(data.entries());
}
} finally {
countStreamStats(content);
}
int docsSent = data.entries();
List errorMessageSample = new ArrayList(MAX_BULK_ERROR_MESSAGES);
int errorMessagesSoFar = 0;
int entryToDeletePosition = 0; // head of the list
for (Iterator