/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.hadoop.rest;

import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Map.Entry;

import org.codehaus.jackson.JsonParser;
import org.codehaus.jackson.map.DeserializationConfig;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.SerializationConfig;

import org.elasticsearch.hadoop.cfg.ConfigurationOptions;
import org.elasticsearch.hadoop.cfg.Settings;
import org.elasticsearch.hadoop.rest.Request.Method;
import org.elasticsearch.hadoop.rest.stats.Stats;
import org.elasticsearch.hadoop.rest.stats.StatsAware;
import org.elasticsearch.hadoop.serialization.ParsingUtils;
import org.elasticsearch.hadoop.serialization.dto.Node;
import org.elasticsearch.hadoop.serialization.json.JacksonJsonParser;
import org.elasticsearch.hadoop.serialization.json.JsonFactory;
import org.elasticsearch.hadoop.serialization.json.ObjectReader;
import org.elasticsearch.hadoop.util.ByteSequence;
import org.elasticsearch.hadoop.util.BytesArray;
import org.elasticsearch.hadoop.util.IOUtils;
import org.elasticsearch.hadoop.util.ObjectUtils;
import org.elasticsearch.hadoop.util.StringUtils;
import org.elasticsearch.hadoop.util.TrackingBytesArray;
import org.elasticsearch.hadoop.util.unit.TimeValue;

import static org.elasticsearch.hadoop.rest.Request.Method.*;
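/**
 * Low-level REST client used by elasticsearch-hadoop for talking to an
 * Elasticsearch cluster over HTTP: node discovery, bulk writes and
 * response parsing. A minimal usage sketch, assuming an already
 * configured {@link Settings} instance:
 *
 * <pre>{@code
 * RestClient client = new RestClient(settings);
 * List<String> nodes = client.discoverNodes();
 * client.close();
 * }</pre>
 */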
public class RestClient implements Closeable, StatsAware {

    private NetworkClient network;
    private final ObjectMapper mapper;
    private TimeValue scrollKeepAlive;
    private boolean indexReadMissingAsEmpty;
    private final HttpRetryPolicy retryPolicy;
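
    // shared Jackson 1.x mapper; annotation support is disabled to keep the
    // Jackson requirements low (older Jackson versions lack full annotation support)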
    {
        mapper = new ObjectMapper();
        mapper.configure(DeserializationConfig.Feature.USE_ANNOTATIONS, false);
        mapper.configure(SerializationConfig.Feature.USE_ANNOTATIONS, false);
    }

    private final Stats stats = new Stats();

    public enum HEALTH {
        RED, YELLOW, GREEN
    }
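
    /**
     * Creates a client from the given settings, resolving the configured
     * batch-write retry policy ({@code simple}, {@code none}, or a custom
     * class name).
     */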
    public RestClient(Settings settings) {
        network = new NetworkClient(settings);

        scrollKeepAlive = TimeValue.timeValueMillis(settings.getScrollKeepAlive());
        indexReadMissingAsEmpty = settings.getIndexReadMissingAsEmpty();

        String retryPolicyName = settings.getBatchWriteRetryPolicy();

        if (ConfigurationOptions.ES_BATCH_WRITE_RETRY_POLICY_SIMPLE.equals(retryPolicyName)) {
            retryPolicyName = SimpleHttpRetryPolicy.class.getName();
        }
        else if (ConfigurationOptions.ES_BATCH_WRITE_RETRY_POLICY_NONE.equals(retryPolicyName)) {
            retryPolicyName = NoHttpRetryPolicy.class.getName();
        }

        retryPolicy = ObjectUtils.instantiate(retryPolicyName, settings);
    }
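
    /**
     * Discovers the nodes in the cluster and returns their HTTP addresses.
     * The nodes API reports each address as {@code inet[/IP:port]}; only the
     * {@code IP:port} part is kept.
     */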
    @SuppressWarnings({ "rawtypes", "unchecked" })
    public List<String> discoverNodes() {
        String endpoint = "_nodes/transport";
        Map<String, Map> nodes = (Map<String, Map>) get(endpoint, "nodes");

        List<String> hosts = new ArrayList<String>(nodes.size());

        for (Map value : nodes.values()) {
            String inet = (String) value.get("http_address");
            if (StringUtils.hasText(inet)) {
                // strip the leading "inet[/" and the trailing "]"
                int startIp = inet.indexOf("/") + 1;
                int endIp = inet.indexOf("]");
                inet = inet.substring(startIp, endIp);
                hosts.add(inet);
            }
        }
        return hosts;
    }
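
    // GET helper - returns the entry under the given key from the parsed JSON
    // response (or the whole response if the key is null)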
    private <T> T get(String q, String string) {
        return parseContent(execute(GET, q), string);
    }
    @SuppressWarnings("unchecked")
    private <T> T parseContent(InputStream content, String string) {
        Map<String, Object> map = Collections.emptyMap();

        try {
            // create parser manually to lower Jackson requirements
            JsonParser jsonParser = mapper.getJsonFactory().createJsonParser(content);
            try {
                map = mapper.readValue(jsonParser, Map.class);
            } finally {
                countStreamStats(content);
            }
        } catch (IOException ex) {
            throw new EsHadoopParsingException(ex);
        }

        return (T) (string != null ? map.get(string) : map);
    }
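
    /**
     * Writes the given payload to the resource's bulk endpoint, retrying the
     * rejected entries according to the configured retry policy. Returns the
     * positions of the documents that could not be sent.
     */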
    public BitSet bulk(Resource resource, TrackingBytesArray data) {
        Retry retry = retryPolicy.init();
        int httpStatus = 0;

        boolean isRetry = false;

        do {
            // NB: dynamically get the stats since the transport can change
            long start = network.transportStats().netTotalTime;
            Response response = execute(PUT, resource.bulk(), data);
            long spent = network.transportStats().netTotalTime - start;

            stats.bulkTotal++;
            stats.docsSent += data.entries();
            stats.bulkTotalTime += spent;
            // bytes will be counted by the transport layer

            if (isRetry) {
                stats.docsRetried += data.entries();
                stats.bytesRetried += data.length();
                stats.bulkRetries++;
                stats.bulkRetriesTotalTime += spent;
            }

            isRetry = true;

            httpStatus = (retryFailedEntries(response.body(), data) ? HttpStatus.SERVICE_UNAVAILABLE : HttpStatus.OK);
        } while (data.length() > 0 && retry.retry(httpStatus));

        return data.leftoversPosition();
    }
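
    /**
     * Inspects a bulk response and prunes the entries that were accepted from
     * {@code data}, so a subsequent retry resends only the rejected ones;
     * returns true if any entry failed and needs to be retried.
     */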
    @SuppressWarnings("rawtypes")
    private boolean retryFailedEntries(InputStream content, TrackingBytesArray data) {
        try {
            ObjectReader r = JsonFactory.objectReader(mapper, Map.class);
            JsonParser parser = mapper.getJsonFactory().createJsonParser(content);
            try {
                if (ParsingUtils.seek("items", new JacksonJsonParser(parser)) == null) {
                    // recorded bytes are ack here
                    stats.bytesAccepted += data.length();
                    stats.docsAccepted += data.entries();
                    return false;
                }
            } finally {
                countStreamStats(content);
            }

            int entryToDeletePosition = 0; // head of the list
            for (Iterator<Map> iterator = r.readValues(parser); iterator.hasNext();) {