package com.databricks.jdbc.client.impl.thrift.commons;

import com.databricks.jdbc.client.DatabricksHttpException;
import com.databricks.jdbc.client.http.DatabricksHttpClient;
import com.databricks.jdbc.commons.LogLevel;
import com.databricks.jdbc.commons.util.LoggingUtil;
import com.databricks.jdbc.commons.util.ValidationUtil;
import com.google.common.annotations.VisibleForTesting;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.thrift.TConfiguration;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;

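/**
 * Thrift {@link TTransport} implementation that tunnels Thrift messages over HTTP using the
 * {@link DatabricksHttpClient}. Writes are buffered in memory and sent as a single HTTP POST
 * when {@link #flush()} is called; the response body then backs subsequent reads.
 */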
public class DatabricksHttpTTransport extends TTransport {
  private final DatabricksHttpClient httpClient;
  private final String url;
  private Map<String, String> customHeaders = Collections.emptyMap();
  private final ByteArrayOutputStream requestBuffer;
  private InputStream inputStream = null;
  private CloseableHttpResponse response = null;
  private static final Map<String, String> DEFAULT_HEADERS =
      Collections.unmodifiableMap(getDefaultHeaders());

  public DatabricksHttpTTransport(DatabricksHttpClient httpClient, String url) {
    this.httpClient = httpClient;
    this.httpClient.closeExpiredAndIdleConnections();
    this.url = url;
    this.requestBuffer = new ByteArrayOutputStream();
  }

  @Override
  public boolean isOpen() {
    // HTTP Client doesn't maintain an open connection.
    return true;
  }

  @Override
  public void open() throws TTransportException {
    // Opening is not required for HTTP transport
  }

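  /**
   * Releases the current response stream and response object, and prunes expired and idle
   * connections in the underlying HTTP client. Safe to call multiple times.
   */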
  @Override
  public void close() {
    this.httpClient.closeExpiredAndIdleConnections();
    if (inputStream != null) {
      try {
        inputStream.close();
      } catch (IOException e) {
        LoggingUtil.log(
            LogLevel.ERROR,
            String.format("Failed to close inputStream with error {%s}. Skipping the close.", e));
      }
      inputStream = null;
    }
    if (response != null) {
      try {
        response.close();
      } catch (IOException e) {
        LoggingUtil.log(
            LogLevel.ERROR,
            String.format("Failed to close response with error {%s}", e.toString()));
      }
      response = null;
    }
  }

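  /**
   * Sets headers to be sent on every request in addition to the default Thrift headers. A
   * {@code null} argument resets the custom headers to an empty map; otherwise the map is
   * copied defensively.
   */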
  public void setCustomHeaders(Map<String, String> headers) {
    if (headers != null) {
      customHeaders = new HashMap<>(headers);
    } else {
      customHeaders = Collections.emptyMap();
    }
  }

  @VisibleForTesting
  Map<String, String> getCustomHeaders() {
    return customHeaders;
  }

  @VisibleForTesting
  InputStream getInputStream() {
    return inputStream;
  }

  @VisibleForTesting
  void setInputStream(InputStream inputStream) {
    this.inputStream = inputStream;
  }

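  /**
   * Reads response bytes from the stream produced by the most recent {@link #flush()}. Throws
   * if no request has been flushed yet or if the stream is exhausted.
   */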
  @Override
  public int read(byte[] buf, int off, int len) throws TTransportException {
    if (inputStream == null) {
      throw new TTransportException("Response buffer is empty, no request.");
    }
    try {
      int ret = inputStream.read(buf, off, len);
      if (ret == -1) {
        throw new TTransportException("No more data available.");
      }
      return ret;
    } catch (IOException e) {
      LoggingUtil.log(
          LogLevel.ERROR,
          String.format("Failed to read inputStream with error {%s}", e.toString()));
      throw new TTransportException(e);
    }
  }

  @Override
  public void write(byte[] buf, int off, int len) {
    requestBuffer.write(buf, off, len);
  }

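  /**
   * Sends the buffered request bytes as an HTTP POST carrying the default and custom headers,
   * then exposes the response body via {@link #read(byte[], int, int)} and clears the buffer.
   */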
  @Override
  public void flush() throws TTransportException {
    try {
      HttpPost request = new HttpPost(this.url);
      DEFAULT_HEADERS.forEach(request::addHeader);
      if (customHeaders != null) {
        customHeaders.forEach(request::addHeader);
      }
      request.setEntity(new ByteArrayEntity(requestBuffer.toByteArray()));
      response = httpClient.execute(request);
      ValidationUtil.checkHTTPError(response);
      inputStream = response.getEntity().getContent();
      requestBuffer.reset();
    } catch (DatabricksHttpException | IOException e) {
      // If an HTTP-level error appears anywhere in the cause chain, surface it directly.
      Throwable cause = e;
      while (cause != null) {
        if (cause instanceof DatabricksHttpException) {
          throw new TTransportException(
              TTransportException.UNKNOWN, "Failed to flush data to server: " + cause.getMessage());
        }
        cause = cause.getCause();
      }
      // Otherwise treat it as a transport-level failure: recycle stale connections, log,
      // and wrap the original exception message.
      httpClient.closeExpiredAndIdleConnections();

      String errorMessage = "Failed to flush data to server: " + e.getMessage();
      LoggingUtil.log(LogLevel.ERROR, errorMessage);
      throw new TTransportException(TTransportException.UNKNOWN, errorMessage);
    }
  }

  @Override
  public TConfiguration getConfiguration() {
    return null;
  }

  @Override
  public void updateKnownMessageSize(long size) throws TTransportException {}

  @Override
  public void checkReadBytesAvailable(long numBytes) throws TTransportException {}

  private static Map<String, String> getDefaultHeaders() {
    Map<String, String> headers = new HashMap<>();
    headers.put("Content-Type", "application/x-thrift");
    headers.put("Accept", "application/x-thrift");
    return headers;
  }
}
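
/*
 * Minimal usage sketch (illustrative only; the endpoint URL, token, protocol choice, and
 * TCLIService client are assumptions, not part of this class):
 *
 *   DatabricksHttpTTransport transport =
 *       new DatabricksHttpTTransport(httpClient, "https://example.cloud.databricks.com/cli");
 *   transport.setCustomHeaders(java.util.Map.of("Authorization", "Bearer <token>"));
 *   org.apache.thrift.protocol.TBinaryProtocol protocol =
 *       new org.apache.thrift.protocol.TBinaryProtocol(transport);
 *   // Each Thrift client call writes into the request buffer; flush() performs the POST.
 */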