
com.datastax.driver.core.SessionManager Maven / Gradle / Ivy


A driver for DataStax Enterprise (DSE) and Apache Cassandra 1.2+ clusters that works exclusively with the Cassandra Query Language version 3 (CQL3) and Cassandra's binary protocol, supporting DSE-specific features such as geospatial types, DSE Graph and DSE authentication.

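For context, a minimal usage sketch (not part of the artifact): applications never instantiate SessionManager directly; they obtain it indirectly as the Session returned by Cluster.connect(). The contact point and query below are placeholders.

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;

public class QuickStart {
  public static void main(String[] args) {
    // Placeholder contact point; point this at a reachable DSE/Cassandra node.
    Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
    try {
      Session session = cluster.connect(); // returns a SessionManager behind the Session interface
      ResultSet rs = session.execute("SELECT release_version FROM system.local");
      Row row = rs.one();
      System.out.println(row.getString("release_version"));
    } finally {
      cluster.close(); // closes the session as well
    }
  }
}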
/*
 * Copyright DataStax, Inc.
 *
 * This software can be used solely with DataStax Enterprise. Please consult the license at
 * http://www.datastax.com/terms/datastax-dse-driver-license-terms
 */
package com.datastax.driver.core;

import com.datastax.driver.core.Message.Response;
import com.datastax.driver.core.exceptions.DriverInternalError;
import com.datastax.driver.core.exceptions.InvalidQueryException;
import com.datastax.driver.core.exceptions.UnsupportedFeatureException;
import com.datastax.driver.core.exceptions.UnsupportedProtocolVersionException;
import com.datastax.driver.core.policies.LoadBalancingPolicy;
import com.datastax.driver.core.policies.ReconnectionPolicy;
import com.datastax.driver.core.policies.SpeculativeExecutionPolicy;
import com.datastax.driver.core.utils.MoreFutures;
import com.google.common.base.Functions;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.AsyncFunction;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.SettableFuture;
import com.google.common.util.concurrent.Uninterruptibles;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Driver implementation of the Session interface. */
class SessionManager extends AbstractSession {

  private static final Logger logger = LoggerFactory.getLogger(Session.class);
  private static final long STATUS_EVENT_DELAY_MILLIS = 300000L;

  final Cluster cluster;
  final ConcurrentMap<Host, HostConnectionPool> pools;
  final HostConnectionPool.PoolState poolsState;
  private final AtomicReference<ListenableFuture<Session>> initFuture =
      new AtomicReference<ListenableFuture<Session>>();
  final AtomicReference<CloseFuture> closeFuture = new AtomicReference<CloseFuture>();
  private final InsightsClient insightsClient;

  private volatile boolean isInit;
  private volatile boolean isClosing;

  // Package protected, only Cluster should construct this.
  SessionManager(final Cluster cluster) {
    this.cluster = cluster;
    this.pools = new ConcurrentHashMap<Host, HostConnectionPool>();
    this.poolsState = new HostConnectionPool.PoolState();
    InsightsConfiguration insightsConfiguration =
        new InsightsConfiguration(
            cluster.getConfiguration().getInsightsOptions().isMonitorReportingEnabled(),
            STATUS_EVENT_DELAY_MILLIS);
    StackTraceElement[] initCallerStackTrace = {};
    try {
      initCallerStackTrace = Thread.currentThread().getStackTrace();
    } catch (Exception ex) {
      // ignore and fallback to empty
    }

    this.insightsClient =
        InsightsClient.createInsightsClient(
            cluster, insightsConfiguration, this, initCallerStackTrace);
  }

  @Override
  public Session init() {
    try {
      return Uninterruptibles.getUninterruptibly(initAsync());
    } catch (ExecutionException e) {
      throw DriverThrowables.propagateCause(e);
    }
  }

  @Override
  public ListenableFuture<Session> initAsync() {
    // If we haven't initialized the cluster, do it now
    cluster.init();

    ListenableFuture<Session> existing = initFuture.get();
    if (existing != null) return existing;

    final SettableFuture<Session> myInitFuture = SettableFuture.create();
    // If we lose the CAS race, another caller has already installed its future; return that one.
    if (!initFuture.compareAndSet(null, myInitFuture)) return initFuture.get();

    Collection<Host> hosts = cluster.getMetadata().allHosts();
    ListenableFuture<?> allPoolsCreatedFuture = createPools(hosts);
    ListenableFuture<?> allPoolsUpdatedFuture =
        GuavaCompatibility.INSTANCE.transformAsync(
            allPoolsCreatedFuture,
            new AsyncFunction<Object, Object>() {
              @Override
              @SuppressWarnings("unchecked")
              public ListenableFuture<Object> apply(Object input) throws Exception {
                isInit = true;
                return (ListenableFuture<Object>) updateCreatedPools();
              }
            });

    GuavaCompatibility.INSTANCE.addCallback(
        allPoolsUpdatedFuture,
        new FutureCallback<Object>() {
          @Override
          public void onSuccess(Object result) {
            myInitFuture.set(SessionManager.this);
            insightsClient.sendStartupMessage();
            insightsClient.scheduleStatusMessageSend();
          }

          @Override
          public void onFailure(Throwable t) {
            SessionManager.this.closeAsync(); // don't leak the session
            myInitFuture.setException(t);
          }
        });

    return myInitFuture;
  }

  private ListenableFuture<?> createPools(Collection<Host> hosts) {
    List<ListenableFuture<Boolean>> futures = Lists.newArrayListWithCapacity(hosts.size());
    for (Host host : hosts)
      if (host.state != Host.State.DOWN) futures.add(maybeAddPool(host, null));
    return Futures.allAsList(futures);
  }

  @Override
  public String getLoggedKeyspace() {
    return poolsState.keyspace;
  }

  @Override
  public ResultSetFuture executeAsync(final Statement statement) {
    if (isInit) {
      DefaultResultSetFuture future =
          new DefaultResultSetFuture(
              this, cluster.manager.protocolVersion(), makeRequestMessage(statement, null));
      execute(future, statement);
      return future;
    } else {
      // If the session is not initialized, we can't call makeRequestMessage() synchronously,
      // because it requires internal Cluster state that might not be initialized yet (like the
      // protocol version). Because of the way the future is built, we need another 'proxy' future
      // that we can return now.
      final ChainedResultSetFuture chainedFuture = new ChainedResultSetFuture();
      this.initAsync()
          .addListener(
              new Runnable() {
                @Override
                public void run() {
                  DefaultResultSetFuture actualFuture =
                      new DefaultResultSetFuture(
                          SessionManager.this,
                          cluster.manager.protocolVersion(),
                          makeRequestMessage(statement, null));
                  execute(actualFuture, statement);
                  chainedFuture.setSource(actualFuture);
                }
              },
              executor());
      return chainedFuture;
    }
  }

  @Override
  public ListenableFuture<AsyncContinuousPagingResult> executeContinuouslyAsync(
      final Statement statement, final ContinuousPagingOptions options) {
    Preconditions.checkNotNull(options, "Options must not be null");
    int maxPagesPerSecond = options.getMaxPagesPerSecond();
    if (maxPagesPerSecond > 0) {
      long timeoutMillis =
          (statement.getReadTimeoutMillis() >= 0)
              ? statement.getReadTimeoutMillis()
              : configuration().getSocketOptions().getReadTimeoutMillis();
      if (timeoutMillis < 1000 / maxPagesPerSecond) {
        logger.warn(
            "Running a query with {} page(s)/second but the read timeout is {} ms, the query will necessarily time out",
            maxPagesPerSecond,
            timeoutMillis);
      }
    }
    final SettableFuture<AsyncContinuousPagingResult> result = SettableFuture.create();
    final ProtocolVersion protocolVersion = cluster.manager.protocolVersion();
    if (isInit) {
      ContinuousPagingQueue queue =
          new ContinuousPagingQueue(
              makeRequestMessage(statement, null, options), protocolVersion, result);
      MultiResponseRequestHandler handler = new MultiResponseRequestHandler(this, queue, statement);
      handler.sendRequest();
    } else {
      this.initAsync()
          .addListener(
              new Runnable() {
                @Override
                public void run() {
                  ContinuousPagingQueue queue =
                      new ContinuousPagingQueue(
                          makeRequestMessage(statement, null, options), protocolVersion, result);
                  MultiResponseRequestHandler handler =
                      new MultiResponseRequestHandler(SessionManager.this, queue, statement);
                  handler.sendRequest();
                }
              },
              executor());
    }
    return result;
  }

  @Override
  public ContinuousPagingResult executeContinuously(
      final Statement statement, final ContinuousPagingOptions options) {
    Preconditions.checkNotNull(options, "Options must not be null");
    return new DefaultContinuousPagingResult(executeContinuouslyAsync(statement, options));
  }

  protected ListenableFuture<PreparedStatement> prepareAsync(
      String query, String keyspace, Map<String, ByteBuffer> customPayload) {
    Requests.Prepare request = new Requests.Prepare(query, keyspace);
    request.setCustomPayload(customPayload);
    Connection.Future future = new Connection.Future(request);
    execute(future, Statement.DEFAULT);
    return toPreparedStatement(query, keyspace, future);
  }

  @Override
  public CloseFuture closeAsync() {
    CloseFuture future = closeFuture.get();
    if (future != null) return future;

    isClosing = true;
    cluster.manager.removeSession(this);

    List<CloseFuture> futures = new ArrayList<CloseFuture>(pools.size());
    for (HostConnectionPool pool : pools.values()) futures.add(pool.closeAsync());

    future = new CloseFuture.Forwarding(futures);
    insightsClient.shutdown();

    return closeFuture.compareAndSet(null, future)
        ? future
        : closeFuture.get(); // We raced, it's ok, return the future that was actually set
  }

  @Override
  public boolean isClosed() {
    return closeFuture.get() != null;
  }

  @Override
  public Cluster getCluster() {
    return cluster;
  }

  @Override
  public Session.State getState() {
    return new State(this);
  }

  private ListenableFuture<PreparedStatement> toPreparedStatement(
      final String query, final String keyspace, final Connection.Future future) {
    return GuavaCompatibility.INSTANCE.transformAsync(
        future,
        new AsyncFunction<Response, PreparedStatement>() {
          @Override
          public ListenableFuture<PreparedStatement> apply(Response response) {
            switch (response.type) {
              case RESULT:
                Responses.Result rm = (Responses.Result) response;
                switch (rm.kind) {
                  case PREPARED:
                    Responses.Result.Prepared pmsg = (Responses.Result.Prepared) rm;
                    String keyspaceToUse = poolsState.keyspace;
                    // Only use the given keyspace if it's different from the session keyspace
                    // and the protocol supports it.
                    if (keyspace != null
                        && !keyspace.equals(keyspaceToUse)
                        && ProtocolFeature.KEYSPACE_ON_QUERY.isSupportedBy(
                            cluster.manager.protocolVersion())) {
                      keyspaceToUse = keyspace;
                    }
                    PreparedStatement stmt =
                        DefaultPreparedStatement.fromMessage(pmsg, cluster, query, keyspaceToUse);
                    stmt = cluster.manager.addPrepared(stmt);
                    if (cluster.getConfiguration().getQueryOptions().isPrepareOnAllHosts()) {
                      // All Sessions are connected to the same nodes, so it's enough to prepare
                      // only on the nodes of this session. If that changes, we'll have to make
                      // sure this propagates to other sessions too.
                      return prepare(stmt, future.getEndPoint());
                    } else {
                      return Futures.immediateFuture(stmt);
                    }
                  default:
                    return Futures.immediateFailedFuture(
                        new DriverInternalError(
                            String.format(
                                "%s response received when prepared statement was expected",
                                rm.kind)));
                }
              case ERROR:
                return Futures.immediateFailedFuture(
                    ((Responses.Error) response).asException(future.getEndPoint()));
              default:
                return Futures.immediateFailedFuture(
                    new DriverInternalError(
                        String.format(
                            "%s response received when prepared statement was expected",
                            response.type)));
            }
          }
        },
        executor());
  }

  Connection.Factory connectionFactory() {
    return cluster.manager.connectionFactory;
  }

  Configuration configuration() {
    return cluster.manager.configuration;
  }

  LoadBalancingPolicy loadBalancingPolicy() {
    return cluster.manager.loadBalancingPolicy();
  }

  SpeculativeExecutionPolicy speculativeExecutionPolicy() {
    return cluster.manager.speculativeExecutionPolicy();
  }

  ReconnectionPolicy reconnectionPolicy() {
    return cluster.manager.reconnectionPolicy();
  }

  ListeningExecutorService executor() {
    return cluster.manager.executor;
  }

  ListeningExecutorService blockingExecutor() {
    return cluster.manager.blockingExecutor;
  }

  // The returned future completes with true if the pool was (re)created successfully (or no pool
  // is needed for this host), and false otherwise.
  ListenableFuture<Boolean> forceRenewPool(final Host host, Connection reusedConnection) {
    final HostDistance distance = cluster.manager.loadBalancingPolicy().distance(host);
    if (distance == HostDistance.IGNORED) return Futures.immediateFuture(true);

    if (isClosing) return Futures.immediateFuture(false);

    final HostConnectionPool newPool = new HostConnectionPool(host, distance, this);
    ListenableFuture<Void> poolInitFuture = newPool.initAsync(reusedConnection);

    final SettableFuture<Boolean> future = SettableFuture.create();

    GuavaCompatibility.INSTANCE.addCallback(
        poolInitFuture,
        new FutureCallback<Void>() {
          @Override
          public void onSuccess(Void result) {
            HostConnectionPool previous = pools.put(host, newPool);
            if (previous == null) {
              logger.debug("Added connection pool for {}", host);
            } else {
              logger.debug("Renewed connection pool for {}", host);
              previous.closeAsync();
            }

            // If we raced with a session shutdown, ensure that the pool will be closed.
            if (isClosing) {
              newPool.closeAsync();
              pools.remove(host);
              future.set(false);
            } else {
              future.set(true);
            }
          }

          @Override
          public void onFailure(Throwable t) {
            logger.warn("Error creating pool to " + host, t);
            future.set(false);
          }
        });

    return future;
  }

  // Replace the pool for a given host, but only if the current pool is the given previous value
  // (which can be null).
  // Returns a future if the replacement was successful, or null if we raced with another thread.
  private ListenableFuture<Void> replacePool(
      final Host host,
      HostDistance distance,
      HostConnectionPool previous,
      Connection reusedConnection) {
    if (isClosing) return MoreFutures.VOID_SUCCESS;

    final HostConnectionPool newPool = new HostConnectionPool(host, distance, this);
    if (previous == null) {
      if (pools.putIfAbsent(host, newPool) != null) {
        return null;
      }
    } else {
      if (!pools.replace(host, previous, newPool)) {
        return null;
      }
      if (!previous.isClosed()) {
        logger.warn(
            "Replacing a pool that wasn't closed. Closing it now, but this was not expected.");
        previous.closeAsync();
      }
    }

    ListenableFuture<Void> poolInitFuture = newPool.initAsync(reusedConnection);

    GuavaCompatibility.INSTANCE.addCallback(
        poolInitFuture,
        new FutureCallback<Void>() {
          @Override
          public void onSuccess(Void result) {
            // If we raced with a session shutdown, ensure that the pool will be closed.
            if (isClosing) {
              newPool.closeAsync();
              pools.remove(host);
            }
          }

          @Override
          public void onFailure(Throwable t) {
            pools.remove(host);
          }
        });
    return poolInitFuture;
  }

  // The returned future completes with true if the pool was added successfully (or no pool is
  // needed for this host), and false otherwise.
  ListenableFuture<Boolean> maybeAddPool(final Host host, Connection reusedConnection) {
    final HostDistance distance = cluster.manager.loadBalancingPolicy().distance(host);
    if (distance == HostDistance.IGNORED) return Futures.immediateFuture(true);

    HostConnectionPool previous = pools.get(host);
    if (previous != null && !previous.isClosed()) return Futures.immediateFuture(true);

    while (true) {
      previous = pools.get(host);
      if (previous != null && !previous.isClosed()) return Futures.immediateFuture(true);

      final SettableFuture<Boolean> future = SettableFuture.create();
      ListenableFuture<Void> newPoolInit = replacePool(host, distance, previous, reusedConnection);
      if (newPoolInit != null) {
        GuavaCompatibility.INSTANCE.addCallback(
            newPoolInit,
            new FutureCallback<Void>() {
              @Override
              public void onSuccess(Void result) {
                logger.debug("Added connection pool for {}", host);
                future.set(true);
              }

              @Override
              public void onFailure(Throwable t) {
                if (t instanceof UnsupportedProtocolVersionException) {
                  cluster.manager.logUnsupportedVersionProtocol(
                      host, ((UnsupportedProtocolVersionException) t).getUnsupportedVersion());
                  cluster.manager.triggerOnDown(host, false);
                } else if (t instanceof ClusterNameMismatchException) {
                  ClusterNameMismatchException e = (ClusterNameMismatchException) t;
                  cluster.manager.logClusterNameMismatch(
                      host, e.expectedClusterName, e.actualClusterName);
                  cluster.manager.triggerOnDown(host, false);
                } else {
                  logger.warn("Error creating pool to " + host, t);
                  // do not mark the host down, as there could be other connections to it
                  // (e.g. the control connection, or another session pool).
                  // The conviction policy will mark it down if it has no more active connections.
                }
                // propagate errors; for all other exceptions, consider the pool init failed
                // but allow the session init process to continue normally
                if (t instanceof Error) future.setException(t);
                else future.set(false);
              }
            });
        return future;
      }
    }
  }

  CloseFuture removePool(Host host) {
    final HostConnectionPool pool = pools.remove(host);
    return pool == null ? CloseFuture.immediateFuture() : pool.closeAsync();
  }

  /*
   * When the set of live nodes changes, the load balancer may change its mind
   * about host distances. It might change them for the node that came or left,
   * but also for other nodes (for instance, if a node dies, another
   * previously ignored node may now be considered).
   *
   * This method ensures that all hosts for which a pool should exist
   * have one, and that hosts which shouldn't have one don't.
   */
  ListenableFuture<?> updateCreatedPools() {
    // This method does nothing during initialization. Some hosts may be non-responsive but not
    // yet marked DOWN; if we executed the code below we would try to create their pools over and
    // over again. It's called explicitly at the end of init(), once isInit has been set to true.
    if (!isInit) return MoreFutures.VOID_SUCCESS;

    // We do 2 iterations, so that we add missing pools first, and then remove all unnecessary
    // pools second. That way, we avoid situations where we would temporarily lose connectivity.
    final List<Host> toRemove = new ArrayList<Host>();
    List<ListenableFuture<Boolean>> poolCreatedFutures = Lists.newArrayList();

    for (Host h : cluster.getMetadata().allHosts()) {
      HostDistance dist = loadBalancingPolicy().distance(h);
      HostConnectionPool pool = pools.get(h);

      if (pool == null) {
        if (dist != HostDistance.IGNORED && h.state == Host.State.UP)
          poolCreatedFutures.add(maybeAddPool(h, null));
      } else if (dist != pool.hostDistance) {
        if (dist == HostDistance.IGNORED) {
          toRemove.add(h);
        } else {
          pool.hostDistance = dist;
          pool.ensureCoreConnections();
        }
      }
    }

    // Wait for pool creation before removing, so we don't lose connectivity
    ListenableFuture<List<Boolean>> allPoolsCreatedFuture = Futures.allAsList(poolCreatedFutures);

    return GuavaCompatibility.INSTANCE.transformAsync(
        allPoolsCreatedFuture,
        new AsyncFunction<Object, List<Void>>() {
          @Override
          public ListenableFuture<List<Void>> apply(Object input) throws Exception {
            List<ListenableFuture<Void>> poolRemovedFuture =
                Lists.newArrayListWithCapacity(toRemove.size());
            for (Host h : toRemove) poolRemovedFuture.add(removePool(h));

            return Futures.successfulAsList(poolRemovedFuture);
          }
        });
  }

  void updateCreatedPools(Host h) {
    HostDistance dist = loadBalancingPolicy().distance(h);
    HostConnectionPool pool = pools.get(h);

    try {
      if (pool == null) {
        if (dist != HostDistance.IGNORED && h.state == Host.State.UP) maybeAddPool(h, null).get();
      } else if (dist != pool.hostDistance) {
        if (dist == HostDistance.IGNORED) {
          removePool(h).get();
        } else {
          pool.hostDistance = dist;
          pool.ensureCoreConnections();
        }
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    } catch (ExecutionException e) {
      Throwable cause = e.getCause();
      logger.error("Unexpected error while refreshing connection pools", cause);
      if (cause instanceof Error) throw ((Error) cause);
    }
  }

  void onDown(Host host) throws InterruptedException, ExecutionException {
    // Note that with a well-behaved balancing policy (one that ignores dead nodes), the removePool
    // call is not necessary since updateCreatedPools should take care of it. But it's better to
    // protect against badly behaving policies.
    removePool(host).force().get();
    updateCreatedPools().get();
  }

  void onRemove(Host host) throws InterruptedException, ExecutionException {
    onDown(host);
  }

  Message.Request makeRequestMessage(Statement statement, ByteBuffer pagingState) {
    return makeRequestMessage(statement, pagingState, null, null);
  }

  Message.Request makeRequestMessage(
      Statement statement,
      ByteBuffer pagingState,
      ContinuousPagingOptions continuousPagingOptions) {
    return makeRequestMessage(statement, pagingState, continuousPagingOptions, null);
  }

  Message.Request makeRequestMessage(
      Statement statement,
      ByteBuffer pagingState,
      ContinuousPagingOptions continuousPagingOptions,
      String keyspace) {
    // We need the protocol version, which is only available once the cluster has initialized.
    // Initialize the session to ensure this is the case.
    // init() locks, so avoid if we know we don't need it.
    if (!isInit) init();
    ProtocolVersion protocolVersion = cluster.manager.protocolVersion();
    CodecRegistry codecRegistry = cluster.manager.configuration.getCodecRegistry();

    ConsistencyLevel consistency = statement.getConsistencyLevel();
    if (consistency == null) consistency = configuration().getQueryOptions().getConsistencyLevel();

    ConsistencyLevel serialConsistency = statement.getSerialConsistencyLevel();
    if (protocolVersion.compareTo(ProtocolVersion.V3) < 0 && statement instanceof BatchStatement) {
      if (serialConsistency != null)
        throw new UnsupportedFeatureException(
            protocolVersion, "Serial consistency on batch statements is not supported");
    } else if (serialConsistency == null)
      serialConsistency = configuration().getQueryOptions().getSerialConsistencyLevel();

    if (statement.getOutgoingPayload() != null && protocolVersion.compareTo(ProtocolVersion.V4) < 0)
      throw new UnsupportedFeatureException(
          protocolVersion, "Custom payloads are only supported since native protocol V4");

    long defaultTimestamp = Long.MIN_VALUE;
    if (protocolVersion.compareTo(ProtocolVersion.V3) >= 0) {
      defaultTimestamp = statement.getDefaultTimestamp();
      if (defaultTimestamp == Long.MIN_VALUE)
        defaultTimestamp = cluster.getConfiguration().getPolicies().getTimestampGenerator().next();
    }

    int fetchSize = statement.getFetchSize();
    ByteBuffer usedPagingState = pagingState;

    if (protocolVersion == ProtocolVersion.V1) {
      assert pagingState == null;
      // We don't let the user change the fetchSize globally if the proto v1 is used, so we just
      // need to check for the case of a per-statement override
      if (fetchSize <= 0) fetchSize = -1;
      else if (fetchSize != Integer.MAX_VALUE)
        throw new UnsupportedFeatureException(protocolVersion, "Paging is not supported");
    } else if (fetchSize <= 0) {
      fetchSize = configuration().getQueryOptions().getFetchSize();
    }

    if (fetchSize == Integer.MAX_VALUE) fetchSize = -1;

    // Override page size with continuous paging size if enabled
    if (continuousPagingOptions != null) fetchSize = continuousPagingOptions.getPageSize();

    if (pagingState == null) {
      usedPagingState = statement.getPagingState();
    }

    if (protocolVersion.compareTo(ProtocolVersion.DSE_V1) < 0 && continuousPagingOptions != null)
      throw new UnsupportedFeatureException(
          protocolVersion, "Continuous paging is only supported since native protocol DSE_V1");

    if (statement instanceof StatementWrapper)
      statement = ((StatementWrapper) statement).getWrappedStatement();

    Message.Request request;

    if (statement instanceof RegularStatement) {
      RegularStatement rs = (RegularStatement) statement;

      // It saddens me that we special-case the query builder here, but for now this is simpler.
      // We could provide a general API in RegularStatement instead at some point, but it's unclear
      // what the cleanest way to do that is right now (and it's probably not really that useful
      // anyway).
      if (protocolVersion == ProtocolVersion.V1
          && rs instanceof com.datastax.driver.core.querybuilder.BuiltStatement)
        ((com.datastax.driver.core.querybuilder.BuiltStatement) rs).setForceNoValues(true);

      ByteBuffer[] rawPositionalValues = rs.getValues(protocolVersion, codecRegistry);
      Map<String, ByteBuffer> rawNamedValues = rs.getNamedValues(protocolVersion, codecRegistry);

      if (protocolVersion == ProtocolVersion.V1
          && (rawPositionalValues != null || rawNamedValues != null))
        throw new UnsupportedFeatureException(protocolVersion, "Binary values are not supported");

      if (protocolVersion == ProtocolVersion.V2 && rawNamedValues != null)
        throw new UnsupportedFeatureException(protocolVersion, "Named values are not supported");

      ByteBuffer[] positionalValues =
          rawPositionalValues == null ? Requests.EMPTY_BB_ARRAY : rawPositionalValues;
      Map<String, ByteBuffer> namedValues =
          rawNamedValues == null ? Collections.<String, ByteBuffer>emptyMap() : rawNamedValues;

      String qString = rs.getQueryString(codecRegistry);

      Requests.QueryProtocolOptions options =
          new Requests.QueryProtocolOptions(
              Message.Request.Type.QUERY,
              consistency,
              positionalValues,
              namedValues,
              false,
              fetchSize,
              usedPagingState,
              serialConsistency,
              defaultTimestamp,
              statement.getKeyspace(),
              continuousPagingOptions);
      request = new Requests.Query(qString, options, statement.isTracing());
    } else if (statement instanceof BoundStatement) {
      BoundStatement bs = (BoundStatement) statement;
      if (!cluster.manager.preparedQueries.containsKey(
          bs.statement.getPreparedId().boundValuesMetadata.id)) {
        throw new InvalidQueryException(
            String.format(
                "Tried to execute unknown prepared query : %s. "
                    + "You may have used a PreparedStatement that was created with another Cluster instance.",
                bs.statement.getPreparedId().boundValuesMetadata.id));
      }
      if (protocolVersion.compareTo(ProtocolVersion.V4) < 0) bs.ensureAllSet();

      // skip resultset metadata if version > 1 (otherwise this feature is not supported)
      // and if we already have metadata for the prepared statement being executed.
      boolean skipMetadata =
          protocolVersion != ProtocolVersion.V1
              && bs.statement.getPreparedId().resultSetMetadata.variables != null;
      Requests.QueryProtocolOptions options =
          new Requests.QueryProtocolOptions(
              Message.Request.Type.EXECUTE,
              consistency,
              bs.wrapper.values,
              Collections.<String, ByteBuffer>emptyMap(),
              skipMetadata,
              fetchSize,
              usedPagingState,
              serialConsistency,
              defaultTimestamp,
              statement.getKeyspace(),
              continuousPagingOptions);
      request =
          new Requests.Execute(
              bs.statement.getPreparedId().boundValuesMetadata.id,
              bs.statement.getPreparedId().resultSetMetadata.id,
              options,
              statement.isTracing());
    } else {
      assert statement instanceof BatchStatement : statement;
      assert pagingState == null;

      if (protocolVersion == ProtocolVersion.V1)
        throw new UnsupportedFeatureException(
            protocolVersion, "Protocol level batching is not supported");

      BatchStatement bs = (BatchStatement) statement;
      if (protocolVersion.compareTo(ProtocolVersion.V4) < 0) bs.ensureAllSet();
      BatchStatement.IdAndValues idAndVals = bs.getIdAndValues(protocolVersion, codecRegistry);
      Requests.BatchProtocolOptions options =
          new Requests.BatchProtocolOptions(
              consistency, serialConsistency, defaultTimestamp, bs.getKeyspace());
      request =
          new Requests.Batch(
              bs.batchType, idAndVals.ids, idAndVals.values, options, statement.isTracing());
    }

    request.setCustomPayload(statement.getOutgoingPayload());
    return request;
  }

  /**
   * Execute the provided request.
   *
   * <p>This method will find a suitable node to connect to using the {@link LoadBalancingPolicy}
   * and handle host failover.
   */
  void execute(final RequestHandler.Callback callback, final Statement statement) {
    if (this.isClosed()) {
      callback.onException(
          null, new IllegalStateException("Could not send request, session is closed"), 0, 0);
      return;
    }
    if (isInit) new RequestHandler(this, callback, statement).sendRequest();
    else
      this.initAsync()
          .addListener(
              new Runnable() {
                @Override
                public void run() {
                  new RequestHandler(SessionManager.this, callback, statement).sendRequest();
                }
              },
              executor());
  }

  private ListenableFuture<PreparedStatement> prepare(
      final PreparedStatement statement, EndPoint toExclude) {
    final String query = statement.getQueryString();
    final String keyspace = statement.getQueryKeyspace();
    List<ListenableFuture<Response>> futures = Lists.newArrayListWithExpectedSize(pools.size());
    for (final Map.Entry<Host, HostConnectionPool> entry : pools.entrySet()) {
      if (entry.getKey().getEndPoint().equals(toExclude)) continue;

      try {
        // Preparing is not critical: if it fails, it will fix itself later when the user tries
        // to execute the prepared query. So don't wait if no connection is available, simply
        // abort.
        ListenableFuture<Connection> connectionFuture =
            entry.getValue().borrowConnection(0, TimeUnit.MILLISECONDS, 0);
        ListenableFuture<Response> prepareFuture =
            GuavaCompatibility.INSTANCE.transformAsync(
                connectionFuture,
                new AsyncFunction<Connection, Response>() {
                  @Override
                  public ListenableFuture<Response> apply(final Connection c) throws Exception {
                    Connection.Future responseFuture =
                        c.write(new Requests.Prepare(query, keyspace));
                    GuavaCompatibility.INSTANCE.addCallback(
                        responseFuture,
                        new FutureCallback<Response>() {
                          @Override
                          public void onSuccess(Response result) {
                            c.release();
                          }

                          @Override
                          public void onFailure(Throwable t) {
                            logger.debug(
                                String.format(
                                    "Unexpected error while preparing query (%s) on %s",
                                    query, entry.getKey()),
                                t);
                            c.release();
                          }
                        });
                    return responseFuture;
                  }
                });
        futures.add(prepareFuture);
      } catch (Exception e) {
        // Again, not being able to prepare the query right now is no big deal, so just ignore
      }
    }
    // Return the statement when all futures are done
    return GuavaCompatibility.INSTANCE.transform(
        Futures.successfulAsList(futures), Functions.constant(statement));
  }

  ResultSetFuture executeQuery(Message.Request msg, Statement statement) {
    DefaultResultSetFuture future =
        new DefaultResultSetFuture(
            this, configuration().getProtocolOptions().getProtocolVersion(), msg);
    execute(future, statement);
    return future;
  }

  void cleanupIdleConnections(long now) {
    for (HostConnectionPool pool : pools.values()) {
      pool.cleanupIdleConnections(now);
    }
  }

  private static class State implements Session.State {

    private final SessionManager session;
    private final List<Host> connectedHosts;
    private final int[] openConnections;
    private final int[] trashedConnections;
    private final int[] inFlightQueries;

    private State(SessionManager session) {
      this.session = session;
      this.connectedHosts = ImmutableList.copyOf(session.pools.keySet());
      this.openConnections = new int[connectedHosts.size()];
      this.trashedConnections = new int[connectedHosts.size()];
      this.inFlightQueries = new int[connectedHosts.size()];

      int i = 0;
      for (Host h : connectedHosts) {
        HostConnectionPool p = session.pools.get(h);
        // It's possible we race and the host has been removed since the beginning of this
        // function. In that case, the fact that it's part of getConnectedHosts() but has no open
        // connections will be slightly weird, but it's unlikely enough that we don't bother
        // avoiding it.
        if (p == null) {
          openConnections[i] = 0;
          trashedConnections[i] = 0;
          inFlightQueries[i] = 0;
          i++;
          continue;
        }

        openConnections[i] = p.opened();
        inFlightQueries[i] = p.totalInFlight.get();
        trashedConnections[i] = p.trashed();
        i++;
      }
    }

    private int getIdx(Host h) {
      // We guarantee that we only ever create one Host object per-address, which means that '=='
      // comparison is a proper way to test Host equality. Given that, the number of hosts
      // per-session will always be small enough (even 1000 is kind of small and even with a 1000+
      // node cluster, you probably don't want a Session to connect to all of them) that iterating
      // over connectedHosts will never be much more inefficient than keeping a
      // Map<Host, ...>. And it's less garbage/memory consumption so...
      for (int i = 0; i < connectedHosts.size(); i++) if (h == connectedHosts.get(i)) return i;

      return -1;
    }

    @Override
    public Session getSession() {
      return session;
    }

    @Override
    public Collection<Host> getConnectedHosts() {
      return connectedHosts;
    }

    @Override
    public int getOpenConnections(Host host) {
      int i = getIdx(host);
      return i < 0 ? 0 : openConnections[i];
    }

    @Override
    public int getTrashedConnections(Host host) {
      int i = getIdx(host);
      return i < 0 ? 0 : trashedConnections[i];
    }

    @Override
    public int getInFlightQueries(Host host) {
      int i = getIdx(host);
      return i < 0 ? 0 : inFlightQueries[i];
    }
  }
}
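For reference, a short sketch of reading the Session.State snapshot produced by the State class above. The helper method name and the "session" parameter are illustrative; it assumes an already-initialized Session from this driver and a class that imports com.datastax.driver.core.Host and com.datastax.driver.core.Session.

  // Illustrative helper (not part of SessionManager): logs per-host pool statistics.
  static void logPoolState(Session session) {
    Session.State state = session.getState();
    for (Host host : state.getConnectedHosts()) {
      System.out.printf(
          "%s open=%d inFlight=%d trashed=%d%n",
          host,
          state.getOpenConnections(host),
          state.getInFlightQueries(host),
          state.getTrashedConnections(host));
    }
  }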