/*
 * Copyright 2017 Google Inc. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package nl.topicus.jdbc.shaded.com.google.cloud.spanner;

import static nl.topicus.jdbc.shaded.com.google.cloud.spanner.SpannerExceptionFactory.newSpannerException;
import static nl.topicus.jdbc.shaded.com.google.cloud.spanner.SpannerExceptionFactory.newSpannerExceptionForCancellation;
import static nl.topicus.jdbc.shaded.com.google.common.base.Preconditions.checkArgument;
import static nl.topicus.jdbc.shaded.com.google.common.base.Preconditions.checkNotNull;
import static nl.topicus.jdbc.shaded.com.google.common.base.Preconditions.checkState;

import nl.topicus.jdbc.shaded.com.google.api.client.util.BackOff;
import nl.topicus.jdbc.shaded.com.google.api.client.util.ExponentialBackOff;
import nl.topicus.jdbc.shaded.com.google.api.gax.paging.Page;
import nl.topicus.jdbc.shaded.com.google.api.pathtemplate.PathTemplate;
import nl.topicus.jdbc.shaded.com.google.cloud.BaseService;
import nl.topicus.jdbc.shaded.com.google.cloud.ByteArray;
import nl.topicus.jdbc.shaded.com.google.cloud.Date;
import nl.topicus.jdbc.shaded.com.google.cloud.PageImpl;
import nl.topicus.jdbc.shaded.com.google.cloud.PageImpl.NextPageFetcher;
import nl.topicus.jdbc.shaded.com.google.cloud.Timestamp;
import nl.topicus.jdbc.shaded.com.google.cloud.spanner.Operation.Parser;
import nl.topicus.jdbc.shaded.com.google.cloud.spanner.Options.ListOption;
import nl.topicus.jdbc.shaded.com.google.cloud.spanner.Options.QueryOption;
import nl.topicus.jdbc.shaded.com.google.cloud.spanner.Options.ReadOption;
import nl.topicus.jdbc.shaded.com.google.cloud.spanner.spi.v1.SpannerRpc;
import nl.topicus.jdbc.shaded.com.google.cloud.spanner.spi.v1.SpannerRpc.Paginated;
import nl.topicus.jdbc.shaded.com.google.common.annotations.VisibleForTesting;
import nl.topicus.jdbc.shaded.com.google.common.base.Function;
import nl.topicus.jdbc.shaded.com.google.common.base.Preconditions;
import nl.topicus.jdbc.shaded.com.google.common.base.Throwables;
import nl.topicus.jdbc.shaded.com.google.common.collect.AbstractIterator;
import nl.topicus.jdbc.shaded.com.google.common.collect.ImmutableMap;
import nl.topicus.jdbc.shaded.com.google.common.collect.Lists;
import nl.topicus.jdbc.shaded.com.google.common.collect.Maps;
import nl.topicus.jdbc.shaded.com.google.common.util.concurrent.Futures;
import nl.topicus.jdbc.shaded.com.google.common.util.concurrent.ListenableFuture;
import nl.topicus.jdbc.shaded.com.google.common.util.concurrent.Uninterruptibles;
import nl.topicus.jdbc.shaded.com.google.protobuf.Any;
import nl.topicus.jdbc.shaded.com.google.protobuf.ByteString;
import nl.topicus.jdbc.shaded.com.google.protobuf.FieldMask;
import nl.topicus.jdbc.shaded.com.google.protobuf.InvalidProtocolBufferException;
import nl.topicus.jdbc.shaded.com.google.protobuf.ListValue;
import nl.topicus.jdbc.shaded.com.google.protobuf.Message;
import nl.topicus.jdbc.shaded.com.google.protobuf.Value.KindCase;
import nl.topicus.jdbc.shaded.com.google.spanner.admin.database.v1.CreateDatabaseMetadata;
import nl.topicus.jdbc.shaded.com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata;
import nl.topicus.jdbc.shaded.com.google.spanner.admin.instance.v1.CreateInstanceMetadata;
import nl.topicus.jdbc.shaded.com.google.spanner.admin.instance.v1.UpdateInstanceMetadata;
import nl.topicus.jdbc.shaded.com.google.spanner.v1.BeginTransactionRequest;
import nl.topicus.jdbc.shaded.com.google.spanner.v1.CommitRequest;
import nl.topicus.jdbc.shaded.com.google.spanner.v1.CommitResponse;
import nl.topicus.jdbc.shaded.com.google.spanner.v1.ExecuteSqlRequest;
import nl.topicus.jdbc.shaded.com.google.spanner.v1.ExecuteSqlRequest.QueryMode;
import nl.topicus.jdbc.shaded.com.google.spanner.v1.PartialResultSet;
import nl.topicus.jdbc.shaded.com.google.spanner.v1.ReadRequest;
import nl.topicus.jdbc.shaded.com.google.spanner.v1.ResultSetMetadata;
import nl.topicus.jdbc.shaded.com.google.spanner.v1.ResultSetStats;
import nl.topicus.jdbc.shaded.com.google.spanner.v1.RollbackRequest;
import nl.topicus.jdbc.shaded.com.google.spanner.v1.Transaction;
import nl.topicus.jdbc.shaded.com.google.spanner.v1.TransactionOptions;
import nl.topicus.jdbc.shaded.com.google.spanner.v1.TransactionSelector;
import nl.topicus.jdbc.shaded.com.google.spanner.v1.TypeCode;
import nl.topicus.jdbc.shaded.io.grpc.Context;
import nl.topicus.jdbc.shaded.io.grpc.ManagedChannel;
import java.io.IOException;
import java.io.Serializable;
import java.util.AbstractList;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.UUID;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import nl.topicus.jdbc.shaded.javax.annotation.Nullable;
import nl.topicus.jdbc.shaded.javax.annotation.concurrent.GuardedBy;

/** Default implementation of the Cloud Spanner interface. */
class SpannerImpl extends BaseService<SpannerOptions> implements Spanner {
  private static final int MIN_BACKOFF_MS = 1000;
  private static final int MAX_BACKOFF_MS = 32000;
  private static final PathTemplate OP_NAME_TEMPLATE =
      PathTemplate.create(
          "projects/{project}/instances/{instance}/databases/{database}/operations/{operation}");
  private static final PathTemplate PROJECT_NAME_TEMPLATE =
      PathTemplate.create("projects/{project}");

  private static final Logger logger = Logger.getLogger(SpannerImpl.class.getName());
  private static final Logger txnLogger = Logger.getLogger(TransactionRunner.class.getName());

  private final Random random = new Random();
  private final SpannerRpc rpc;
  private final int defaultPrefetchChunks;

  @GuardedBy("this")
  private final Map<DatabaseId, DatabaseClientImpl> dbClients = new HashMap<>();

  private final DatabaseAdminClient dbAdminClient = new DatabaseAdminClientImpl();
  private final InstanceAdminClient instanceClient = new InstanceAdminClientImpl(dbAdminClient);

  @GuardedBy("this")
  private boolean spannerIsClosed = false;

  SpannerImpl(SpannerRpc rpc, int defaultPrefetchChunks, SpannerOptions options) {
    super(options);
    this.rpc = rpc;
    this.defaultPrefetchChunks = defaultPrefetchChunks;
  }

  SpannerImpl(SpannerOptions options) {
    this(options.getSpannerRpcV1(), options.getPrefetchChunks(), options);
  }

  private static ExponentialBackOff newBackOff() {
    return new ExponentialBackOff.Builder()
        .setInitialIntervalMillis(MIN_BACKOFF_MS)
        .setMaxIntervalMillis(MAX_BACKOFF_MS)
        .setMaxElapsedTimeMillis(Integer.MAX_VALUE) // Prevent Backoff.STOP from getting returned.
        .build();
  }
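  // For illustration: assuming the google-http-client ExponentialBackOff defaults (multiplier
  // 1.5, randomization factor 0.5), the un-jittered wait sequence produced by newBackOff() would
  // be roughly 1000ms, 1500ms, 2250ms, ... capped at MAX_BACKOFF_MS = 32000ms; actual sleeps are
  // randomized around each interval. Those default values are library assumptions, not set here.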

  private static void backoffSleep(Context context, BackOff backoff) throws SpannerException {
    backoffSleep(context, nextBackOffMillis(backoff));
  }

  private static long nextBackOffMillis(BackOff backoff) throws SpannerException {
    try {
      return backoff.nextBackOffMillis();
    } catch (IOException e) {
      throw newSpannerException(ErrorCode.INTERNAL, e.getMessage(), e);
    }
  }

  private static void backoffSleep(Context context, long backoffMillis) throws SpannerException {
    final CountDownLatch latch = new CountDownLatch(1);
    final Context.CancellationListener listener =
        new Context.CancellationListener() {
          @Override
          public void cancelled(Context context) {
            // Wakeup on cancellation / DEADLINE_EXCEEDED.
            latch.countDown();
          }
        };

    context.addListener(listener, DirectExecutor.INSTANCE);
    try {
      if (backoffMillis == BackOff.STOP) {
        // Highly unlikely but we handle it just in case.
        backoffMillis = MAX_BACKOFF_MS;
      }
      if (latch.await(backoffMillis, TimeUnit.MILLISECONDS)) {
        // Woken by context cancellation.
        throw newSpannerExceptionForCancellation(context, null);
      }
    } catch (InterruptedException interruptExcept) {
      throw newSpannerExceptionForCancellation(context, interruptExcept);
    } finally {
      context.removeListener(listener);
    }
  }

  /**
   * Helper to execute some work, retrying with backoff on retryable errors.
   *
   * <p>TODO: Consider replacing with RetryHelper from gcloud-core.
   */
  static <T> T runWithRetries(Callable<T> callable) {
    // Use same backoff setting as abort, somewhat arbitrarily.
    ExponentialBackOff backOff = newBackOff();
    Context context = Context.current();
    while (true) {
      try {
        return callable.call();
      } catch (SpannerException e) {
        if (!e.isRetryable()) {
          throw e;
        }
        logger.log(Level.FINE, "Retryable exception, will sleep and retry", e);
        backoffSleep(context, backOff);
      } catch (Exception e) {
        throw Throwables.propagate(e);
      }
    }
  }

  Session createSession(final DatabaseId db) throws SpannerException {
    final Map<SpannerRpc.Option, ?> options =
        optionMap(SessionOption.channelHint(random.nextLong()));
    nl.topicus.jdbc.shaded.com.google.spanner.v1.Session session =
        runWithRetries(
            new Callable<nl.topicus.jdbc.shaded.com.google.spanner.v1.Session>() {
              @Override
              public nl.topicus.jdbc.shaded.com.google.spanner.v1.Session call() throws Exception {
                return rpc.createSession(db.getName(), options);
              }
            });
    return new SessionImpl(session.getName(), options);
  }

  @Override
  public DatabaseAdminClient getDatabaseAdminClient() {
    return dbAdminClient;
  }

  @Override
  public InstanceAdminClient getInstanceAdminClient() {
    return instanceClient;
  }

  @Override
  public DatabaseClient getDatabaseClient(DatabaseId db) {
    synchronized (this) {
      Preconditions.checkState(!spannerIsClosed, "Cloud Spanner client has been closed");
      if (dbClients.containsKey(db)) {
        return dbClients.get(db);
      } else {
        SessionPool pool = SessionPool.createPool(getOptions(), db, SpannerImpl.this);
        DatabaseClientImpl dbClient = new DatabaseClientImpl(pool);
        dbClients.put(db, dbClient);
        return dbClient;
      }
    }
  }

  @Override
  public void close() {
    List<ListenableFuture<Void>> closureFutures = null;
    synchronized (this) {
      Preconditions.checkState(!spannerIsClosed, "Cloud Spanner client has been closed");
      spannerIsClosed = true;
      closureFutures = new ArrayList<>();
      for (DatabaseClientImpl dbClient : dbClients.values()) {
        closureFutures.add(dbClient.closeAsync());
      }
      dbClients.clear();
    }
    try {
      Futures.successfulAsList(closureFutures).get();
    } catch (InterruptedException | ExecutionException e) {
      throw SpannerExceptionFactory.newSpannerException(e);
    }
    for (ManagedChannel channel : getOptions().getRpcChannels()) {
      try {
        channel.shutdown();
      } catch (RuntimeException e) {
        logger.log(Level.WARNING, "Failed to close channel", e);
      }
    }
  }

  /**
   * Checks that the current context is still valid, throwing a CANCELLED or DEADLINE_EXCEEDED
   * error if not.
   */
  private static void checkContext(Context context) {
    if (context.isCancelled()) {
      throw newSpannerExceptionForCancellation(context, null);
    }
  }

  /**
   * Encapsulates state to be passed to the {@link SpannerRpc} layer for a given session. Currently
   * used to select the {@link nl.topicus.jdbc.shaded.io.grpc.Channel} to be used in issuing the
   * RPCs in a Session.
   */
  static class SessionOption {
    private final SpannerRpc.Option rpcOption;
    private final Object value;

    SessionOption(SpannerRpc.Option option, Object value) {
      this.rpcOption = checkNotNull(option);
      this.value = value;
    }

    static SessionOption channelHint(long hint) {
      return new SessionOption(SpannerRpc.Option.CHANNEL_HINT, hint);
    }

    SpannerRpc.Option rpcOption() {
      return rpcOption;
    }

    Object value() {
      return value;
    }
  }

  static Map<SpannerRpc.Option, ?> optionMap(SessionOption... options) {
    if (options.length == 0) {
      return Collections.emptyMap();
    }
    Map<SpannerRpc.Option, Object> tmp = Maps.newEnumMap(SpannerRpc.Option.class);
    for (SessionOption option : options) {
      Object prev = tmp.put(option.rpcOption(), option.value());
      checkArgument(prev == null, "Duplicate option %s", option.rpcOption());
    }
    return ImmutableMap.copyOf(tmp);
  }
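  // For illustration, a sketch of how the two helpers above combine: an idempotent RPC is
  // wrapped in a Callable and handed to runWithRetries(), while optionMap() pins the call to a
  // channel. The database name below is hypothetical; the pattern mirrors createSession().
  //
  //   final Map<SpannerRpc.Option, ?> opts = optionMap(SessionOption.channelHint(42L));
  //   String name =
  //       runWithRetries(
  //           new Callable<String>() {
  //             @Override
  //             public String call() throws Exception {
  //               return rpc.createSession(
  //                   "projects/p/instances/i/databases/d", opts).getName();
  //             }
  //           });
  //
  // Non-retryable SpannerExceptions propagate immediately; other checked exceptions are
  // rethrown via Throwables.propagate().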
  private String getProjectId() {
    return getOptions().getProjectId();
  }

  private String getInstanceName(String instanceId) {
    return new InstanceId(getProjectId(), instanceId).getName();
  }

  private String getDatabaseName(String instanceId, String databaseId) {
    return new DatabaseId(new InstanceId(getProjectId(), instanceId), databaseId).getName();
  }

  private <T extends Message> T unpack(Any response, Class<T> clazz) throws SpannerException {
    try {
      return response.unpack(clazz);
    } catch (InvalidProtocolBufferException e) {
      throw SpannerExceptionFactory.newSpannerException(
          ErrorCode.INTERNAL, "Error unpacking response", e);
    }
  }

  private abstract class PageFetcher<S, T> implements NextPageFetcher<S> {
    private String nextPageToken;

    @Override
    public Page<S> getNextPage() {
      Paginated<T> nextPage =
          runWithRetries(
              new Callable<Paginated<T>>() {
                @Override
                public Paginated<T> call() {
                  return getNextPage(nextPageToken);
                }
              });
      this.nextPageToken = nextPage.getNextPageToken();
      List<S> results = new ArrayList<>();
      for (T proto : nextPage.getResults()) {
        results.add(fromProto(proto));
      }
      return new PageImpl<S>(this, nextPageToken, results);
    }

    abstract Paginated<T> getNextPage(@Nullable String nextPageToken);

    abstract S fromProto(T proto);
  }

  private String randomOperationId() {
    UUID uuid = UUID.randomUUID();
    return ("r" + uuid.toString()).replace("-", "_");
  }

  class DatabaseAdminClientImpl implements DatabaseAdminClient {
    @Override
    public Operation<Database, CreateDatabaseMetadata> createDatabase(
        String instanceId, String databaseId, Iterable<String> statements)
        throws SpannerException {
      // CreateDatabase() is not idempotent, so we're not retrying this request.
      String instanceName = getInstanceName(instanceId);
      String createStatement = "CREATE DATABASE `" + databaseId + "`";
      nl.topicus.jdbc.shaded.com.google.longrunning.Operation op =
          rpc.createDatabase(instanceName, createStatement, statements);
      return Operation.create(
          rpc,
          op,
          new Parser<Database, CreateDatabaseMetadata>() {
            @Override
            public Database parseResult(Any response) {
              return Database.fromProto(
                  unpack(response, nl.topicus.jdbc.shaded.com.google.spanner.admin.database.v1.Database.class),
                  DatabaseAdminClientImpl.this);
            }

            @Override
            public CreateDatabaseMetadata parseMetadata(Any metadata) {
              return unpack(metadata, CreateDatabaseMetadata.class);
            }
          });
    }

    @Override
    public Database getDatabase(String instanceId, String databaseId) throws SpannerException {
      final String dbName = getDatabaseName(instanceId, databaseId);
      Callable<Database> callable =
          new Callable<Database>() {
            @Override
            public Database call() throws Exception {
              return Database.fromProto(rpc.getDatabase(dbName), DatabaseAdminClientImpl.this);
            }
          };
      return runWithRetries(callable);
    }

    @Override
    public Operation<Void, UpdateDatabaseDdlMetadata> updateDatabaseDdl(
        final String instanceId,
        final String databaseId,
        final Iterable<String> statements,
        @Nullable String operationId)
        throws SpannerException {
      final String dbName = getDatabaseName(instanceId, databaseId);
      final String opId = operationId != null ? operationId : randomOperationId();
      Callable<Operation<Void, UpdateDatabaseDdlMetadata>> callable =
          new Callable<Operation<Void, UpdateDatabaseDdlMetadata>>() {
            @Override
            public Operation<Void, UpdateDatabaseDdlMetadata> call() {
              nl.topicus.jdbc.shaded.com.google.longrunning.Operation op = null;
              try {
                op = rpc.updateDatabaseDdl(dbName, statements, opId);
              } catch (SpannerException e) {
                if (e.getErrorCode() == ErrorCode.ALREADY_EXISTS) {
                  String opName =
                      OP_NAME_TEMPLATE.instantiate(
                          "project", getProjectId(),
                          "instance", instanceId,
                          "database", databaseId,
                          "operation", opId);
                  op =
                      nl.topicus.jdbc.shaded.com.google.longrunning.Operation.newBuilder()
                          .setName(opName)
                          .build();
                } else {
                  throw e;
                }
              }
              return Operation.create(
                  rpc,
                  op,
                  new Parser<Void, UpdateDatabaseDdlMetadata>() {
                    @Override
                    public Void parseResult(Any response) {
                      return null;
                    }

                    @Override
                    public UpdateDatabaseDdlMetadata parseMetadata(Any metadata) {
                      return unpack(metadata, UpdateDatabaseDdlMetadata.class);
                    }
                  });
            }
          };
      return runWithRetries(callable);
    }
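    // For illustration: because a stable operationId is part of the request, a retried
    // updateDatabaseDdl() that hits ALREADY_EXISTS resolves to the operation started by the
    // first attempt instead of failing. A caller could lean on that like this (hypothetical
    // instance, database, and statement values):
    //
    //   dbAdminClient.updateDatabaseDdl(
    //       "my-instance",
    //       "my-database",
    //       Collections.singletonList("ALTER TABLE Albums ADD COLUMN MarketingBudget INT64"),
    //       "add_marketing_budget_column");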
    @Override
    public void dropDatabase(String instanceId, String databaseId) throws SpannerException {
      final String dbName = getDatabaseName(instanceId, databaseId);
      Callable<Void> callable =
          new Callable<Void>() {
            @Override
            public Void call() throws Exception {
              rpc.dropDatabase(dbName);
              return null;
            }
          };
      runWithRetries(callable);
    }

    @Override
    public List<String> getDatabaseDdl(String instanceId, String databaseId) {
      final String dbName = getDatabaseName(instanceId, databaseId);
      Callable<List<String>> callable =
          new Callable<List<String>>() {
            @Override
            public List<String> call() throws Exception {
              return rpc.getDatabaseDdl(dbName);
            }
          };
      return runWithRetries(callable);
    }

    @Override
    public Page<Database> listDatabases(String instanceId, ListOption... options) {
      final String instanceName = getInstanceName(instanceId);
      final Options listOptions = Options.fromListOptions(options);
      Preconditions.checkArgument(
          !listOptions.hasFilter(), "Filter option is not supported by listDatabases");
      final int pageSize = listOptions.hasPageSize() ? listOptions.pageSize() : 0;
      PageFetcher<Database, nl.topicus.jdbc.shaded.com.google.spanner.admin.database.v1.Database> pageFetcher =
          new PageFetcher<Database, nl.topicus.jdbc.shaded.com.google.spanner.admin.database.v1.Database>() {
            @Override
            public Paginated<nl.topicus.jdbc.shaded.com.google.spanner.admin.database.v1.Database> getNextPage(
                String nextPageToken) {
              return rpc.listDatabases(instanceName, pageSize, nextPageToken);
            }

            @Override
            public Database fromProto(
                nl.topicus.jdbc.shaded.com.google.spanner.admin.database.v1.Database proto) {
              return Database.fromProto(proto, DatabaseAdminClientImpl.this);
            }
          };
      if (listOptions.hasPageToken()) {
        pageFetcher.nextPageToken = listOptions.pageToken();
      }
      return pageFetcher.getNextPage();
    }
  }

  class InstanceAdminClientImpl implements InstanceAdminClient {
    final DatabaseAdminClient dbClient;

    InstanceAdminClientImpl(DatabaseAdminClient dbClient) {
      this.dbClient = dbClient;
    }

    @Override
    public InstanceConfig getInstanceConfig(String configId) throws SpannerException {
      final String instanceConfigName = new InstanceConfigId(getProjectId(), configId).getName();
      return runWithRetries(
          new Callable<InstanceConfig>() {
            @Override
            public InstanceConfig call() {
              return InstanceConfig.fromProto(
                  rpc.getInstanceConfig(instanceConfigName), InstanceAdminClientImpl.this);
            }
          });
    }

    @Override
    public Page<InstanceConfig> listInstanceConfigs(ListOption... options) {
      final Options listOptions = Options.fromListOptions(options);
      Preconditions.checkArgument(
          !listOptions.hasFilter(), "Filter option is not supported by listInstanceConfigs");
      final int pageSize = listOptions.hasPageSize() ? listOptions.pageSize() : 0;
      PageFetcher<InstanceConfig, nl.topicus.jdbc.shaded.com.google.spanner.admin.instance.v1.InstanceConfig> pageFetcher =
          new PageFetcher<InstanceConfig, nl.topicus.jdbc.shaded.com.google.spanner.admin.instance.v1.InstanceConfig>() {
            @Override
            public Paginated<nl.topicus.jdbc.shaded.com.google.spanner.admin.instance.v1.InstanceConfig> getNextPage(
                String nextPageToken) {
              return rpc.listInstanceConfigs(pageSize, nextPageToken);
            }

            @Override
            public InstanceConfig fromProto(
                nl.topicus.jdbc.shaded.com.google.spanner.admin.instance.v1.InstanceConfig proto) {
              return InstanceConfig.fromProto(proto, InstanceAdminClientImpl.this);
            }
          };
      if (listOptions.hasPageToken()) {
        pageFetcher.nextPageToken = listOptions.pageToken();
      }
      return pageFetcher.getNextPage();
    }
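    // For illustration: the Page objects returned by the list methods above resume from the
    // stored nextPageToken, so a caller can walk all results page by page. A sketch, assuming
    // the gax Page surface (getValues()/hasNextPage()/getNextPage()) and hypothetical values:
    //
    //   Page<InstanceConfig> page = instanceAdminClient.listInstanceConfigs(Options.pageSize(10));
    //   while (page != null) {
    //     for (InstanceConfig config : page.getValues()) {
    //       System.out.println(config.getId());
    //     }
    //     page = page.hasNextPage() ? page.getNextPage() : null;
    //   }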
    @Override
    public Operation<Instance, CreateInstanceMetadata> createInstance(InstanceInfo instance)
        throws SpannerException {
      String projectName = PROJECT_NAME_TEMPLATE.instantiate("project", getProjectId());
      nl.topicus.jdbc.shaded.com.google.longrunning.Operation op =
          rpc.createInstance(projectName, instance.getId().getInstance(), instance.toProto());
      return Operation.create(
          rpc,
          op,
          new Parser<Instance, CreateInstanceMetadata>() {
            @Override
            public Instance parseResult(Any response) {
              return Instance.fromProto(
                  unpack(response, nl.topicus.jdbc.shaded.com.google.spanner.admin.instance.v1.Instance.class),
                  InstanceAdminClientImpl.this,
                  dbClient);
            }

            @Override
            public CreateInstanceMetadata parseMetadata(Any metadata) {
              return unpack(metadata, CreateInstanceMetadata.class);
            }
          });
    }

    @Override
    public Instance getInstance(String instanceId) throws SpannerException {
      final String instanceName = new InstanceId(getProjectId(), instanceId).getName();
      return runWithRetries(
          new Callable<Instance>() {
            @Override
            public Instance call() {
              return Instance.fromProto(
                  rpc.getInstance(instanceName), InstanceAdminClientImpl.this, dbClient);
            }
          });
    }

    @Override
    public Page<Instance> listInstances(ListOption... options) throws SpannerException {
      final Options listOptions = Options.fromListOptions(options);
      final int pageSize = listOptions.hasPageSize() ? listOptions.pageSize() : 0;
      final String filter = listOptions.filter();
      PageFetcher<Instance, nl.topicus.jdbc.shaded.com.google.spanner.admin.instance.v1.Instance> pageFetcher =
          new PageFetcher<Instance, nl.topicus.jdbc.shaded.com.google.spanner.admin.instance.v1.Instance>() {
            @Override
            public Paginated<nl.topicus.jdbc.shaded.com.google.spanner.admin.instance.v1.Instance> getNextPage(
                String nextPageToken) {
              return rpc.listInstances(pageSize, nextPageToken, filter);
            }

            @Override
            public Instance fromProto(
                nl.topicus.jdbc.shaded.com.google.spanner.admin.instance.v1.Instance proto) {
              return Instance.fromProto(proto, InstanceAdminClientImpl.this, dbClient);
            }
          };
      if (listOptions.hasPageToken()) {
        pageFetcher.nextPageToken = listOptions.pageToken();
      }
      return pageFetcher.getNextPage();
    }

    @Override
    public void deleteInstance(final String instanceId) throws SpannerException {
      runWithRetries(
          new Callable<Void>() {
            @Override
            public Void call() {
              rpc.deleteInstance(new InstanceId(getProjectId(), instanceId).getName());
              return null;
            }
          });
    }

    @Override
    public Operation<Instance, UpdateInstanceMetadata> updateInstance(
        InstanceInfo instance, InstanceInfo.InstanceField... fieldsToUpdate) {
      FieldMask fieldMask =
          fieldsToUpdate.length == 0
              ? InstanceInfo.InstanceField.toFieldMask(InstanceInfo.InstanceField.values())
              : InstanceInfo.InstanceField.toFieldMask(fieldsToUpdate);
      nl.topicus.jdbc.shaded.com.google.longrunning.Operation op =
          rpc.updateInstance(instance.toProto(), fieldMask);
      return Operation.create(
          rpc,
          op,
          new Parser<Instance, UpdateInstanceMetadata>() {
            @Override
            public Instance parseResult(Any response) {
              return Instance.fromProto(
                  unpack(response, nl.topicus.jdbc.shaded.com.google.spanner.admin.instance.v1.Instance.class),
                  InstanceAdminClientImpl.this,
                  dbClient);
            }

            @Override
            public UpdateInstanceMetadata parseMetadata(Any metadata) {
              return unpack(metadata, UpdateInstanceMetadata.class);
            }
          });
    }
    @Override
    public Instance.Builder newInstanceBuilder(InstanceId id) {
      return new Instance.Builder(this, dbClient, id);
    }
  }

  class SessionImpl implements Session {
    private final String name;
    private SessionTransaction activeTransaction;
    private ByteString readyTransactionId;
    private final Map<SpannerRpc.Option, ?> options;

    SessionImpl(String name, Map<SpannerRpc.Option, ?> options) {
      this.options = options;
      this.name = checkNotNull(name);
    }

    @Override
    public String getName() {
      return name;
    }

    @Override
    public Timestamp write(Iterable<Mutation> mutations) throws SpannerException {
      TransactionRunner runner = readWriteTransaction();
      final Collection<Mutation> finalMutations =
          mutations instanceof java.util.Collection<?>
              ? (Collection<Mutation>) mutations
              : Lists.newArrayList(mutations);
      runner.run(
          new TransactionRunner.TransactionCallable<Void>() {
            @Override
            public Void run(TransactionContext ctx) {
              ctx.buffer(finalMutations);
              return null;
            }
          });
      return runner.getCommitTimestamp();
    }

    @Override
    public Timestamp writeAtLeastOnce(Iterable<Mutation> mutations) throws SpannerException {
      setActive(null);
      List<nl.topicus.jdbc.shaded.com.google.spanner.v1.Mutation> mutationsProto = new ArrayList<>();
      Mutation.toProto(mutations, mutationsProto);
      final CommitRequest request =
          CommitRequest.newBuilder()
              .setSession(name)
              .addAllMutations(mutationsProto)
              .setSingleUseTransaction(
                  TransactionOptions.newBuilder()
                      .setReadWrite(TransactionOptions.ReadWrite.getDefaultInstance()))
              .build();
      CommitResponse response =
          runWithRetries(
              new Callable<CommitResponse>() {
                @Override
                public CommitResponse call() throws Exception {
                  return rpc.commit(request, options);
                }
              });
      try {
        return Timestamp.fromProto(response.getCommitTimestamp());
      } catch (IllegalArgumentException e) {
        throw newSpannerException(ErrorCode.INTERNAL, "Could not parse commit timestamp", e);
      }
    }

    @Override
    public ReadContext singleUse() {
      return singleUse(TimestampBound.strong());
    }

    @Override
    public ReadContext singleUse(TimestampBound bound) {
      return setActive(new SingleReadContext(this, bound, rpc, defaultPrefetchChunks));
    }

    @Override
    public ReadOnlyTransaction singleUseReadOnlyTransaction() {
      return singleUseReadOnlyTransaction(TimestampBound.strong());
    }

    @Override
    public ReadOnlyTransaction singleUseReadOnlyTransaction(TimestampBound bound) {
      return setActive(new SingleUseReadOnlyTransaction(this, bound, rpc, defaultPrefetchChunks));
    }

    @Override
    public ReadOnlyTransaction readOnlyTransaction() {
      return readOnlyTransaction(TimestampBound.strong());
    }

    @Override
    public ReadOnlyTransaction readOnlyTransaction(TimestampBound bound) {
      return setActive(new MultiUseReadOnlyTransaction(this, bound, rpc, defaultPrefetchChunks));
    }

    @Override
    public TransactionRunner readWriteTransaction() {
      return setActive(new TransactionRunnerImpl(this, rpc, defaultPrefetchChunks));
    }

    @Override
    public void prepareReadWriteTransaction() {
      setActive(null);
      readyTransactionId = beginTransaction();
    }

    @Override
    public void close() {
      runWithRetries(
          new Callable<Void>() {
            @Override
            public Void call() throws Exception {
              rpc.deleteSession(name, options);
              return null;
            }
          });
    }
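    // For illustration: write() above runs a full read-write transaction and is retried on
    // ABORTED, while writeAtLeastOnce() sends a single-use commit whose effects may be applied
    // more than once if the RPC layer retries. A caller choosing between them might write
    // (hypothetical table and column values):
    //
    //   session.write(Arrays.asList(
    //       Mutation.newInsertBuilder("Singers").set("SingerId").to(1L).build()));
    //   // vs. the cheaper at-least-once variant for idempotent mutations:
    //   session.writeAtLeastOnce(Arrays.asList(
    //       Mutation.newUpdateBuilder("Singers").set("SingerId").to(1L).build()));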
    ByteString beginTransaction() {
      final BeginTransactionRequest request =
          BeginTransactionRequest.newBuilder()
              .setSession(name)
              .setOptions(
                  TransactionOptions.newBuilder()
                      .setReadWrite(TransactionOptions.ReadWrite.getDefaultInstance()))
              .build();
      Transaction txn =
          runWithRetries(
              new Callable<Transaction>() {
                @Override
                public Transaction call() throws Exception {
                  return rpc.beginTransaction(request, options);
                }
              });
      if (txn.getId().isEmpty()) {
        throw newSpannerException(ErrorCode.INTERNAL, "Missing id in transaction\n" + getName());
      }
      return txn.getId();
    }

    private <T extends SessionTransaction> T setActive(@Nullable T ctx) {
      if (activeTransaction != null) {
        activeTransaction.invalidate();
      }
      activeTransaction = ctx;
      readyTransactionId = null;
      return ctx;
    }
  }

  /**
   * Represents a transaction within a session. "Transaction" here is used in the general sense,
   * which covers standalone reads, standalone writes, single-use and multi-use read-only
   * transactions, and read-write transactions. The defining characteristic is that a session may
   * only have one such transaction active at a time.
   */
  private interface SessionTransaction {
    /** Invalidates the transaction, generally because a new one has been started on the session. */
    void invalidate();
  }

  private abstract static class AbstractReadContext
      implements ReadContext, AbstractResultSet.Listener, SessionTransaction {
    final Object lock = new Object();
    final SessionImpl session;
    final SpannerRpc rpc;
    final int defaultPrefetchChunks;

    @GuardedBy("lock")
    private boolean isValid = true;

    @GuardedBy("lock")
    private boolean isClosed = false;

    // Allow up to 2GB to be buffered (assuming 1MB chunks), which is larger than the largest
    // possible row. In practice, restart tokens are sent much more frequently.
    private static final int MAX_BUFFERED_CHUNKS = 2048;

    private AbstractReadContext(SessionImpl session, SpannerRpc rpc, int defaultPrefetchChunks) {
      this.session = session;
      this.rpc = rpc;
      this.defaultPrefetchChunks = defaultPrefetchChunks;
    }

    @Override
    public final ResultSet read(
        String table, KeySet keys, Iterable<String> columns, ReadOption... options) {
      return readInternal(table, null, keys, columns, options);
    }

    @Override
    public final ResultSet readUsingIndex(
        String table, String index, KeySet keys, Iterable<String> columns, ReadOption... options) {
      return readInternal(table, checkNotNull(index), keys, columns, options);
    }

    @Nullable
    @Override
    public final Struct readRow(String table, Key key, Iterable<String> columns) {
      try (ResultSet resultSet = read(table, KeySet.singleKey(key), columns)) {
        return consumeSingleRow(resultSet);
      }
    }

    @Nullable
    @Override
    public final Struct readRowUsingIndex(
        String table, String index, Key key, Iterable<String> columns) {
      try (ResultSet resultSet = readUsingIndex(table, index, KeySet.singleKey(key), columns)) {
        return consumeSingleRow(resultSet);
      }
    }

    @Override
    public final ResultSet executeQuery(Statement statement, QueryOption... options) {
      return executeQueryInternal(
          statement,
          nl.topicus.jdbc.shaded.com.google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL,
          options);
    }
    @Override
    public final ResultSet analyzeQuery(
        Statement statement, QueryAnalyzeMode readContextQueryMode) {
      switch (readContextQueryMode) {
        case PROFILE:
          return executeQueryInternal(
              statement,
              nl.topicus.jdbc.shaded.com.google.spanner.v1.ExecuteSqlRequest.QueryMode.PROFILE);
        case PLAN:
          return executeQueryInternal(
              statement,
              nl.topicus.jdbc.shaded.com.google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN);
        default:
          throw new IllegalStateException(
              "Unknown value for QueryAnalyzeMode : " + readContextQueryMode);
      }
    }

    private ResultSet executeQueryInternal(
        Statement statement,
        nl.topicus.jdbc.shaded.com.google.spanner.v1.ExecuteSqlRequest.QueryMode queryMode,
        QueryOption... options) {
      beforeReadOrQuery();
      ExecuteSqlRequest.Builder builder =
          ExecuteSqlRequest.newBuilder()
              .setSql(statement.getSql())
              .setQueryMode(queryMode)
              .setSession(session.name);
      Map<String, Value> stmtParameters = statement.getParameters();
      if (!stmtParameters.isEmpty()) {
        nl.topicus.jdbc.shaded.com.google.protobuf.Struct.Builder paramsBuilder =
            builder.getParamsBuilder();
        for (Map.Entry<String, Value> param : stmtParameters.entrySet()) {
          paramsBuilder.putFields(param.getKey(), param.getValue().toProto());
          builder.putParamTypes(param.getKey(), param.getValue().getType().toProto());
        }
      }
      TransactionSelector selector = getTransactionSelector();
      if (selector != null) {
        builder.setTransaction(selector);
      }
      final ExecuteSqlRequest request = builder.build();
      Options readOptions = Options.fromQueryOptions(options);
      final int prefetchChunks =
          readOptions.hasPrefetchChunks() ? readOptions.prefetchChunks() : defaultPrefetchChunks;
      ResumableStreamIterator stream =
          new ResumableStreamIterator(MAX_BUFFERED_CHUNKS) {
            @Override
            CloseableIterator<PartialResultSet> startStream(@Nullable ByteString resumeToken) {
              GrpcStreamIterator stream = new GrpcStreamIterator(prefetchChunks);
              SpannerRpc.StreamingCall call =
                  rpc.executeQuery(
                      resumeToken == null
                          ? request
                          : request.toBuilder().setResumeToken(resumeToken).build(),
                      stream.consumer(),
                      session.options);
              // We get one message for free.
              if (prefetchChunks > 1) {
                call.request(prefetchChunks - 1);
              }
              stream.setCall(call);
              return stream;
            }
          };
      return new GrpcResultSet(stream, this, queryMode);
    }
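    // For illustration: queryMode selects whether rows, a query plan, or execution statistics
    // come back. A caller might fetch stats like this, using the public surface of this class:
    //
    //   ResultSet rs = ctx.analyzeQuery(Statement.of("SELECT 1"), QueryAnalyzeMode.PROFILE);
    //   while (rs.next()) { /* consume rows */ }
    //   ResultSetStats stats = rs.getStats(); // only valid once the ResultSet is exhausted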
    /**
     * Called before any read or query is started to perform state checks and initializations.
     * Subclasses should call {@code super.beforeReadOrQuery()} if overriding.
     */
    void beforeReadOrQuery() {
      synchronized (lock) {
        beforeReadOrQueryLocked();
      }
    }

    /** Called as part of {@link #beforeReadOrQuery()} under {@link #lock}. */
    @GuardedBy("lock")
    void beforeReadOrQueryLocked() {
      // Note that transactions are invalidated under some circumstances on the backend, but we
      // implement the check more strictly here to encourage coding to contract rather than the
      // implementation.
      checkState(isValid, "Context has been invalidated by a new operation on the session");
      checkState(!isClosed, "Context has been closed");
    }

    /** Invalidates the context since another context has been created more recently. */
    @Override
    public final void invalidate() {
      synchronized (lock) {
        isValid = false;
      }
    }

    @Override
    public void close() {
      synchronized (lock) {
        isClosed = true;
      }
    }

    @Nullable
    abstract TransactionSelector getTransactionSelector();

    @Override
    public void onTransactionMetadata(Transaction transaction) {}

    @Override
    public void onError(SpannerException e) {}

    @Override
    public void onDone() {}

    private ResultSet readInternal(
        String table,
        @Nullable String index,
        KeySet keys,
        Iterable<String> columns,
        ReadOption... options) {
      beforeReadOrQuery();
      ReadRequest.Builder builder =
          ReadRequest.newBuilder()
              .setSession(session.name)
              .setTable(checkNotNull(table))
              .addAllColumns(columns);
      Options readOptions = Options.fromReadOptions(options);
      if (readOptions.hasLimit()) {
        builder.setLimit(readOptions.limit());
      }
      keys.appendToProto(builder.getKeySetBuilder());
      if (index != null) {
        builder.setIndex(index);
      }
      TransactionSelector selector = getTransactionSelector();
      if (selector != null) {
        builder.setTransaction(selector);
      }
      final ReadRequest request = builder.build();
      final int prefetchChunks =
          readOptions.hasPrefetchChunks() ? readOptions.prefetchChunks() : defaultPrefetchChunks;
      ResumableStreamIterator stream =
          new ResumableStreamIterator(MAX_BUFFERED_CHUNKS) {
            @Override
            CloseableIterator<PartialResultSet> startStream(@Nullable ByteString resumeToken) {
              GrpcStreamIterator stream = new GrpcStreamIterator(prefetchChunks);
              SpannerRpc.StreamingCall call =
                  rpc.read(
                      resumeToken == null
                          ? request
                          : request.toBuilder().setResumeToken(resumeToken).build(),
                      stream.consumer(),
                      session.options);
              // We get one message for free.
              if (prefetchChunks > 1) {
                call.request(prefetchChunks - 1);
              }
              stream.setCall(call);
              return stream;
            }
          };
      GrpcResultSet resultSet =
          new GrpcResultSet(
              stream, this, nl.topicus.jdbc.shaded.com.google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL);
      return resultSet;
    }

    private Struct consumeSingleRow(ResultSet resultSet) {
      if (!resultSet.next()) {
        return null;
      }
      Struct row = resultSet.getCurrentRowAsStruct();
      if (resultSet.next()) {
        throw newSpannerException(ErrorCode.INTERNAL, "Multiple rows returned for single key");
      }
      return row;
    }
  }

  private enum DirectExecutor implements Executor {
    INSTANCE;

    @Override
    public void execute(Runnable command) {
      command.run();
    }
  }

  @VisibleForTesting
  static class TransactionRunnerImpl implements SessionTransaction, TransactionRunner {
    /** Allow for testing of backoff logic. */
    static class Sleeper {
      void backoffSleep(Context context, long backoffMillis) {
        SpannerImpl.backoffSleep(context, backoffMillis);
      }
    }

    private final SessionImpl session;
    private final Sleeper sleeper;
    private TransactionContextImpl txn;
    private volatile boolean isValid = true;

    TransactionRunnerImpl(
        SessionImpl session, SpannerRpc rpc, Sleeper sleeper, int defaultPrefetchChunks) {
      ByteString transactionId = session.readyTransactionId;
      session.readyTransactionId = null;
      this.session = session;
      this.sleeper = sleeper;
      this.txn = new TransactionContextImpl(session, transactionId, rpc, defaultPrefetchChunks);
    }

    TransactionRunnerImpl(SessionImpl session, SpannerRpc rpc, int defaultPrefetchChunks) {
      this(session, rpc, new Sleeper(), defaultPrefetchChunks);
    }

    @Nullable
    @Override
    public <T> T run(TransactionCallable<T> callable) {
      BackOff backoff = newBackOff();
      final Context context = Context.current();
      while (true) {
        checkState(
            isValid, "TransactionRunner has been invalidated by a new operation on the session");
        checkContext(context);
        // TODO(user): When using streaming reads, consider using the first read to begin
        // the txn.
        txn.ensureTxn();

        T result;
        boolean shouldRollback = true;
        try {
          result = callable.run(txn);
          shouldRollback = false;
        } catch (Exception e) {
          txnLogger.log(Level.FINE, "User-provided TransactionCallable raised exception", e);
          if (txn.isAborted()) {
            shouldRollback = false;
            backoff(context, backoff);
            continue;
          }
          if (e instanceof SpannerException) {
            throw (SpannerException) e;
          } else {
            throw newSpannerException(ErrorCode.UNKNOWN, e.getMessage(), e);
          }
        } finally {
          if (shouldRollback) {
            txn.rollback();
          }
        }

        try {
          txn.commit();
          return result;
        } catch (AbortedException e) {
          txnLogger.log(Level.FINE, "Commit aborted", e);
          backoff(context, backoff);
        }
      }
    }
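    // For illustration: the loop above gives TransactionRunner its retry semantics. The callable
    // may execute several times (once per ABORTED attempt), so side effects outside the
    // transaction context must be idempotent, e.g.:
    //
    //   TransactionRunner runner = session.readWriteTransaction();
    //   runner.run(new TransactionRunner.TransactionCallable<Void>() {
    //     @Override
    //     public Void run(TransactionContext ctx) {
    //       ctx.buffer(Mutation.delete("Singers", KeySet.all())); // re-buffered on each attempt
    //       return null;
    //     }
    //   });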
    @Override
    public Timestamp getCommitTimestamp() {
      return txn.commitTimestamp();
    }

    @Override
    public void invalidate() {
      isValid = false;
    }

    private void backoff(Context context, BackOff backoff) {
      long delay = txn.getRetryDelayInMillis(backoff);
      txn = new TransactionContextImpl(session, null, txn.rpc, txn.defaultPrefetchChunks);
      sleeper.backoffSleep(context, delay);
    }
  }

  @VisibleForTesting
  static class TransactionContextImpl extends AbstractReadContext implements TransactionContext {
    @GuardedBy("lock")
    private List<Mutation> mutations = new ArrayList<>();

    @GuardedBy("lock")
    private boolean aborted;

    /** Default to -1 to indicate not available. */
    @GuardedBy("lock")
    private long retryDelayInMillis = -1L;

    private ByteString transactionId;
    private Timestamp commitTimestamp;

    TransactionContextImpl(
        SessionImpl session,
        @Nullable ByteString transactionId,
        SpannerRpc rpc,
        int defaultPrefetchChunks) {
      super(session, rpc, defaultPrefetchChunks);
      this.transactionId = transactionId;
    }

    void ensureTxn() {
      if (transactionId == null) {
        transactionId = session.beginTransaction();
        txnLogger.log(
            Level.FINER,
            "Started transaction {0}",
            txnLogger.isLoggable(Level.FINER) ? transactionId.asReadOnlyByteBuffer() : null);
      } else {
        txnLogger.log(
            Level.FINER,
            "Using prepared transaction {0}",
            txnLogger.isLoggable(Level.FINER) ? transactionId.asReadOnlyByteBuffer() : null);
      }
    }

    void commit() {
      CommitRequest.Builder builder =
          CommitRequest.newBuilder().setSession(session.getName()).setTransactionId(transactionId);
      synchronized (lock) {
        if (!mutations.isEmpty()) {
          List<nl.topicus.jdbc.shaded.com.google.spanner.v1.Mutation> mutationsProto =
              new ArrayList<>();
          Mutation.toProto(mutations, mutationsProto);
          builder.addAllMutations(mutationsProto);
        }
        // Ensure that no call to buffer mutations that would be lost can succeed.
        mutations = null;
      }
      final CommitRequest commitRequest = builder.build();
      CommitResponse commitResponse =
          runWithRetries(
              new Callable<CommitResponse>() {
                @Override
                public CommitResponse call() throws Exception {
                  return rpc.commit(commitRequest, session.options);
                }
              });
      if (!commitResponse.hasCommitTimestamp()) {
        throw newSpannerException(
            ErrorCode.INTERNAL, "Missing commitTimestamp:\n" + session.getName());
      }
      commitTimestamp = Timestamp.fromProto(commitResponse.getCommitTimestamp());
    }

    Timestamp commitTimestamp() {
      checkState(commitTimestamp != null, "run() has not yet returned normally");
      return commitTimestamp;
    }

    boolean isAborted() {
      synchronized (lock) {
        return aborted;
      }
    }

    /** Return the delay in milliseconds between requests to Cloud Spanner. */
    long getRetryDelayInMillis(BackOff backoff) {
      long delay = nextBackOffMillis(backoff);
      synchronized (lock) {
        if (retryDelayInMillis >= 0) {
          return retryDelayInMillis;
        }
      }
      return delay;
    }
    void rollback() {
      // We're exiting early due to a user exception, but the transaction is still active.
      // Send a rollback for the transaction to release any locks held.
      // TODO(user): Make this an async fire-and-forget request.
      try {
        // Note that we're not retrying this request since we don't particularly care about the
        // response. Normally, the next thing that will happen is that we will make a fresh
        // transaction attempt, which should implicitly abort this one.
        rpc.rollback(
            RollbackRequest.newBuilder()
                .setSession(session.getName())
                .setTransactionId(transactionId)
                .build(),
            session.options);
      } catch (SpannerException e) {
        txnLogger.log(Level.FINE, "Exception during rollback", e);
      }
    }

    @Nullable
    @Override
    TransactionSelector getTransactionSelector() {
      return TransactionSelector.newBuilder().setId(transactionId).build();
    }

    @Override
    public void onError(SpannerException e) {
      if (e.getErrorCode() == ErrorCode.ABORTED) {
        long delay = -1L;
        if (e instanceof AbortedException) {
          delay = ((AbortedException) e).getRetryDelayInMillis();
        }
        if (delay == -1L) {
          txnLogger.log(Level.FINE, "Retry duration is missing from the exception.", e);
        }
        synchronized (lock) {
          retryDelayInMillis = delay;
          aborted = true;
        }
      }
    }

    @Override
    public void buffer(Mutation mutation) {
      synchronized (lock) {
        checkNotNull(mutations, "Context is closed");
        mutations.add(checkNotNull(mutation));
      }
    }

    @Override
    public void buffer(Iterable<Mutation> mutations) {
      synchronized (lock) {
        checkNotNull(this.mutations, "Context is closed");
        for (Mutation mutation : mutations) {
          this.mutations.add(checkNotNull(mutation));
        }
      }
    }
  }

  /**
   * A {@code ReadContext} for standalone reads. This can only be used for a single operation,
   * since each standalone read may see a different timestamp of Cloud Spanner data.
   */
  private static class SingleReadContext extends AbstractReadContext {
    final TimestampBound bound;

    @GuardedBy("lock")
    private boolean used;

    private SingleReadContext(
        SessionImpl session, TimestampBound bound, SpannerRpc rpc, int defaultPrefetchChunks) {
      super(session, rpc, defaultPrefetchChunks);
      this.bound = bound;
    }

    @GuardedBy("lock")
    @Override
    void beforeReadOrQueryLocked() {
      super.beforeReadOrQueryLocked();
      checkState(!used, "Cannot use a single-read ReadContext for multiple reads");
      used = true;
    }

    @Override
    @Nullable
    TransactionSelector getTransactionSelector() {
      if (bound.getMode() == TimestampBound.Mode.STRONG) {
        // Default mode: no need to specify a transaction.
        return null;
      }
      return TransactionSelector.newBuilder()
          .setSingleUse(TransactionOptions.newBuilder().setReadOnly(bound.toProto()))
          .build();
    }
  }
  private static void assertTimestampAvailable(boolean available) {
    checkState(available, "Method can only be called after read has returned data or finished");
  }

  private class SingleUseReadOnlyTransaction extends SingleReadContext
      implements ReadOnlyTransaction {
    @GuardedBy("lock")
    private Timestamp timestamp;

    private SingleUseReadOnlyTransaction(
        SessionImpl session, TimestampBound bound, SpannerRpc rpc, int defaultPrefetchChunks) {
      super(session, bound, rpc, defaultPrefetchChunks);
    }

    @Override
    public Timestamp getReadTimestamp() {
      synchronized (lock) {
        assertTimestampAvailable(timestamp != null);
        return timestamp;
      }
    }

    @Override
    @Nullable
    TransactionSelector getTransactionSelector() {
      TransactionOptions.Builder options = TransactionOptions.newBuilder();
      bound.applyToBuilder(options.getReadOnlyBuilder()).setReturnReadTimestamp(true);
      return TransactionSelector.newBuilder().setSingleUse(options).build();
    }

    @Override
    public void onTransactionMetadata(Transaction transaction) {
      synchronized (lock) {
        if (!transaction.hasReadTimestamp()) {
          throw newSpannerException(
              ErrorCode.INTERNAL, "Missing expected transaction.read_timestamp metadata field");
        }
        try {
          timestamp = Timestamp.fromProto(transaction.getReadTimestamp());
        } catch (IllegalArgumentException e) {
          throw newSpannerException(
              ErrorCode.INTERNAL, "Bad value in transaction.read_timestamp metadata field", e);
        }
      }
    }
  }

  private class MultiUseReadOnlyTransaction extends AbstractReadContext
      implements ReadOnlyTransaction {
    private final TimestampBound bound;
    private final Object txnLock = new Object();

    @GuardedBy("txnLock")
    private Timestamp timestamp;

    @GuardedBy("txnLock")
    private ByteString transactionId;

    private MultiUseReadOnlyTransaction(
        SessionImpl session, TimestampBound bound, SpannerRpc rpc, int defaultPrefetchChunks) {
      super(session, rpc, defaultPrefetchChunks);
      checkArgument(
          bound.getMode() != TimestampBound.Mode.MAX_STALENESS
              && bound.getMode() != TimestampBound.Mode.MIN_READ_TIMESTAMP,
          "Bounded staleness mode %s is not supported for multi-use read-only transactions."
              + " Create a single-use read or read-only transaction instead.",
          bound.getMode());
      this.bound = bound;
    }

    @Override
    void beforeReadOrQuery() {
      super.beforeReadOrQuery();
      initTransaction();
    }

    @Override
    @Nullable
    TransactionSelector getTransactionSelector() {
      // No need for synchronization: super.readInternal() is always preceded by a check of
      // "transactionId" that provides a happens-before from initialization, and the value is
      // never changed afterwards.
      @SuppressWarnings("GuardedByChecker")
      TransactionSelector selector = TransactionSelector.newBuilder().setId(transactionId).build();
      return selector;
    }

    @Override
    public Timestamp getReadTimestamp() {
      synchronized (txnLock) {
        assertTimestampAvailable(timestamp != null);
        return timestamp;
      }
    }

    private void initTransaction() {
      // Since we only support synchronous calls, just block on "txnLock" while the RPC is in
      // flight. Note that we use the strategy of sending an explicit BeginTransaction() RPC,
      // rather than using the first read in the transaction to begin it implicitly. The chosen
      // strategy is sub-optimal in the case of the first read being fast, as it incurs an extra
      // RTT, but optimal if the first read is slow. Since we don't know how fast the read will
      // be, and we are using non-streaming reads (so we don't see the metadata until the entire
      // read has finished), using BeginTransaction() is the safest path.
      // TODO(user): Fix comment / begin transaction on first read; we now use streaming reads.
      synchronized (txnLock) {
        if (transactionId != null) {
          return;
        }
        TransactionOptions.Builder options = TransactionOptions.newBuilder();
        bound.applyToBuilder(options.getReadOnlyBuilder()).setReturnReadTimestamp(true);
        final BeginTransactionRequest request =
            BeginTransactionRequest.newBuilder()
                .setSession(session.getName())
                .setOptions(options)
                .build();
        Transaction transaction =
            runWithRetries(
                new Callable<Transaction>() {
                  @Override
                  public Transaction call() throws Exception {
                    return rpc.beginTransaction(request, session.options);
                  }
                });
        if (!transaction.hasReadTimestamp()) {
          throw SpannerExceptionFactory.newSpannerException(
              ErrorCode.INTERNAL, "Missing expected transaction.read_timestamp metadata field");
        }
        if (transaction.getId().isEmpty()) {
          throw SpannerExceptionFactory.newSpannerException(
              ErrorCode.INTERNAL, "Missing expected transaction.id metadata field");
        }
        try {
          timestamp = Timestamp.fromProto(transaction.getReadTimestamp());
        } catch (IllegalArgumentException e) {
          throw SpannerExceptionFactory.newSpannerException(
              ErrorCode.INTERNAL, "Bad value in transaction.read_timestamp metadata field", e);
        }
        transactionId = transaction.getId();
      }
    }
  }
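  // For illustration: the single-use and multi-use read-only paths above differ mainly in how
  // the TransactionSelector is built. A caller picks the staleness via TimestampBound, e.g.:
  //
  //   ReadContext ctx = session.singleUse(TimestampBound.ofExactStaleness(10, TimeUnit.SECONDS));
  //   ReadOnlyTransaction txn = session.readOnlyTransaction(TimestampBound.strong());
  //
  // Note that MAX_STALENESS and MIN_READ_TIMESTAMP bounds are rejected for the multi-use form,
  // per the checkArgument in MultiUseReadOnlyTransaction above.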
  @VisibleForTesting
  abstract static class AbstractResultSet<R> extends AbstractStructReader implements ResultSet {
    interface Listener {
      /**
       * Called when transaction metadata is seen. This method may be invoked at most once. If
       * the method is invoked, it will precede {@link #onError(SpannerException)} or
       * {@link #onDone()}.
       */
      void onTransactionMetadata(Transaction transaction) throws SpannerException;

      /** Called when the read finishes with an error. */
      void onError(SpannerException e);

      /** Called when the read finishes normally. */
      void onDone();
    }
    protected abstract GrpcStruct currRow();

    @Override
    public Struct getCurrentRowAsStruct() {
      return currRow().immutableCopy();
    }

    @Override
    protected boolean getBooleanInternal(int columnIndex) {
      return currRow().getBooleanInternal(columnIndex);
    }

    @Override
    protected long getLongInternal(int columnIndex) {
      return currRow().getLongInternal(columnIndex);
    }

    @Override
    protected double getDoubleInternal(int columnIndex) {
      return currRow().getDoubleInternal(columnIndex);
    }

    @Override
    protected String getStringInternal(int columnIndex) {
      return currRow().getStringInternal(columnIndex);
    }

    @Override
    protected ByteArray getBytesInternal(int columnIndex) {
      return currRow().getBytesInternal(columnIndex);
    }

    @Override
    protected Timestamp getTimestampInternal(int columnIndex) {
      return currRow().getTimestampInternal(columnIndex);
    }

    @Override
    protected Date getDateInternal(int columnIndex) {
      return currRow().getDateInternal(columnIndex);
    }

    @Override
    protected boolean[] getBooleanArrayInternal(int columnIndex) {
      return currRow().getBooleanArrayInternal(columnIndex);
    }

    @Override
    protected List<Boolean> getBooleanListInternal(int columnIndex) {
      return currRow().getBooleanListInternal(columnIndex);
    }

    @Override
    protected long[] getLongArrayInternal(int columnIndex) {
      return currRow().getLongArrayInternal(columnIndex);
    }

    @Override
    protected List<Long> getLongListInternal(int columnIndex) {
      return currRow().getLongListInternal(columnIndex);
    }

    @Override
    protected double[] getDoubleArrayInternal(int columnIndex) {
      return currRow().getDoubleArrayInternal(columnIndex);
    }

    @Override
    protected List<Double> getDoubleListInternal(int columnIndex) {
      return currRow().getDoubleListInternal(columnIndex);
    }

    @Override
    protected List<String> getStringListInternal(int columnIndex) {
      return currRow().getStringListInternal(columnIndex);
    }

    @Override
    protected List<ByteArray> getBytesListInternal(int columnIndex) {
      return currRow().getBytesListInternal(columnIndex);
    }

    @Override
    protected List<Timestamp> getTimestampListInternal(int columnIndex) {
      return currRow().getTimestampListInternal(columnIndex);
    }

    @Override
    protected List<Date> getDateListInternal(int columnIndex) {
      return currRow().getDateListInternal(columnIndex);
    }

    @Override
    protected List<Struct> getStructListInternal(int columnIndex) {
      return currRow().getStructListInternal(columnIndex);
    }

    @Override
    public boolean isNull(int columnIndex) {
      return currRow().isNull(columnIndex);
    }
  }

  @VisibleForTesting
  static class GrpcResultSet extends AbstractResultSet<List<Object>> {
    private final GrpcValueIterator iterator;
    private final Listener listener;
    private final QueryMode queryMode;
    private GrpcStruct currRow;
    private SpannerException error;
    private ResultSetStats statistics;
    private boolean closed;

    GrpcResultSet(
        CloseableIterator<PartialResultSet> iterator, Listener listener, QueryMode queryMode) {
      this.iterator = new GrpcValueIterator(iterator);
      this.listener = listener;
      this.queryMode = queryMode;
    }

    @Override
    protected GrpcStruct currRow() {
      checkState(!closed, "ResultSet is closed");
      checkState(currRow != null, "next() call required");
      return currRow;
    }

    @Override
    public boolean next() throws SpannerException {
      if (error != null) {
        throw newSpannerException(error);
      }
      try {
        if (currRow == null) {
          ResultSetMetadata metadata = iterator.getMetadata();
          if (metadata.hasTransaction()) {
            listener.onTransactionMetadata(metadata.getTransaction());
          }
          currRow = new GrpcStruct(iterator.type(), new ArrayList<>());
        }
        boolean hasNext = currRow.consumeRow(iterator);
        if (queryMode != QueryMode.NORMAL && !hasNext) {
          statistics = iterator.getStats();
        }
        return hasNext;
      } catch (SpannerException e) {
        throw yieldError(e);
      }
    }
    @Override
    public ResultSetStats getStats() {
      if (queryMode == QueryMode.NORMAL) {
        throw new UnsupportedOperationException(
            "ResultSetStats are available only in PLAN and PROFILE execution modes");
      }
      checkState(
          statistics != null, "ResultSetStats requested before consuming the entire ResultSet");
      return statistics;
    }

    @Override
    public void close() {
      iterator.close("ResultSet closed");
      closed = true;
    }

    @Override
    public Type getType() {
      checkState(currRow != null, "next() call required");
      return currRow.getType();
    }

    private SpannerException yieldError(SpannerException e) {
      close();
      listener.onError(e);
      throw e;
    }
  }

  private static class GrpcStruct extends Struct implements Serializable {
    protected final Type type;
    protected final List<Object> rowData;

    /**
     * Builds an immutable version of this struct using {@link Struct#newBuilder()} which is used
     * as a serialization proxy.
     */
    private Object writeReplace() {
      Builder builder = Struct.newBuilder();
      List<Type.StructField> structFields = getType().getStructFields();
      for (int i = 0; i < structFields.size(); i++) {
        Type.StructField field = structFields.get(i);
        String fieldName = field.getName();
        Object value = rowData.get(i);
        Type fieldType = field.getType();
        switch (fieldType.getCode()) {
          case BOOL:
            builder.set(fieldName).to((Boolean) value);
            break;
          case INT64:
            builder.set(fieldName).to((Long) value);
            break;
          case FLOAT64:
            builder.set(fieldName).to((Double) value);
            break;
          case STRING:
            builder.set(fieldName).to((String) value);
            break;
          case BYTES:
            builder.set(fieldName).to((ByteArray) value);
            break;
          case TIMESTAMP:
            builder.set(fieldName).to((Timestamp) value);
            break;
          case DATE:
            builder.set(fieldName).to((Date) value);
            break;
          case ARRAY:
            switch (fieldType.getArrayElementType().getCode()) {
              case BOOL:
                builder.set(fieldName).toBoolArray((Iterable<Boolean>) value);
                break;
              case INT64:
                builder.set(fieldName).toInt64Array((Iterable<Long>) value);
                break;
              case FLOAT64:
                builder.set(fieldName).toFloat64Array((Iterable<Double>) value);
                break;
              case STRING:
                builder.set(fieldName).toStringArray((Iterable<String>) value);
                break;
              case BYTES:
                builder.set(fieldName).toBytesArray((Iterable<ByteArray>) value);
                break;
              case TIMESTAMP:
                builder.set(fieldName).toTimestampArray((Iterable<Timestamp>) value);
                break;
              case DATE:
                builder.set(fieldName).toDateArray((Iterable<Date>) value);
                break;
              case STRUCT:
                builder.add(
                    fieldName,
                    fieldType.getArrayElementType().getStructFields(),
                    (Iterable<Struct>) value);
                break;
            }
            break;
          case STRUCT: // Not a legal top-level field type.
          default:
            throw new AssertionError("Unhandled type code: " + fieldType.getCode());
        }
      }
      return builder.build();
    }
    GrpcStruct(Type type, List<Object> rowData) {
      this.type = type;
      this.rowData = rowData;
    }

    boolean consumeRow(Iterator<nl.topicus.jdbc.shaded.com.google.protobuf.Value> iterator) {
      rowData.clear();
      if (!iterator.hasNext()) {
        return false;
      }
      for (Type.StructField fieldType : getType().getStructFields()) {
        if (!iterator.hasNext()) {
          throw newSpannerException(
              ErrorCode.INTERNAL,
              "Invalid value stream: end of stream reached before row is complete");
        }
        nl.topicus.jdbc.shaded.com.google.protobuf.Value value = iterator.next();
        rowData.add(decodeValue(fieldType.getType(), value));
      }
      return true;
    }

    private static Object decodeValue(
        Type fieldType, nl.topicus.jdbc.shaded.com.google.protobuf.Value proto) {
      if (proto.getKindCase() == KindCase.NULL_VALUE) {
        return null;
      }
      switch (fieldType.getCode()) {
        case BOOL:
          checkType(fieldType, proto, KindCase.BOOL_VALUE);
          return proto.getBoolValue();
        case INT64:
          checkType(fieldType, proto, KindCase.STRING_VALUE);
          return Long.parseLong(proto.getStringValue());
        case FLOAT64:
          return valueProtoToFloat64(proto);
        case STRING:
          checkType(fieldType, proto, KindCase.STRING_VALUE);
          return proto.getStringValue();
        case BYTES:
          checkType(fieldType, proto, KindCase.STRING_VALUE);
          return ByteArray.fromBase64(proto.getStringValue());
        case TIMESTAMP:
          checkType(fieldType, proto, KindCase.STRING_VALUE);
          return Timestamp.parseTimestamp(proto.getStringValue());
        case DATE:
          checkType(fieldType, proto, KindCase.STRING_VALUE);
          return Date.parseDate(proto.getStringValue());
        case ARRAY:
          checkType(fieldType, proto, KindCase.LIST_VALUE);
          ListValue listValue = proto.getListValue();
          return decodeArrayValue(fieldType.getArrayElementType(), listValue);
        case STRUCT: // Not a legal top-level field type.
        default:
          throw new AssertionError("Unhandled type code: " + fieldType.getCode());
      }
    }

    private static Object decodeArrayValue(Type elementType, ListValue listValue) {
      switch (elementType.getCode()) {
        case BOOL:
          // Use a view: element conversion is virtually free.
          return Lists.transform(
              listValue.getValuesList(),
              new Function<nl.topicus.jdbc.shaded.com.google.protobuf.Value, Boolean>() {
                @Override
                public Boolean apply(nl.topicus.jdbc.shaded.com.google.protobuf.Value input) {
                  return input.getKindCase() == KindCase.NULL_VALUE ? null : input.getBoolValue();
                }
              });
        case INT64:
          // For int64/float64 types, use custom containers. These avoid wrapper object
          // creation for non-null arrays.
          return new Int64Array(listValue);
        case FLOAT64:
          return new Float64Array(listValue);
        case STRING:
          return Lists.transform(
              listValue.getValuesList(),
              new Function<nl.topicus.jdbc.shaded.com.google.protobuf.Value, String>() {
                @Override
                public String apply(nl.topicus.jdbc.shaded.com.google.protobuf.Value input) {
                  return input.getKindCase() == KindCase.NULL_VALUE
                      ? null
                      : input.getStringValue();
                }
              });
        case BYTES:
          {
            // Materialize list: element conversion is expensive and should happen only once.
            ArrayList<ByteArray> list = new ArrayList<>(listValue.getValuesCount());
            for (nl.topicus.jdbc.shaded.com.google.protobuf.Value value :
                listValue.getValuesList()) {
              list.add(
                  value.getKindCase() == KindCase.NULL_VALUE
                      ? null
                      : ByteArray.fromBase64(value.getStringValue()));
            }
            return list;
          }
        case TIMESTAMP:
          {
            // Materialize list: element conversion is expensive and should happen only once.
            ArrayList<Timestamp> list = new ArrayList<>(listValue.getValuesCount());
            for (nl.topicus.jdbc.shaded.com.google.protobuf.Value value :
                listValue.getValuesList()) {
              list.add(
                  value.getKindCase() == KindCase.NULL_VALUE
                      ? null
                      : Timestamp.parseTimestamp(value.getStringValue()));
            }
            return list;
          }
        case DATE:
          {
            // Materialize list: element conversion is expensive and should happen only once.
            ArrayList<Date> list = new ArrayList<>(listValue.getValuesCount());
            for (nl.topicus.jdbc.shaded.com.google.protobuf.Value value :
                listValue.getValuesList()) {
              list.add(
                  value.getKindCase() == KindCase.NULL_VALUE
                      ? null
                      : Date.parseDate(value.getStringValue()));
            }
            return list;
          }
        case STRUCT:
          {
            ArrayList<Struct> list = new ArrayList<>(listValue.getValuesCount());
            for (nl.topicus.jdbc.shaded.com.google.protobuf.Value value :
                listValue.getValuesList()) {
              if (value.getKindCase() == KindCase.NULL_VALUE) {
                list.add(null);
              } else {
                List<Type.StructField> fieldTypes = elementType.getStructFields();
                List<Object> fields = new ArrayList<>(fieldTypes.size());
                ListValue structValues = value.getListValue();
                checkArgument(
                    structValues.getValuesCount() == fieldTypes.size(),
                    "Size mismatch between type descriptor and actual values.");
                for (int i = 0; i < fieldTypes.size(); ++i) {
                  fields.add(decodeValue(fieldTypes.get(i).getType(), structValues.getValues(i)));
                }
                list.add(new GrpcStruct(elementType, fields));
              }
            }
            return list;
          }
        default:
          throw new AssertionError("Unhandled type code: " + elementType.getCode());
      }
    }
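    // For illustration: Cloud Spanner encodes several scalar types as protobuf string values,
    // which is what decodeValue() above undoes. A sketch of the wire representation it assumes:
    //
    //   INT64      "42"                                -> Long.parseLong("42")
    //   BYTES      base64, e.g. "aGVsbG8="             -> ByteArray.fromBase64(...)
    //   TIMESTAMP  RFC 3339, e.g. "2017-01-01T00:00:00Z" -> Timestamp.parseTimestamp(...)
    //   DATE       "2017-01-01"                        -> Date.parseDate(...)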
    private static void checkType(
        Type fieldType, nl.topicus.jdbc.shaded.com.google.protobuf.Value proto, KindCase expected) {
      if (proto.getKindCase() != expected) {
        throw newSpannerException(
            ErrorCode.INTERNAL,
            "Invalid value for column type "
                + fieldType
                + " expected "
                + expected
                + " but was "
                + proto.getKindCase());
      }
    }

    Struct immutableCopy() {
      return new GrpcStruct(type, new ArrayList<>(rowData));
    }

    @Override
    public Type getType() {
      return type;
    }

    @Override
    public boolean isNull(int columnIndex) {
      return rowData.get(columnIndex) == null;
    }

    @Override
    protected boolean getBooleanInternal(int columnIndex) {
      return (Boolean) rowData.get(columnIndex);
    }

    @Override
    protected long getLongInternal(int columnIndex) {
      return (Long) rowData.get(columnIndex);
    }

    @Override
    protected double getDoubleInternal(int columnIndex) {
      return (Double) rowData.get(columnIndex);
    }

    @Override
    protected String getStringInternal(int columnIndex) {
      return (String) rowData.get(columnIndex);
    }

    @Override
    protected ByteArray getBytesInternal(int columnIndex) {
      return (ByteArray) rowData.get(columnIndex);
    }

    @Override
    protected Timestamp getTimestampInternal(int columnIndex) {
      return (Timestamp) rowData.get(columnIndex);
    }

    @Override
    protected Date getDateInternal(int columnIndex) {
      return (Date) rowData.get(columnIndex);
    }

    @Override
    protected boolean[] getBooleanArrayInternal(int columnIndex) {
      @SuppressWarnings("unchecked") // We know ARRAY<BOOL> produces a List<Boolean>.
      List<Boolean> values = (List<Boolean>) rowData.get(columnIndex);
      boolean[] r = new boolean[values.size()];
      for (int i = 0; i < values.size(); ++i) {
        if (values.get(i) == null) {
          throw throwNotNull(columnIndex);
        }
        r[i] = values.get(i);
      }
      return r;
    }

    @Override
    @SuppressWarnings("unchecked") // We know ARRAY<BOOL> produces a List<Boolean>.
    protected List<Boolean> getBooleanListInternal(int columnIndex) {
      return Collections.unmodifiableList((List<Boolean>) rowData.get(columnIndex));
    }

    @Override
    protected long[] getLongArrayInternal(int columnIndex) {
      return getLongListInternal(columnIndex).toPrimitiveArray(columnIndex);
    }
    Struct immutableCopy() {
      return new GrpcStruct(type, new ArrayList<>(rowData));
    }

    @Override
    public Type getType() {
      return type;
    }

    @Override
    public boolean isNull(int columnIndex) {
      return rowData.get(columnIndex) == null;
    }

    @Override
    protected boolean getBooleanInternal(int columnIndex) {
      return (Boolean) rowData.get(columnIndex);
    }

    @Override
    protected long getLongInternal(int columnIndex) {
      return (Long) rowData.get(columnIndex);
    }

    @Override
    protected double getDoubleInternal(int columnIndex) {
      return (Double) rowData.get(columnIndex);
    }

    @Override
    protected String getStringInternal(int columnIndex) {
      return (String) rowData.get(columnIndex);
    }

    @Override
    protected ByteArray getBytesInternal(int columnIndex) {
      return (ByteArray) rowData.get(columnIndex);
    }

    @Override
    protected Timestamp getTimestampInternal(int columnIndex) {
      return (Timestamp) rowData.get(columnIndex);
    }

    @Override
    protected Date getDateInternal(int columnIndex) {
      return (Date) rowData.get(columnIndex);
    }

    @Override
    protected boolean[] getBooleanArrayInternal(int columnIndex) {
      @SuppressWarnings("unchecked") // We know ARRAY<BOOL> produces a List<Boolean>.
      List<Boolean> values = (List<Boolean>) rowData.get(columnIndex);
      boolean[] r = new boolean[values.size()];
      for (int i = 0; i < values.size(); ++i) {
        if (values.get(i) == null) {
          throw throwNotNull(columnIndex);
        }
        r[i] = values.get(i);
      }
      return r;
    }

    @Override
    @SuppressWarnings("unchecked") // We know ARRAY<BOOL> produces a List<Boolean>.
    protected List<Boolean> getBooleanListInternal(int columnIndex) {
      return Collections.unmodifiableList((List<Boolean>) rowData.get(columnIndex));
    }

    @Override
    protected long[] getLongArrayInternal(int columnIndex) {
      return getLongListInternal(columnIndex).toPrimitiveArray(columnIndex);
    }

    @Override
    @SuppressWarnings("unchecked") // We know ARRAY<INT64> produces an Int64Array.
    protected Int64Array getLongListInternal(int columnIndex) {
      return (Int64Array) rowData.get(columnIndex);
    }

    @Override
    protected double[] getDoubleArrayInternal(int columnIndex) {
      return getDoubleListInternal(columnIndex).toPrimitiveArray(columnIndex);
    }

    @Override
    @SuppressWarnings("unchecked") // We know ARRAY<FLOAT64> produces a Float64Array.
    protected Float64Array getDoubleListInternal(int columnIndex) {
      return (Float64Array) rowData.get(columnIndex);
    }

    @Override
    @SuppressWarnings("unchecked") // We know ARRAY<STRING> produces a List<String>.
    protected List<String> getStringListInternal(int columnIndex) {
      return Collections.unmodifiableList((List<String>) rowData.get(columnIndex));
    }

    @Override
    @SuppressWarnings("unchecked") // We know ARRAY<BYTES> produces a List<ByteArray>.
    protected List<ByteArray> getBytesListInternal(int columnIndex) {
      return Collections.unmodifiableList((List<ByteArray>) rowData.get(columnIndex));
    }

    @Override
    @SuppressWarnings("unchecked") // We know ARRAY<TIMESTAMP> produces a List<Timestamp>.
    protected List<Timestamp> getTimestampListInternal(int columnIndex) {
      return Collections.unmodifiableList((List<Timestamp>) rowData.get(columnIndex));
    }

    @Override
    @SuppressWarnings("unchecked") // We know ARRAY<DATE> produces a List<Date>.
    protected List<Date> getDateListInternal(int columnIndex) {
      return Collections.unmodifiableList((List<Date>) rowData.get(columnIndex));
    }

    @Override
    @SuppressWarnings("unchecked") // We know ARRAY<STRUCT<...>> produces a List<Struct>.
    protected List<Struct> getStructListInternal(int columnIndex) {
      return Collections.unmodifiableList((List<Struct>) rowData.get(columnIndex));
    }
  }

  @VisibleForTesting
  interface CloseableIterator<T> extends Iterator<T> {
    /**
     * Closes the iterator, freeing any underlying resources.
     *
     * @param message a message to include in the final RPC status
     */
    void close(@Nullable String message);
  }
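  // Illustrative sketch (not from the upstream source): callers are expected to pair every
  // CloseableIterator with a close() in a finally block, since close() is what cancels a
  // still-open RPC. makeStream() is a hypothetical factory standing in for, e.g., the
  // GrpcStreamIterator defined below:
  //
  //   CloseableIterator<PartialResultSet> it = makeStream();
  //   try {
  //     while (it.hasNext()) {
  //       process(it.next());
  //     }
  //   } finally {
  //     it.close("done"); // the message is attached to the final RPC status
  //   }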
  /** Adapts a streaming read/query call into an iterator over partial result sets. */
  @VisibleForTesting
  static class GrpcStreamIterator extends AbstractIterator<PartialResultSet>
      implements CloseableIterator<PartialResultSet> {
    private static final PartialResultSet END_OF_STREAM = PartialResultSet.newBuilder().build();

    private final ConsumerImpl consumer = new ConsumerImpl();
    private final BlockingQueue<PartialResultSet> stream;

    private SpannerRpc.StreamingCall call;
    private SpannerException error;

    // Visible for testing.
    GrpcStreamIterator(int prefetchChunks) {
      // One extra to allow for END_OF_STREAM message.
      this.stream = new LinkedBlockingQueue<>(prefetchChunks + 1);
    }

    protected final SpannerRpc.ResultStreamConsumer consumer() {
      return consumer;
    }

    public void setCall(SpannerRpc.StreamingCall call) {
      this.call = call;
    }

    @Override
    public void close(@Nullable String message) {
      if (call != null) {
        call.cancel(message);
      }
    }

    @Override
    protected final PartialResultSet computeNext() {
      PartialResultSet next;
      try {
        // TODO: Ideally honor nl.topicus.jdbc.shaded.io.grpc.Context while blocking here. In
        // practice, cancellation/deadline results in an error being delivered to "stream",
        // which should mean that we do not block significantly longer afterwards, but it
        // would be more robust to use poll() with a timeout.
        next = stream.take();
      } catch (InterruptedException e) {
        // Treat interrupt as a request to cancel the read.
        throw SpannerExceptionFactory.propagateInterrupt(e);
      }
      if (next != END_OF_STREAM) {
        call.request(1);
        return next;
      }

      // All done - close() no longer needs to cancel the call.
      call = null;

      if (error != null) {
        throw SpannerExceptionFactory.newSpannerException(error);
      }

      endOfData();
      return null;
    }

    private void addToStream(PartialResultSet results) {
      // We assume that nothing from the user will interrupt gRPC event threads.
      Uninterruptibles.putUninterruptibly(stream, results);
    }

    private class ConsumerImpl implements SpannerRpc.ResultStreamConsumer {
      @Override
      public void onPartialResultSet(PartialResultSet results) {
        addToStream(results);
      }

      @Override
      public void onCompleted() {
        addToStream(END_OF_STREAM);
      }

      @Override
      public void onError(SpannerException e) {
        error = e;
        addToStream(END_OF_STREAM);
      }

      // Visible only for testing.
      @VisibleForTesting
      void setCall(SpannerRpc.StreamingCall call) {
        GrpcStreamIterator.this.setCall(call);
      }
    }
  }
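  // Illustrative note (not from the upstream source): GrpcStreamIterator is a bounded
  // producer/consumer hand-off. gRPC callbacks (ConsumerImpl) enqueue into the
  // LinkedBlockingQueue, and computeNext() requests exactly one more message per take(),
  // so at most prefetchChunks messages are buffered. Errors ride the same queue: onError()
  // records the exception and enqueues END_OF_STREAM, so computeNext() can rethrow it in
  // stream order. A sketch of the wiring (rpc.read() stands in for whatever dispatches the
  // streaming call; its exact signature is an assumption here):
  //
  //   GrpcStreamIterator it = new GrpcStreamIterator(/* prefetchChunks= */ 4);
  //   it.setCall(rpc.read(request, it.consumer(), options));
  //   while (it.hasNext()) {
  //     consume(it.next());
  //   }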
  /**
   * Wraps an iterator over partial result sets, supporting resuming RPCs on error. This class
   * keeps track of the most recent resume token seen, and will buffer partial result set
   * chunks that do not have a resume token until one is seen or buffer space is exceeded,
   * which reduces the chance of yielding data to the caller that cannot be resumed.
   */
  @VisibleForTesting
  abstract static class ResumableStreamIterator extends AbstractIterator<PartialResultSet>
      implements CloseableIterator<PartialResultSet> {
    private final BackOff backOff = newBackOff();
    private final LinkedList<PartialResultSet> buffer = new LinkedList<>();
    private final int maxBufferSize;
    private CloseableIterator<PartialResultSet> stream;
    private ByteString resumeToken;
    private boolean finished;

    /**
     * Indicates whether it is currently safe to retry RPCs. This will be {@code false} if we
     * have reached the maximum buffer size without seeing a restart token; in this case, we
     * will drain the buffer and remain in this state until we see a new restart token.
     */
    private boolean safeToRetry = true;

    protected ResumableStreamIterator(int maxBufferSize) {
      checkArgument(maxBufferSize >= 0);
      this.maxBufferSize = maxBufferSize;
    }

    abstract CloseableIterator<PartialResultSet> startStream(@Nullable ByteString resumeToken);

    @Override
    public void close(@Nullable String message) {
      if (stream != null) {
        stream.close(message);
      }
    }

    @Override
    protected PartialResultSet computeNext() {
      Context context = Context.current();
      while (true) {
        // Eagerly start stream before consuming any buffered items.
        if (stream == null) {
          stream = checkNotNull(startStream(resumeToken));
        }
        // Buffer contains items up to a resume token or has reached capacity: flush.
        if (!buffer.isEmpty()
            && (finished || !safeToRetry || !buffer.getLast().getResumeToken().isEmpty())) {
          return buffer.pop();
        }
        try {
          if (stream.hasNext()) {
            PartialResultSet next = stream.next();
            boolean hasResumeToken = !next.getResumeToken().isEmpty();
            if (hasResumeToken) {
              resumeToken = next.getResumeToken();
              safeToRetry = true;
            }
            // If the buffer is empty and this chunk has a resume token or we cannot resume
            // safely anyway, we can yield it immediately rather than placing it in the
            // buffer to be returned on the next iteration.
            if ((hasResumeToken || !safeToRetry) && buffer.isEmpty()) {
              return next;
            }
            buffer.add(next);
            if (buffer.size() > maxBufferSize && buffer.getLast().getResumeToken().isEmpty()) {
              // We need to flush without a restart token. Errors encountered until we see
              // such a token will fail the read.
              safeToRetry = false;
            }
          } else {
            finished = true;
            if (buffer.isEmpty()) {
              endOfData();
              return null;
            }
          }
        } catch (SpannerException e) {
          if (safeToRetry && e.isRetryable()) {
            logger.log(Level.FINE, "Retryable exception, will sleep and retry", e);
            // Truncate any items in the buffer before the last retry token.
            while (!buffer.isEmpty() && buffer.getLast().getResumeToken().isEmpty()) {
              buffer.removeLast();
            }
            assert buffer.isEmpty() || buffer.getLast().getResumeToken().equals(resumeToken);
            stream = null;
            backoffSleep(context, backOff);
            continue;
          }
          throw e;
        }
      }
    }
  }
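  // Illustrative sketch (not from the upstream source): subclasses only supply
  // startStream(); buffering, resume-token tracking, and backoff all live above. A minimal
  // subclass, assuming a hypothetical newRpcStream() factory and MAX_BUFFERED_CHUNKS
  // constant:
  //
  //   ResumableStreamIterator it =
  //       new ResumableStreamIterator(MAX_BUFFERED_CHUNKS) {
  //         @Override
  //         CloseableIterator<PartialResultSet> startStream(@Nullable ByteString resumeToken) {
  //           // resumeToken is null on the first call; on a retry it is the most recent
  //           // token already yielded, so the server resumes just after that point.
  //           return newRpcStream(resumeToken);
  //         }
  //       };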
  /**
   * Adapts a stream of {@code PartialResultSet} messages into a stream of {@code Value}
   * messages.
   */
  private static class GrpcValueIterator
      extends AbstractIterator<nl.topicus.jdbc.shaded.com.google.protobuf.Value> {
    private enum StreamValue {
      METADATA,
      RESULT,
    }

    private final CloseableIterator<PartialResultSet> stream;
    private ResultSetMetadata metadata;
    private Type type;
    private PartialResultSet current;
    private int pos;
    private ResultSetStats statistics;

    GrpcValueIterator(CloseableIterator<PartialResultSet> stream) {
      this.stream = stream;
    }

    @SuppressWarnings("unchecked")
    @Override
    protected nl.topicus.jdbc.shaded.com.google.protobuf.Value computeNext() {
      if (!ensureReady(StreamValue.RESULT)) {
        endOfData();
        return null;
      }
      nl.topicus.jdbc.shaded.com.google.protobuf.Value value = current.getValues(pos++);
      KindCase kind = value.getKindCase();

      if (!isMergeable(kind)) {
        if (pos == current.getValuesCount() && current.getChunkedValue()) {
          throw newSpannerException(ErrorCode.INTERNAL, "Unexpected chunked PartialResultSet.");
        } else {
          return value;
        }
      }
      if (!current.getChunkedValue() || pos != current.getValuesCount()) {
        return value;
      }

      Object merged =
          kind == KindCase.STRING_VALUE
              ? value.getStringValue()
              : new ArrayList<>(value.getListValue().getValuesList());
      while (current.getChunkedValue() && pos == current.getValuesCount()) {
        if (!ensureReady(StreamValue.RESULT)) {
          throw newSpannerException(
              ErrorCode.INTERNAL, "Stream closed in the middle of chunked value");
        }
        nl.topicus.jdbc.shaded.com.google.protobuf.Value newValue = current.getValues(pos++);
        if (newValue.getKindCase() != kind) {
          throw newSpannerException(
              ErrorCode.INTERNAL,
              "Unexpected type in middle of chunked value. Expected: "
                  + kind
                  + " but got: "
                  + newValue.getKindCase());
        }
        if (kind == KindCase.STRING_VALUE) {
          merged = (String) merged + newValue.getStringValue();
        } else {
          concatLists(
              (List<nl.topicus.jdbc.shaded.com.google.protobuf.Value>) merged,
              newValue.getListValue().getValuesList());
        }
      }
      if (kind == KindCase.STRING_VALUE) {
        return nl.topicus.jdbc.shaded.com.google.protobuf.Value.newBuilder()
            .setStringValue((String) merged)
            .build();
      } else {
        return nl.topicus.jdbc.shaded.com.google.protobuf.Value.newBuilder()
            .setListValue(
                ListValue.newBuilder()
                    .addAllValues((List<nl.topicus.jdbc.shaded.com.google.protobuf.Value>) merged))
            .build();
      }
    }

    ResultSetMetadata getMetadata() throws SpannerException {
      if (metadata == null) {
        if (!ensureReady(StreamValue.METADATA)) {
          throw newSpannerException(ErrorCode.INTERNAL, "Stream closed without sending metadata");
        }
      }
      return metadata;
    }

    /*
     * Get the query statistics. Query statistics are delivered with the last PartialResultSet
     * in the stream. Any attempt to call this method before the caller has finished consuming
     * the results will throw an exception.
     */
    ResultSetStats getStats() {
      if (statistics == null) {
        throw newSpannerException(
            ErrorCode.INTERNAL, "Stream closed without sending query statistics");
      }
      return statistics;
    }

    Type type() {
      checkState(type != null, "metadata has not been received");
      return type;
    }

    private boolean ensureReady(StreamValue requiredValue) throws SpannerException {
      while (current == null || pos >= current.getValuesCount()) {
        if (!stream.hasNext()) {
          return false;
        }
        current = stream.next();
        pos = 0;
        if (type == null) {
          // This is the first message on the stream.
          if (!current.hasMetadata() || !current.getMetadata().hasRowType()) {
            throw newSpannerException(
                ErrorCode.INTERNAL, "Missing type metadata in first message");
          }
          metadata = current.getMetadata();
          nl.topicus.jdbc.shaded.com.google.spanner.v1.Type typeProto =
              nl.topicus.jdbc.shaded.com.google.spanner.v1.Type.newBuilder()
                  .setCode(TypeCode.STRUCT)
                  .setStructType(metadata.getRowType())
                  .build();
          try {
            type = Type.fromProto(typeProto);
          } catch (IllegalArgumentException e) {
            throw newSpannerException(
                ErrorCode.INTERNAL, "Invalid type metadata: " + e.getMessage(), e);
          }
        }
        if (current.hasStats()) {
          statistics = current.getStats();
        }
        if (requiredValue == StreamValue.METADATA) {
          return true;
        }
      }
      return true;
    }

    void close(@Nullable String message) {
      stream.close(message);
    }

    /*
     * @param a is a mutable list; b will be concatenated into a.
     */
    private void concatLists(
        List<nl.topicus.jdbc.shaded.com.google.protobuf.Value> a,
        List<nl.topicus.jdbc.shaded.com.google.protobuf.Value> b) {
      if (a.size() == 0 || b.size() == 0) {
        a.addAll(b);
        return;
      } else {
        nl.topicus.jdbc.shaded.com.google.protobuf.Value last = a.get(a.size() - 1);
        nl.topicus.jdbc.shaded.com.google.protobuf.Value first = b.get(0);
        KindCase lastKind = last.getKindCase();
        KindCase firstKind = first.getKindCase();
        if (isMergeable(lastKind) && lastKind == firstKind) {
          nl.topicus.jdbc.shaded.com.google.protobuf.Value merged = null;
          if (lastKind == KindCase.STRING_VALUE) {
            String lastStr = last.getStringValue();
            String firstStr = first.getStringValue();
            merged =
                nl.topicus.jdbc.shaded.com.google.protobuf.Value.newBuilder()
                    .setStringValue(lastStr + firstStr)
                    .build();
          } else { // List
            List<nl.topicus.jdbc.shaded.com.google.protobuf.Value> mergedList = new ArrayList<>();
            mergedList.addAll(last.getListValue().getValuesList());
            concatLists(mergedList, first.getListValue().getValuesList());
            merged =
                nl.topicus.jdbc.shaded.com.google.protobuf.Value.newBuilder()
                    .setListValue(ListValue.newBuilder().addAllValues(mergedList))
                    .build();
          }
          a.set(a.size() - 1, merged);
          a.addAll(b.subList(1, b.size()));
        } else {
          a.addAll(b);
        }
      }
    }

    private boolean isMergeable(KindCase kind) {
      return kind == KindCase.STRING_VALUE || kind == KindCase.LIST_VALUE;
    }
  }
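  // Illustrative note (not from the upstream source): a "chunked" value is a single cell
  // split across consecutive PartialResultSet messages; the last value of one message is
  // merged with the first value of the next. For STRING_VALUE the merge is concatenation,
  // so a cell holding "Hello, World!" may arrive as:
  //
  //   PartialResultSet 1: values=["Hel"],     chunkedValue=true
  //   PartialResultSet 2: values=["lo, Wor"], chunkedValue=true
  //   PartialResultSet 3: values=["ld!"],     chunkedValue=false
  //
  // and computeNext() above yields one merged Value, "Hello, World!". LIST_VALUE chunks
  // merge through concatLists(), which recurses so that chunked strings and lists nested
  // inside lists are stitched back together as well.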
  private static double valueProtoToFloat64(nl.topicus.jdbc.shaded.com.google.protobuf.Value proto) {
    if (proto.getKindCase() == KindCase.STRING_VALUE) {
      switch (proto.getStringValue()) {
        case "-Infinity":
          return Double.NEGATIVE_INFINITY;
        case "Infinity":
          return Double.POSITIVE_INFINITY;
        case "NaN":
          return Double.NaN;
        default:
          // Fall through to the handling below to produce an error.
      }
    }
    if (proto.getKindCase() != KindCase.NUMBER_VALUE) {
      throw newSpannerException(
          ErrorCode.INTERNAL,
          "Invalid value for column type "
              + Type.float64()
              + " expected NUMBER_VALUE or STRING_VALUE with value one of"
              + " \"Infinity\", \"-Infinity\", or \"NaN\" but was "
              + proto.getKindCase()
              + (proto.getKindCase() == KindCase.STRING_VALUE
                  ? " with value \"" + proto.getStringValue() + "\""
                  : ""));
    }
    return proto.getNumberValue();
  }

  private static NullPointerException throwNotNull(int columnIndex) {
    throw new NullPointerException(
        "Cannot call array getter for column " + columnIndex + " with null elements");
  }

  /**
   * Memory-optimized base class for {@code ARRAY<INT64>} and {@code ARRAY<FLOAT64>} types.
   * Both of these involve conversions from the type yielded by JSON parsing, which are
   * {@code String} and {@code BigDecimal} respectively. Rather than construct new wrapper
   * objects for each array element, we use primitive arrays and a {@code BitSet} to track
   * nulls.
   */
  private abstract static class PrimitiveArray<T, A> extends AbstractList<T> {
    private final A data;
    private final BitSet nulls;
    private final int size;

    PrimitiveArray(ListValue protoList) {
      this.size = protoList.getValuesCount();
      A data = newArray(size);
      BitSet nulls = new BitSet(size);
      for (int i = 0; i < protoList.getValuesCount(); ++i) {
        if (protoList.getValues(i).getKindCase() == KindCase.NULL_VALUE) {
          nulls.set(i);
        } else {
          setProto(data, i, protoList.getValues(i));
        }
      }
      this.data = data;
      this.nulls = nulls;
    }

    PrimitiveArray(A data, BitSet nulls, int size) {
      this.data = data;
      this.nulls = nulls;
      this.size = size;
    }

    abstract A newArray(int size);

    abstract void setProto(A array, int i, nl.topicus.jdbc.shaded.com.google.protobuf.Value protoValue);

    abstract T get(A array, int i);

    @Override
    public T get(int index) {
      if (index < 0 || index >= size) {
        throw new ArrayIndexOutOfBoundsException("index=" + index + " size=" + size);
      }
      return nulls.get(index) ? null : get(data, index);
    }

    @Override
    public int size() {
      return size;
    }

    A toPrimitiveArray(int columnIndex) {
      if (nulls.length() > 0) {
        throw throwNotNull(columnIndex);
      }
      A r = newArray(size);
      System.arraycopy(data, 0, r, 0, size);
      return r;
    }
  }

  private static class Int64Array extends PrimitiveArray<Long, long[]> {
    Int64Array(ListValue protoList) {
      super(protoList);
    }

    Int64Array(long[] data, BitSet nulls) {
      super(data, nulls, data.length);
    }

    @Override
    long[] newArray(int size) {
      return new long[size];
    }

    @Override
    void setProto(long[] array, int i, nl.topicus.jdbc.shaded.com.google.protobuf.Value protoValue) {
      array[i] = Long.parseLong(protoValue.getStringValue());
    }

    @Override
    Long get(long[] array, int i) {
      return array[i];
    }
  }

  private static class Float64Array extends PrimitiveArray<Double, double[]> {
    Float64Array(ListValue protoList) {
      super(protoList);
    }

    Float64Array(double[] data, BitSet nulls) {
      super(data, nulls, data.length);
    }

    @Override
    double[] newArray(int size) {
      return new double[size];
    }

    @Override
    void setProto(double[] array, int i, nl.topicus.jdbc.shaded.com.google.protobuf.Value protoValue) {
      array[i] = valueProtoToFloat64(protoValue);
    }

    @Override
    Double get(double[] array, int i) {
      return array[i];
    }
  }
}
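// Illustrative sketch (not from the upstream source): PrimitiveArray stores an
// ARRAY<INT64>/ARRAY<FLOAT64> column as one primitive array plus a BitSet of null
// positions, so decoding allocates no Long/Double wrappers; boxing happens only on get().
// nullAt1 below is a hypothetical BitSet marking index 1 as NULL:
//
//   BitSet nullAt1 = new BitSet();
//   nullAt1.set(1);
//   Int64Array arr = new Int64Array(new long[] {1L, 0L, 3L}, nullAt1);
//   arr.get(0); // Long.valueOf(1L)
//   arr.get(1); // null
//   arr.toPrimitiveArray(0); // throws NullPointerException: column has null elements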