/*
 * (Page residue from the site this file was captured from — not part of the original source:)
 * All Downloads are FREE. Search and download functionalities are using the official Maven repository.
 * software.amazon.awssdk.services.redshiftdata.DefaultRedshiftDataClient — Maven / Gradle / Ivy
 */

/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * 
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 * 
 * http://aws.amazon.com/apache2.0
 * 
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */

package software.amazon.awssdk.services.redshiftdata;

import java.util.Collections;
import java.util.List;
import java.util.function.Consumer;
import software.amazon.awssdk.annotations.Generated;
import software.amazon.awssdk.annotations.SdkInternalApi;
import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration;
import software.amazon.awssdk.awscore.client.handler.AwsSyncClientHandler;
import software.amazon.awssdk.awscore.exception.AwsServiceException;
import software.amazon.awssdk.core.ApiName;
import software.amazon.awssdk.core.RequestOverrideConfiguration;
import software.amazon.awssdk.core.client.config.SdkClientConfiguration;
import software.amazon.awssdk.core.client.config.SdkClientOption;
import software.amazon.awssdk.core.client.handler.ClientExecutionParams;
import software.amazon.awssdk.core.client.handler.SyncClientHandler;
import software.amazon.awssdk.core.exception.SdkClientException;
import software.amazon.awssdk.core.http.HttpResponseHandler;
import software.amazon.awssdk.core.metrics.CoreMetric;
import software.amazon.awssdk.core.util.VersionInfo;
import software.amazon.awssdk.metrics.MetricCollector;
import software.amazon.awssdk.metrics.MetricPublisher;
import software.amazon.awssdk.metrics.NoOpMetricCollector;
import software.amazon.awssdk.protocols.core.ExceptionMetadata;
import software.amazon.awssdk.protocols.json.AwsJsonProtocol;
import software.amazon.awssdk.protocols.json.AwsJsonProtocolFactory;
import software.amazon.awssdk.protocols.json.BaseAwsJsonProtocolFactory;
import software.amazon.awssdk.protocols.json.JsonOperationMetadata;
import software.amazon.awssdk.services.redshiftdata.model.ActiveStatementsExceededException;
import software.amazon.awssdk.services.redshiftdata.model.BatchExecuteStatementException;
import software.amazon.awssdk.services.redshiftdata.model.BatchExecuteStatementRequest;
import software.amazon.awssdk.services.redshiftdata.model.BatchExecuteStatementResponse;
import software.amazon.awssdk.services.redshiftdata.model.CancelStatementRequest;
import software.amazon.awssdk.services.redshiftdata.model.CancelStatementResponse;
import software.amazon.awssdk.services.redshiftdata.model.DatabaseConnectionException;
import software.amazon.awssdk.services.redshiftdata.model.DescribeStatementRequest;
import software.amazon.awssdk.services.redshiftdata.model.DescribeStatementResponse;
import software.amazon.awssdk.services.redshiftdata.model.DescribeTableRequest;
import software.amazon.awssdk.services.redshiftdata.model.DescribeTableResponse;
import software.amazon.awssdk.services.redshiftdata.model.ExecuteStatementException;
import software.amazon.awssdk.services.redshiftdata.model.ExecuteStatementRequest;
import software.amazon.awssdk.services.redshiftdata.model.ExecuteStatementResponse;
import software.amazon.awssdk.services.redshiftdata.model.GetStatementResultRequest;
import software.amazon.awssdk.services.redshiftdata.model.GetStatementResultResponse;
import software.amazon.awssdk.services.redshiftdata.model.InternalServerException;
import software.amazon.awssdk.services.redshiftdata.model.ListDatabasesRequest;
import software.amazon.awssdk.services.redshiftdata.model.ListDatabasesResponse;
import software.amazon.awssdk.services.redshiftdata.model.ListSchemasRequest;
import software.amazon.awssdk.services.redshiftdata.model.ListSchemasResponse;
import software.amazon.awssdk.services.redshiftdata.model.ListStatementsRequest;
import software.amazon.awssdk.services.redshiftdata.model.ListStatementsResponse;
import software.amazon.awssdk.services.redshiftdata.model.ListTablesRequest;
import software.amazon.awssdk.services.redshiftdata.model.ListTablesResponse;
import software.amazon.awssdk.services.redshiftdata.model.RedshiftDataException;
import software.amazon.awssdk.services.redshiftdata.model.RedshiftDataRequest;
import software.amazon.awssdk.services.redshiftdata.model.ResourceNotFoundException;
import software.amazon.awssdk.services.redshiftdata.model.ValidationException;
import software.amazon.awssdk.services.redshiftdata.paginators.DescribeTableIterable;
import software.amazon.awssdk.services.redshiftdata.paginators.GetStatementResultIterable;
import software.amazon.awssdk.services.redshiftdata.paginators.ListDatabasesIterable;
import software.amazon.awssdk.services.redshiftdata.paginators.ListSchemasIterable;
import software.amazon.awssdk.services.redshiftdata.paginators.ListStatementsIterable;
import software.amazon.awssdk.services.redshiftdata.paginators.ListTablesIterable;
import software.amazon.awssdk.services.redshiftdata.transform.BatchExecuteStatementRequestMarshaller;
import software.amazon.awssdk.services.redshiftdata.transform.CancelStatementRequestMarshaller;
import software.amazon.awssdk.services.redshiftdata.transform.DescribeStatementRequestMarshaller;
import software.amazon.awssdk.services.redshiftdata.transform.DescribeTableRequestMarshaller;
import software.amazon.awssdk.services.redshiftdata.transform.ExecuteStatementRequestMarshaller;
import software.amazon.awssdk.services.redshiftdata.transform.GetStatementResultRequestMarshaller;
import software.amazon.awssdk.services.redshiftdata.transform.ListDatabasesRequestMarshaller;
import software.amazon.awssdk.services.redshiftdata.transform.ListSchemasRequestMarshaller;
import software.amazon.awssdk.services.redshiftdata.transform.ListStatementsRequestMarshaller;
import software.amazon.awssdk.services.redshiftdata.transform.ListTablesRequestMarshaller;
import software.amazon.awssdk.utils.Logger;

/**
 * Internal implementation of {@link RedshiftDataClient}.
 *
 * @see RedshiftDataClient#builder()
 */
@Generated("software.amazon.awssdk:codegen")
@SdkInternalApi
final class DefaultRedshiftDataClient implements RedshiftDataClient {
    private static final Logger log = Logger.loggerFor(DefaultRedshiftDataClient.class);

    // Executes the synchronous request/response pipeline for every operation on this client.
    private final SyncClientHandler clientHandler;

    // Marshalls requests and unmarshalls responses/errors using the AWS JSON protocol.
    private final AwsJsonProtocolFactory protocolFactory;

    // Low-level SDK configuration (used here to resolve metric publishers per request).
    private final SdkClientConfiguration clientConfiguration;

    // Service-level client configuration supplied at construction.
    private final RedshiftDataServiceClientConfiguration serviceClientConfiguration;

    /**
     * Builds the client around the supplied configurations: records both configuration objects, wires up a
     * synchronous AWS client handler, and initializes the JSON protocol factory used to (un)marshall requests.
     *
     * @param serviceClientConfiguration service-level client configuration
     * @param clientConfiguration        low-level SDK client configuration
     */
    protected DefaultRedshiftDataClient(RedshiftDataServiceClientConfiguration serviceClientConfiguration,
            SdkClientConfiguration clientConfiguration) {
        this.serviceClientConfiguration = serviceClientConfiguration;
        this.clientConfiguration = clientConfiguration;
        this.clientHandler = new AwsSyncClientHandler(clientConfiguration);
        this.protocolFactory = init(AwsJsonProtocolFactory.builder()).build();
    }

    /**
     * <p>
     * Runs one or more SQL statements, which can be data manipulation language (DML) or data definition language
     * (DDL). Depending on the authorization method, use one of the following combinations of request parameters:
     * </p>
     * <ul>
     * <li>Secrets Manager - provide the <code>secret-arn</code> of a secret stored in Secrets Manager which has
     * <code>username</code> and <code>password</code>, plus the database name. If a cluster identifier
     * (<code>dbClusterIdentifier</code>) is provided it must match the one stored in the secret.</li>
     * <li>Temporary credentials - specify the workgroup name (serverless) or cluster identifier, plus the database
     * name; the database user name is derived from the IAM identity (e.g. <code>IAM:foo</code>) unless a database
     * user is supplied. Requires permission to call <code>redshift-serverless:GetCredentials</code>,
     * <code>redshift:GetClusterCredentialsWithIAM</code>, or <code>redshift:GetClusterCredentials</code>
     * respectively.</li>
     * </ul>
     * <p>
     * For more information about the Amazon Redshift Data API and CLI usage examples, see "Using the Amazon Redshift
     * Data API" in the Amazon Redshift Management Guide.
     * </p>
     *
     * @param batchExecuteStatementRequest the batch-execute request
     * @return Result of the BatchExecuteStatement operation returned by the service.
     * @throws ValidationException The Amazon Redshift Data API operation failed due to invalid input.
     * @throws ActiveStatementsExceededException The number of active statements exceeds the limit.
     * @throws BatchExecuteStatementException An SQL statement encountered an environmental error while running.
     * @throws SdkClientException If any client side error occurs such as an IO related failure, failure to get
     *         credentials, etc.
     * @throws RedshiftDataException Base class for all service exceptions. Unknown exceptions are thrown as an
     *         instance of this type.
     */
    @Override
    public BatchExecuteStatementResponse batchExecuteStatement(BatchExecuteStatementRequest batchExecuteStatementRequest)
            throws ValidationException, ActiveStatementsExceededException, BatchExecuteStatementException,
            AwsServiceException, SdkClientException, RedshiftDataException {
        JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false)
                .isPayloadJson(true).build();

        // Generic type parameters restored below; the mangled source had stripped them, leaving raw types.
        HttpResponseHandler<BatchExecuteStatementResponse> responseHandler = protocolFactory.createResponseHandler(
                operationMetadata, BatchExecuteStatementResponse::builder);

        HttpResponseHandler<AwsServiceException> errorResponseHandler = createErrorResponseHandler(protocolFactory,
                operationMetadata);
        List<MetricPublisher> metricPublishers = resolveMetricPublishers(clientConfiguration,
                batchExecuteStatementRequest.overrideConfiguration().orElse(null));
        // Skip collection entirely when nobody will consume the metrics.
        MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? NoOpMetricCollector.create()
                : MetricCollector.create("ApiCall");
        try {
            apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Redshift Data");
            apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "BatchExecuteStatement");

            return clientHandler
                    .execute(new ClientExecutionParams<BatchExecuteStatementRequest, BatchExecuteStatementResponse>()
                            .withOperationName("BatchExecuteStatement").withResponseHandler(responseHandler)
                            .withErrorResponseHandler(errorResponseHandler).withInput(batchExecuteStatementRequest)
                            .withMetricCollector(apiCallMetricCollector)
                            .withMarshaller(new BatchExecuteStatementRequestMarshaller(protocolFactory)));
        } finally {
            metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect()));
        }
    }

    /**

* Cancels a running query. To be canceled, a query must be running. *

*

* For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in * the Amazon Redshift Management Guide. *

* * @param cancelStatementRequest * @return Result of the CancelStatement operation returned by the service. * @throws ValidationException * The Amazon Redshift Data API operation failed due to invalid input. * @throws ResourceNotFoundException * The Amazon Redshift Data API operation failed due to a missing resource. * @throws InternalServerException * The Amazon Redshift Data API operation failed due to invalid input. * @throws DatabaseConnectionException * Connection to a database failed. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for * catch all scenarios. * @throws SdkClientException * If any client side error occurs such as an IO related failure, failure to get credentials, etc. * @throws RedshiftDataException * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. * @sample RedshiftDataClient.CancelStatement * @see AWS * API Documentation */ @Override public CancelStatementResponse cancelStatement(CancelStatementRequest cancelStatementRequest) throws ValidationException, ResourceNotFoundException, InternalServerException, DatabaseConnectionException, AwsServiceException, SdkClientException, RedshiftDataException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, CancelStatementResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata); List metricPublishers = resolveMetricPublishers(clientConfiguration, cancelStatementRequest .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Redshift Data"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "CancelStatement"); return clientHandler.execute(new ClientExecutionParams() .withOperationName("CancelStatement").withResponseHandler(responseHandler) .withErrorResponseHandler(errorResponseHandler).withInput(cancelStatementRequest) .withMetricCollector(apiCallMetricCollector) .withMarshaller(new CancelStatementRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } } /** *

* Describes the details about a specific instance when a query was run by the Amazon Redshift Data API. The * information includes when the query started, when it finished, the query status, the number of rows returned, and * the SQL statement. *

*

* For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in * the Amazon Redshift Management Guide. *

* * @param describeStatementRequest * @return Result of the DescribeStatement operation returned by the service. * @throws ValidationException * The Amazon Redshift Data API operation failed due to invalid input. * @throws ResourceNotFoundException * The Amazon Redshift Data API operation failed due to a missing resource. * @throws InternalServerException * The Amazon Redshift Data API operation failed due to invalid input. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for * catch all scenarios. * @throws SdkClientException * If any client side error occurs such as an IO related failure, failure to get credentials, etc. * @throws RedshiftDataException * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. * @sample RedshiftDataClient.DescribeStatement * @see AWS API Documentation */ @Override public DescribeStatementResponse describeStatement(DescribeStatementRequest describeStatementRequest) throws ValidationException, ResourceNotFoundException, InternalServerException, AwsServiceException, SdkClientException, RedshiftDataException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, DescribeStatementResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata); List metricPublishers = resolveMetricPublishers(clientConfiguration, describeStatementRequest .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Redshift Data"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DescribeStatement"); return clientHandler.execute(new ClientExecutionParams() .withOperationName("DescribeStatement").withResponseHandler(responseHandler) .withErrorResponseHandler(errorResponseHandler).withInput(describeStatementRequest) .withMetricCollector(apiCallMetricCollector) .withMarshaller(new DescribeStatementRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } } /** *

* Describes the detailed information about a table from metadata in the cluster. The information includes its * columns. A token is returned to page through the column list. Depending on the authorization method, use one of * the following combinations of request parameters: *

*
    *
  • *

    * Secrets Manager - when connecting to a cluster, provide the secret-arn of a secret stored in Secrets * Manager which has username and password. The specified secret contains credentials to * connect to the database you specify. When you are connecting to a cluster, you also supply the * database name, If you provide a cluster identifier (dbClusterIdentifier), it must match the cluster * identifier stored in the secret. When you are connecting to a serverless workgroup, you also supply the database * name. *

    *
  • *
  • *

    * Temporary credentials - when connecting to your data warehouse, choose one of the following options: *

    *
      *
    • *

      * When connecting to a serverless workgroup, specify the workgroup name and database name. The database user name * is derived from the IAM identity. For example, arn:iam::123456789012:user:foo has the database user * name IAM:foo. Also, permission to call the redshift-serverless:GetCredentials operation * is required. *

      *
    • *
    • *

      * When connecting to a cluster as an IAM identity, specify the cluster identifier and the database name. The * database user name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo has * the database user name IAM:foo. Also, permission to call the * redshift:GetClusterCredentialsWithIAM operation is required. *

      *
    • *
    • *

      * When connecting to a cluster as a database user, specify the cluster identifier, the database name, and the * database user name. Also, permission to call the redshift:GetClusterCredentials operation is * required. *

      *
    • *
    *
  • *
*

* For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in * the Amazon Redshift Management Guide. *

* * @param describeTableRequest * @return Result of the DescribeTable operation returned by the service. * @throws ValidationException * The Amazon Redshift Data API operation failed due to invalid input. * @throws InternalServerException * The Amazon Redshift Data API operation failed due to invalid input. * @throws DatabaseConnectionException * Connection to a database failed. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for * catch all scenarios. * @throws SdkClientException * If any client side error occurs such as an IO related failure, failure to get credentials, etc. * @throws RedshiftDataException * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. * @sample RedshiftDataClient.DescribeTable * @see AWS * API Documentation */ @Override public DescribeTableResponse describeTable(DescribeTableRequest describeTableRequest) throws ValidationException, InternalServerException, DatabaseConnectionException, AwsServiceException, SdkClientException, RedshiftDataException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, DescribeTableResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata); List metricPublishers = resolveMetricPublishers(clientConfiguration, describeTableRequest .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Redshift Data"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "DescribeTable"); return clientHandler.execute(new ClientExecutionParams() .withOperationName("DescribeTable").withResponseHandler(responseHandler) .withErrorResponseHandler(errorResponseHandler).withInput(describeTableRequest) .withMetricCollector(apiCallMetricCollector) .withMarshaller(new DescribeTableRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } } /** *

/**
 * <p>
 * Describes the detailed information about a table from metadata in the cluster (columns, with a token to page
 * through the column list). This is a variant of
 * {@link #describeTable(software.amazon.awssdk.services.redshiftdata.model.DescribeTableRequest)}: the return
 * type is a custom iterable that pages through all results, with the SDK making the service calls internally.
 * </p>
 * <p>
 * When this operation is called, a custom iterable is returned but no service calls are made yet, so there is no
 * guarantee that the request is valid. As you iterate, the SDK lazily loads response pages by making service
 * calls until no pages are left or your iteration stops; request failures only surface once iteration starts.
 * </p>
 * <p>
 * Ways to iterate through the response pages:
 * </p>
 * 1) Using a stream:
 *
 * <pre>
 * {@code
 * software.amazon.awssdk.services.redshiftdata.paginators.DescribeTableIterable responses = client.describeTablePaginator(request);
 * responses.stream().forEach(....);
 * }
 * </pre>
 *
 * 2) Using a for loop:
 *
 * <pre>
 * {@code
 * software.amazon.awssdk.services.redshiftdata.paginators.DescribeTableIterable responses = client
 *         .describeTablePaginator(request);
 * for (software.amazon.awssdk.services.redshiftdata.model.DescribeTableResponse response : responses) {
 *     // do something;
 * }
 * }
 * </pre>
 *
 * 3) Using the iterator directly:
 *
 * <pre>
 * {@code
 * software.amazon.awssdk.services.redshiftdata.paginators.DescribeTableIterable responses = client.describeTablePaginator(request);
 * responses.iterator().forEachRemaining(....);
 * }
 * </pre>
 * <p>
 * Note that the <code>MaxResults</code> setting does not limit the total number of results the paginator
 * returns; it only limits the number of results in each page. If you prefer to control the service calls
 * yourself, use {@link #describeTable(software.amazon.awssdk.services.redshiftdata.model.DescribeTableRequest)}.
 * </p>
 *
 * @param describeTableRequest the describe-table request
 * @return A custom iterable that can be used to iterate through all the response pages.
 * @throws ValidationException The Amazon Redshift Data API operation failed due to invalid input.
 * @throws InternalServerException The Amazon Redshift Data API operation failed due to invalid input.
 * @throws DatabaseConnectionException Connection to a database failed.
 * @throws SdkClientException If any client side error occurs such as an IO related failure, failure to get
 *         credentials, etc.
 * @throws RedshiftDataException Base class for all service exceptions. Unknown exceptions are thrown as an
 *         instance of this type.
 */
@Override
public DescribeTableIterable describeTablePaginator(DescribeTableRequest describeTableRequest)
        throws ValidationException, InternalServerException, DatabaseConnectionException, AwsServiceException,
        SdkClientException, RedshiftDataException {
    // Lazy: construction makes no service call; pages are fetched during iteration.
    return new DescribeTableIterable(this, applyPaginatorUserAgent(describeTableRequest));
}

/**

* Runs an SQL statement, which can be data manipulation language (DML) or data definition language (DDL). This * statement must be a single SQL statement. Depending on the authorization method, use one of the following * combinations of request parameters: *

*
    *
  • *

    * Secrets Manager - when connecting to a cluster, provide the secret-arn of a secret stored in Secrets * Manager which has username and password. The specified secret contains credentials to * connect to the database you specify. When you are connecting to a cluster, you also supply the * database name, If you provide a cluster identifier (dbClusterIdentifier), it must match the cluster * identifier stored in the secret. When you are connecting to a serverless workgroup, you also supply the database * name. *

    *
  • *
  • *

    * Temporary credentials - when connecting to your data warehouse, choose one of the following options: *

    *
      *
    • *

      * When connecting to a serverless workgroup, specify the workgroup name and database name. The database user name * is derived from the IAM identity. For example, arn:iam::123456789012:user:foo has the database user * name IAM:foo. Also, permission to call the redshift-serverless:GetCredentials operation * is required. *

      *
    • *
    • *

      * When connecting to a cluster as an IAM identity, specify the cluster identifier and the database name. The * database user name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo has * the database user name IAM:foo. Also, permission to call the * redshift:GetClusterCredentialsWithIAM operation is required. *

      *
    • *
    • *

      * When connecting to a cluster as a database user, specify the cluster identifier, the database name, and the * database user name. Also, permission to call the redshift:GetClusterCredentials operation is * required. *

      *
    • *
    *
  • *
*

* For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in * the Amazon Redshift Management Guide. *

* * @param executeStatementRequest * @return Result of the ExecuteStatement operation returned by the service. * @throws ValidationException * The Amazon Redshift Data API operation failed due to invalid input. * @throws ExecuteStatementException * The SQL statement encountered an environmental error while running. * @throws ActiveStatementsExceededException * The number of active statements exceeds the limit. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for * catch all scenarios. * @throws SdkClientException * If any client side error occurs such as an IO related failure, failure to get credentials, etc. * @throws RedshiftDataException * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. * @sample RedshiftDataClient.ExecuteStatement * @see AWS API Documentation */ @Override public ExecuteStatementResponse executeStatement(ExecuteStatementRequest executeStatementRequest) throws ValidationException, ExecuteStatementException, ActiveStatementsExceededException, AwsServiceException, SdkClientException, RedshiftDataException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, ExecuteStatementResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata); List metricPublishers = resolveMetricPublishers(clientConfiguration, executeStatementRequest .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Redshift Data"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "ExecuteStatement"); return clientHandler.execute(new ClientExecutionParams() .withOperationName("ExecuteStatement").withResponseHandler(responseHandler) .withErrorResponseHandler(errorResponseHandler).withInput(executeStatementRequest) .withMetricCollector(apiCallMetricCollector) .withMarshaller(new ExecuteStatementRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } } /** *

* Fetches the temporarily cached result of an SQL statement. A token is returned to page through the statement * results. *

*

* For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in * the Amazon Redshift Management Guide. *

* * @param getStatementResultRequest * @return Result of the GetStatementResult operation returned by the service. * @throws ValidationException * The Amazon Redshift Data API operation failed due to invalid input. * @throws ResourceNotFoundException * The Amazon Redshift Data API operation failed due to a missing resource. * @throws InternalServerException * The Amazon Redshift Data API operation failed due to invalid input. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for * catch all scenarios. * @throws SdkClientException * If any client side error occurs such as an IO related failure, failure to get credentials, etc. * @throws RedshiftDataException * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. * @sample RedshiftDataClient.GetStatementResult * @see AWS API Documentation */ @Override public GetStatementResultResponse getStatementResult(GetStatementResultRequest getStatementResultRequest) throws ValidationException, ResourceNotFoundException, InternalServerException, AwsServiceException, SdkClientException, RedshiftDataException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler( operationMetadata, GetStatementResultResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata); List metricPublishers = resolveMetricPublishers(clientConfiguration, getStatementResultRequest .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Redshift Data"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "GetStatementResult"); return clientHandler.execute(new ClientExecutionParams() .withOperationName("GetStatementResult").withResponseHandler(responseHandler) .withErrorResponseHandler(errorResponseHandler).withInput(getStatementResultRequest) .withMetricCollector(apiCallMetricCollector) .withMarshaller(new GetStatementResultRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } } /** *

* Fetches the temporarily cached result of an SQL statement. A token is returned to page through the statement * results. *

*

* For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in * the Amazon Redshift Management Guide. *

*
*

* This is a variant of * {@link #getStatementResult(software.amazon.awssdk.services.redshiftdata.model.GetStatementResultRequest)} * operation. The return type is a custom iterable that can be used to iterate through all the pages. SDK will * internally handle making service calls for you. *

*

* When this operation is called, a custom iterable is returned but no service calls are made yet. So there is no * guarantee that the request is valid. As you iterate through the iterable, SDK will start lazily loading response * pages by making service calls until there are no pages left or your iteration stops. If there are errors in your * request, you will see the failures only after you start iterating through the iterable. *

* *

* The following are few ways to iterate through the response pages: *

* 1) Using a Stream * *
     * {@code
     * software.amazon.awssdk.services.redshiftdata.paginators.GetStatementResultIterable responses = client.getStatementResultPaginator(request);
     * responses.stream().forEach(....);
     * }
     * 
* * 2) Using For loop * *
     * {
     *     @code
     *     software.amazon.awssdk.services.redshiftdata.paginators.GetStatementResultIterable responses = client
     *             .getStatementResultPaginator(request);
     *     for (software.amazon.awssdk.services.redshiftdata.model.GetStatementResultResponse response : responses) {
     *         // do something;
     *     }
     * }
     * 
* * 3) Use iterator directly * *
     * {@code
     * software.amazon.awssdk.services.redshiftdata.paginators.GetStatementResultIterable responses = client.getStatementResultPaginator(request);
     * responses.iterator().forEachRemaining(....);
     * }
     * 
*

* Please notice that the configuration of null won't limit the number of results you get with the paginator. It * only limits the number of results in each page. *

*

* Note: If you prefer to have control on service calls, use the * {@link #getStatementResult(software.amazon.awssdk.services.redshiftdata.model.GetStatementResultRequest)} * operation. *

* * @param getStatementResultRequest * @return A custom iterable that can be used to iterate through all the response pages. * @throws ValidationException * The Amazon Redshift Data API operation failed due to invalid input. * @throws ResourceNotFoundException * The Amazon Redshift Data API operation failed due to a missing resource. * @throws InternalServerException * The Amazon Redshift Data API operation failed due to invalid input. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for * catch all scenarios. * @throws SdkClientException * If any client side error occurs such as an IO related failure, failure to get credentials, etc. * @throws RedshiftDataException * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. * @sample RedshiftDataClient.GetStatementResult * @see AWS API Documentation */ @Override public GetStatementResultIterable getStatementResultPaginator(GetStatementResultRequest getStatementResultRequest) throws ValidationException, ResourceNotFoundException, InternalServerException, AwsServiceException, SdkClientException, RedshiftDataException { return new GetStatementResultIterable(this, applyPaginatorUserAgent(getStatementResultRequest)); } /** *

* List the databases in a cluster. A token is returned to page through the database list. Depending on the * authorization method, use one of the following combinations of request parameters: *

*
    *
  • *

    * Secrets Manager - when connecting to a cluster, provide the secret-arn of a secret stored in Secrets * Manager which has username and password. The specified secret contains credentials to * connect to the database you specify. When you are connecting to a cluster, you also supply the * database name, If you provide a cluster identifier (dbClusterIdentifier), it must match the cluster * identifier stored in the secret. When you are connecting to a serverless workgroup, you also supply the database * name. *

    *
  • *
  • *

    * Temporary credentials - when connecting to your data warehouse, choose one of the following options: *

    *
      *
    • *

      * When connecting to a serverless workgroup, specify the workgroup name and database name. The database user name * is derived from the IAM identity. For example, arn:iam::123456789012:user:foo has the database user * name IAM:foo. Also, permission to call the redshift-serverless:GetCredentials operation * is required. *

      *
    • *
    • *

      * When connecting to a cluster as an IAM identity, specify the cluster identifier and the database name. The * database user name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo has * the database user name IAM:foo. Also, permission to call the * redshift:GetClusterCredentialsWithIAM operation is required. *

      *
    • *
    • *

      * When connecting to a cluster as a database user, specify the cluster identifier, the database name, and the * database user name. Also, permission to call the redshift:GetClusterCredentials operation is * required. *

      *
    • *
    *
  • *
*

* For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in * the Amazon Redshift Management Guide. *

* * @param listDatabasesRequest * @return Result of the ListDatabases operation returned by the service. * @throws ValidationException * The Amazon Redshift Data API operation failed due to invalid input. * @throws InternalServerException * The Amazon Redshift Data API operation failed due to invalid input. * @throws DatabaseConnectionException * Connection to a database failed. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for * catch all scenarios. * @throws SdkClientException * If any client side error occurs such as an IO related failure, failure to get credentials, etc. * @throws RedshiftDataException * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. * @sample RedshiftDataClient.ListDatabases * @see AWS * API Documentation */ @Override public ListDatabasesResponse listDatabases(ListDatabasesRequest listDatabasesRequest) throws ValidationException, InternalServerException, DatabaseConnectionException, AwsServiceException, SdkClientException, RedshiftDataException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, ListDatabasesResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata); List metricPublishers = resolveMetricPublishers(clientConfiguration, listDatabasesRequest .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Redshift Data"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "ListDatabases"); return clientHandler.execute(new ClientExecutionParams() .withOperationName("ListDatabases").withResponseHandler(responseHandler) .withErrorResponseHandler(errorResponseHandler).withInput(listDatabasesRequest) .withMetricCollector(apiCallMetricCollector) .withMarshaller(new ListDatabasesRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } } /** *

* List the databases in a cluster. A token is returned to page through the database list. Depending on the * authorization method, use one of the following combinations of request parameters: *

*
    *
  • *

    * Secrets Manager - when connecting to a cluster, provide the secret-arn of a secret stored in Secrets * Manager which has username and password. The specified secret contains credentials to * connect to the database you specify. When you are connecting to a cluster, you also supply the * database name, If you provide a cluster identifier (dbClusterIdentifier), it must match the cluster * identifier stored in the secret. When you are connecting to a serverless workgroup, you also supply the database * name. *

    *
  • *
  • *

    * Temporary credentials - when connecting to your data warehouse, choose one of the following options: *

    *
      *
    • *

      * When connecting to a serverless workgroup, specify the workgroup name and database name. The database user name * is derived from the IAM identity. For example, arn:iam::123456789012:user:foo has the database user * name IAM:foo. Also, permission to call the redshift-serverless:GetCredentials operation * is required. *

      *
    • *
    • *

      * When connecting to a cluster as an IAM identity, specify the cluster identifier and the database name. The * database user name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo has * the database user name IAM:foo. Also, permission to call the * redshift:GetClusterCredentialsWithIAM operation is required. *

      *
    • *
    • *

      * When connecting to a cluster as a database user, specify the cluster identifier, the database name, and the * database user name. Also, permission to call the redshift:GetClusterCredentials operation is * required. *

      *
    • *
    *
  • *
*

* For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in * the Amazon Redshift Management Guide. *

*
*

* This is a variant of * {@link #listDatabases(software.amazon.awssdk.services.redshiftdata.model.ListDatabasesRequest)} operation. The * return type is a custom iterable that can be used to iterate through all the pages. SDK will internally handle * making service calls for you. *

*

* When this operation is called, a custom iterable is returned but no service calls are made yet. So there is no * guarantee that the request is valid. As you iterate through the iterable, SDK will start lazily loading response * pages by making service calls until there are no pages left or your iteration stops. If there are errors in your * request, you will see the failures only after you start iterating through the iterable. *

* *

* The following are few ways to iterate through the response pages: *

* 1) Using a Stream * *
     * {@code
     * software.amazon.awssdk.services.redshiftdata.paginators.ListDatabasesIterable responses = client.listDatabasesPaginator(request);
     * responses.stream().forEach(....);
     * }
     * 
* * 2) Using For loop * *
     * {
     *     @code
     *     software.amazon.awssdk.services.redshiftdata.paginators.ListDatabasesIterable responses = client
     *             .listDatabasesPaginator(request);
     *     for (software.amazon.awssdk.services.redshiftdata.model.ListDatabasesResponse response : responses) {
     *         // do something;
     *     }
     * }
     * 
* * 3) Use iterator directly * *
     * {@code
     * software.amazon.awssdk.services.redshiftdata.paginators.ListDatabasesIterable responses = client.listDatabasesPaginator(request);
     * responses.iterator().forEachRemaining(....);
     * }
     * 
*

* Please notice that the configuration of MaxResults won't limit the number of results you get with the * paginator. It only limits the number of results in each page. *

*

* Note: If you prefer to have control on service calls, use the * {@link #listDatabases(software.amazon.awssdk.services.redshiftdata.model.ListDatabasesRequest)} operation. *

* * @param listDatabasesRequest * @return A custom iterable that can be used to iterate through all the response pages. * @throws ValidationException * The Amazon Redshift Data API operation failed due to invalid input. * @throws InternalServerException * The Amazon Redshift Data API operation failed due to invalid input. * @throws DatabaseConnectionException * Connection to a database failed. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for * catch all scenarios. * @throws SdkClientException * If any client side error occurs such as an IO related failure, failure to get credentials, etc. * @throws RedshiftDataException * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. * @sample RedshiftDataClient.ListDatabases * @see AWS * API Documentation */ @Override public ListDatabasesIterable listDatabasesPaginator(ListDatabasesRequest listDatabasesRequest) throws ValidationException, InternalServerException, DatabaseConnectionException, AwsServiceException, SdkClientException, RedshiftDataException { return new ListDatabasesIterable(this, applyPaginatorUserAgent(listDatabasesRequest)); } /** *

* Lists the schemas in a database. A token is returned to page through the schema list. Depending on the * authorization method, use one of the following combinations of request parameters: *

*
    *
  • *

    * Secrets Manager - when connecting to a cluster, provide the secret-arn of a secret stored in Secrets * Manager which has username and password. The specified secret contains credentials to * connect to the database you specify. When you are connecting to a cluster, you also supply the * database name, If you provide a cluster identifier (dbClusterIdentifier), it must match the cluster * identifier stored in the secret. When you are connecting to a serverless workgroup, you also supply the database * name. *

    *
  • *
  • *

    * Temporary credentials - when connecting to your data warehouse, choose one of the following options: *

    *
      *
    • *

      * When connecting to a serverless workgroup, specify the workgroup name and database name. The database user name * is derived from the IAM identity. For example, arn:iam::123456789012:user:foo has the database user * name IAM:foo. Also, permission to call the redshift-serverless:GetCredentials operation * is required. *

      *
    • *
    • *

      * When connecting to a cluster as an IAM identity, specify the cluster identifier and the database name. The * database user name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo has * the database user name IAM:foo. Also, permission to call the * redshift:GetClusterCredentialsWithIAM operation is required. *

      *
    • *
    • *

      * When connecting to a cluster as a database user, specify the cluster identifier, the database name, and the * database user name. Also, permission to call the redshift:GetClusterCredentials operation is * required. *

      *
    • *
    *
  • *
*

* For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in * the Amazon Redshift Management Guide. *

* * @param listSchemasRequest * @return Result of the ListSchemas operation returned by the service. * @throws ValidationException * The Amazon Redshift Data API operation failed due to invalid input. * @throws InternalServerException * The Amazon Redshift Data API operation failed due to invalid input. * @throws DatabaseConnectionException * Connection to a database failed. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for * catch all scenarios. * @throws SdkClientException * If any client side error occurs such as an IO related failure, failure to get credentials, etc. * @throws RedshiftDataException * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. * @sample RedshiftDataClient.ListSchemas * @see AWS API * Documentation */ @Override public ListSchemasResponse listSchemas(ListSchemasRequest listSchemasRequest) throws ValidationException, InternalServerException, DatabaseConnectionException, AwsServiceException, SdkClientException, RedshiftDataException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, ListSchemasResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata); List metricPublishers = resolveMetricPublishers(clientConfiguration, listSchemasRequest .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Redshift Data"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "ListSchemas"); return clientHandler.execute(new ClientExecutionParams() .withOperationName("ListSchemas").withResponseHandler(responseHandler) .withErrorResponseHandler(errorResponseHandler).withInput(listSchemasRequest) .withMetricCollector(apiCallMetricCollector) .withMarshaller(new ListSchemasRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } } /** *

* Lists the schemas in a database. A token is returned to page through the schema list. Depending on the * authorization method, use one of the following combinations of request parameters: *

*
    *
  • *

    * Secrets Manager - when connecting to a cluster, provide the secret-arn of a secret stored in Secrets * Manager which has username and password. The specified secret contains credentials to * connect to the database you specify. When you are connecting to a cluster, you also supply the * database name, If you provide a cluster identifier (dbClusterIdentifier), it must match the cluster * identifier stored in the secret. When you are connecting to a serverless workgroup, you also supply the database * name. *

    *
  • *
  • *

    * Temporary credentials - when connecting to your data warehouse, choose one of the following options: *

    *
      *
    • *

      * When connecting to a serverless workgroup, specify the workgroup name and database name. The database user name * is derived from the IAM identity. For example, arn:iam::123456789012:user:foo has the database user * name IAM:foo. Also, permission to call the redshift-serverless:GetCredentials operation * is required. *

      *
    • *
    • *

      * When connecting to a cluster as an IAM identity, specify the cluster identifier and the database name. The * database user name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo has * the database user name IAM:foo. Also, permission to call the * redshift:GetClusterCredentialsWithIAM operation is required. *

      *
    • *
    • *

      * When connecting to a cluster as a database user, specify the cluster identifier, the database name, and the * database user name. Also, permission to call the redshift:GetClusterCredentials operation is * required. *

      *
    • *
    *
  • *
*

* For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in * the Amazon Redshift Management Guide. *

*
*

* This is a variant of {@link #listSchemas(software.amazon.awssdk.services.redshiftdata.model.ListSchemasRequest)} * operation. The return type is a custom iterable that can be used to iterate through all the pages. SDK will * internally handle making service calls for you. *

*

* When this operation is called, a custom iterable is returned but no service calls are made yet. So there is no * guarantee that the request is valid. As you iterate through the iterable, SDK will start lazily loading response * pages by making service calls until there are no pages left or your iteration stops. If there are errors in your * request, you will see the failures only after you start iterating through the iterable. *

* *

* The following are few ways to iterate through the response pages: *

* 1) Using a Stream * *
     * {@code
     * software.amazon.awssdk.services.redshiftdata.paginators.ListSchemasIterable responses = client.listSchemasPaginator(request);
     * responses.stream().forEach(....);
     * }
     * 
* * 2) Using For loop * *
     * {
     *     @code
     *     software.amazon.awssdk.services.redshiftdata.paginators.ListSchemasIterable responses = client.listSchemasPaginator(request);
     *     for (software.amazon.awssdk.services.redshiftdata.model.ListSchemasResponse response : responses) {
     *         // do something;
     *     }
     * }
     * 
* * 3) Use iterator directly * *
     * {@code
     * software.amazon.awssdk.services.redshiftdata.paginators.ListSchemasIterable responses = client.listSchemasPaginator(request);
     * responses.iterator().forEachRemaining(....);
     * }
     * 
*

* Please notice that the configuration of MaxResults won't limit the number of results you get with the * paginator. It only limits the number of results in each page. *

*

* Note: If you prefer to have control on service calls, use the * {@link #listSchemas(software.amazon.awssdk.services.redshiftdata.model.ListSchemasRequest)} operation. *

* * @param listSchemasRequest * @return A custom iterable that can be used to iterate through all the response pages. * @throws ValidationException * The Amazon Redshift Data API operation failed due to invalid input. * @throws InternalServerException * The Amazon Redshift Data API operation failed due to invalid input. * @throws DatabaseConnectionException * Connection to a database failed. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for * catch all scenarios. * @throws SdkClientException * If any client side error occurs such as an IO related failure, failure to get credentials, etc. * @throws RedshiftDataException * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. * @sample RedshiftDataClient.ListSchemas * @see AWS API * Documentation */ @Override public ListSchemasIterable listSchemasPaginator(ListSchemasRequest listSchemasRequest) throws ValidationException, InternalServerException, DatabaseConnectionException, AwsServiceException, SdkClientException, RedshiftDataException { return new ListSchemasIterable(this, applyPaginatorUserAgent(listSchemasRequest)); } /** *

* List of SQL statements. By default, only finished statements are shown. A token is returned to page through the * statement list. *

*

* For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in * the Amazon Redshift Management Guide. *

* * @param listStatementsRequest * @return Result of the ListStatements operation returned by the service. * @throws ValidationException * The Amazon Redshift Data API operation failed due to invalid input. * @throws InternalServerException * The Amazon Redshift Data API operation failed due to invalid input. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for * catch all scenarios. * @throws SdkClientException * If any client side error occurs such as an IO related failure, failure to get credentials, etc. * @throws RedshiftDataException * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. * @sample RedshiftDataClient.ListStatements * @see AWS * API Documentation */ @Override public ListStatementsResponse listStatements(ListStatementsRequest listStatementsRequest) throws ValidationException, InternalServerException, AwsServiceException, SdkClientException, RedshiftDataException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, ListStatementsResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata); List metricPublishers = resolveMetricPublishers(clientConfiguration, listStatementsRequest .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Redshift Data"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "ListStatements"); return clientHandler.execute(new ClientExecutionParams() .withOperationName("ListStatements").withResponseHandler(responseHandler) .withErrorResponseHandler(errorResponseHandler).withInput(listStatementsRequest) .withMetricCollector(apiCallMetricCollector) .withMarshaller(new ListStatementsRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } } /** *

* List of SQL statements. By default, only finished statements are shown. A token is returned to page through the * statement list. *

*

* For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in * the Amazon Redshift Management Guide. *

*
*

* This is a variant of * {@link #listStatements(software.amazon.awssdk.services.redshiftdata.model.ListStatementsRequest)} operation. The * return type is a custom iterable that can be used to iterate through all the pages. SDK will internally handle * making service calls for you. *

*

* When this operation is called, a custom iterable is returned but no service calls are made yet. So there is no * guarantee that the request is valid. As you iterate through the iterable, SDK will start lazily loading response * pages by making service calls until there are no pages left or your iteration stops. If there are errors in your * request, you will see the failures only after you start iterating through the iterable. *

* *

* The following are few ways to iterate through the response pages: *

* 1) Using a Stream * *
     * {@code
     * software.amazon.awssdk.services.redshiftdata.paginators.ListStatementsIterable responses = client.listStatementsPaginator(request);
     * responses.stream().forEach(....);
     * }
     * 
* * 2) Using For loop * *
     * {
     *     @code
     *     software.amazon.awssdk.services.redshiftdata.paginators.ListStatementsIterable responses = client
     *             .listStatementsPaginator(request);
     *     for (software.amazon.awssdk.services.redshiftdata.model.ListStatementsResponse response : responses) {
     *         // do something;
     *     }
     * }
     * 
* * 3) Use iterator directly * *
     * {@code
     * software.amazon.awssdk.services.redshiftdata.paginators.ListStatementsIterable responses = client.listStatementsPaginator(request);
     * responses.iterator().forEachRemaining(....);
     * }
     * 
*

* Please notice that the configuration of MaxResults won't limit the number of results you get with the * paginator. It only limits the number of results in each page. *

*

* Note: If you prefer to have control on service calls, use the * {@link #listStatements(software.amazon.awssdk.services.redshiftdata.model.ListStatementsRequest)} operation. *

* * @param listStatementsRequest * @return A custom iterable that can be used to iterate through all the response pages. * @throws ValidationException * The Amazon Redshift Data API operation failed due to invalid input. * @throws InternalServerException * The Amazon Redshift Data API operation failed due to invalid input. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for * catch all scenarios. * @throws SdkClientException * If any client side error occurs such as an IO related failure, failure to get credentials, etc. * @throws RedshiftDataException * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. * @sample RedshiftDataClient.ListStatements * @see AWS * API Documentation */ @Override public ListStatementsIterable listStatementsPaginator(ListStatementsRequest listStatementsRequest) throws ValidationException, InternalServerException, AwsServiceException, SdkClientException, RedshiftDataException { return new ListStatementsIterable(this, applyPaginatorUserAgent(listStatementsRequest)); } /** *

* List the tables in a database. If neither SchemaPattern nor TablePattern are specified, * then all tables in the database are returned. A token is returned to page through the table list. Depending on * the authorization method, use one of the following combinations of request parameters: *

*
    *
  • *

    * Secrets Manager - when connecting to a cluster, provide the secret-arn of a secret stored in Secrets * Manager which has username and password. The specified secret contains credentials to * connect to the database you specify. When you are connecting to a cluster, you also supply the * database name, If you provide a cluster identifier (dbClusterIdentifier), it must match the cluster * identifier stored in the secret. When you are connecting to a serverless workgroup, you also supply the database * name. *

    *
  • *
  • *

    * Temporary credentials - when connecting to your data warehouse, choose one of the following options: *

    *
      *
    • *

      * When connecting to a serverless workgroup, specify the workgroup name and database name. The database user name * is derived from the IAM identity. For example, arn:iam::123456789012:user:foo has the database user * name IAM:foo. Also, permission to call the redshift-serverless:GetCredentials operation * is required. *

      *
    • *
    • *

      * When connecting to a cluster as an IAM identity, specify the cluster identifier and the database name. The * database user name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo has * the database user name IAM:foo. Also, permission to call the * redshift:GetClusterCredentialsWithIAM operation is required. *

      *
    • *
    • *

      * When connecting to a cluster as a database user, specify the cluster identifier, the database name, and the * database user name. Also, permission to call the redshift:GetClusterCredentials operation is * required. *

      *
    • *
    *
  • *
*

* For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in * the Amazon Redshift Management Guide. *

* * @param listTablesRequest * @return Result of the ListTables operation returned by the service. * @throws ValidationException * The Amazon Redshift Data API operation failed due to invalid input. * @throws InternalServerException * The Amazon Redshift Data API operation failed due to invalid input. * @throws DatabaseConnectionException * Connection to a database failed. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for * catch all scenarios. * @throws SdkClientException * If any client side error occurs such as an IO related failure, failure to get credentials, etc. * @throws RedshiftDataException * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. * @sample RedshiftDataClient.ListTables * @see AWS API * Documentation */ @Override public ListTablesResponse listTables(ListTablesRequest listTablesRequest) throws ValidationException, InternalServerException, DatabaseConnectionException, AwsServiceException, SdkClientException, RedshiftDataException { JsonOperationMetadata operationMetadata = JsonOperationMetadata.builder().hasStreamingSuccessResponse(false) .isPayloadJson(true).build(); HttpResponseHandler responseHandler = protocolFactory.createResponseHandler(operationMetadata, ListTablesResponse::builder); HttpResponseHandler errorResponseHandler = createErrorResponseHandler(protocolFactory, operationMetadata); List metricPublishers = resolveMetricPublishers(clientConfiguration, listTablesRequest .overrideConfiguration().orElse(null)); MetricCollector apiCallMetricCollector = metricPublishers.isEmpty() ? 
NoOpMetricCollector.create() : MetricCollector .create("ApiCall"); try { apiCallMetricCollector.reportMetric(CoreMetric.SERVICE_ID, "Redshift Data"); apiCallMetricCollector.reportMetric(CoreMetric.OPERATION_NAME, "ListTables"); return clientHandler .execute(new ClientExecutionParams().withOperationName("ListTables") .withResponseHandler(responseHandler).withErrorResponseHandler(errorResponseHandler) .withInput(listTablesRequest).withMetricCollector(apiCallMetricCollector) .withMarshaller(new ListTablesRequestMarshaller(protocolFactory))); } finally { metricPublishers.forEach(p -> p.publish(apiCallMetricCollector.collect())); } } /** *

* List the tables in a database. If neither SchemaPattern nor TablePattern are specified, * then all tables in the database are returned. A token is returned to page through the table list. Depending on * the authorization method, use one of the following combinations of request parameters: *

*
    *
  • *

    * Secrets Manager - when connecting to a cluster, provide the secret-arn of a secret stored in Secrets * Manager which has username and password. The specified secret contains credentials to * connect to the database you specify. When you are connecting to a cluster, you also supply the * database name, If you provide a cluster identifier (dbClusterIdentifier), it must match the cluster * identifier stored in the secret. When you are connecting to a serverless workgroup, you also supply the database * name. *

    *
  • *
  • *

    * Temporary credentials - when connecting to your data warehouse, choose one of the following options: *

    *
      *
    • *

      * When connecting to a serverless workgroup, specify the workgroup name and database name. The database user name * is derived from the IAM identity. For example, arn:iam::123456789012:user:foo has the database user * name IAM:foo. Also, permission to call the redshift-serverless:GetCredentials operation * is required. *

      *
    • *
    • *

      * When connecting to a cluster as an IAM identity, specify the cluster identifier and the database name. The * database user name is derived from the IAM identity. For example, arn:iam::123456789012:user:foo has * the database user name IAM:foo. Also, permission to call the * redshift:GetClusterCredentialsWithIAM operation is required. *

      *
    • *
    • *

      * When connecting to a cluster as a database user, specify the cluster identifier, the database name, and the * database user name. Also, permission to call the redshift:GetClusterCredentials operation is * required. *

      *
    • *
    *
  • *
*

* For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in * the Amazon Redshift Management Guide. *

*
*

* This is a variant of {@link #listTables(software.amazon.awssdk.services.redshiftdata.model.ListTablesRequest)} * operation. The return type is a custom iterable that can be used to iterate through all the pages. SDK will * internally handle making service calls for you. *

*

* When this operation is called, a custom iterable is returned but no service calls are made yet. So there is no * guarantee that the request is valid. As you iterate through the iterable, SDK will start lazily loading response * pages by making service calls until there are no pages left or your iteration stops. If there are errors in your * request, you will see the failures only after you start iterating through the iterable. *

* *

* The following are few ways to iterate through the response pages: *

* 1) Using a Stream * *
     * {@code
     * software.amazon.awssdk.services.redshiftdata.paginators.ListTablesIterable responses = client.listTablesPaginator(request);
     * responses.stream().forEach(....);
     * }
     * 
* * 2) Using For loop * *
     * {
     *     @code
     *     software.amazon.awssdk.services.redshiftdata.paginators.ListTablesIterable responses = client.listTablesPaginator(request);
     *     for (software.amazon.awssdk.services.redshiftdata.model.ListTablesResponse response : responses) {
     *         // do something;
     *     }
     * }
     * 
* * 3) Use iterator directly * *
     * {@code
     * software.amazon.awssdk.services.redshiftdata.paginators.ListTablesIterable responses = client.listTablesPaginator(request);
     * responses.iterator().forEachRemaining(....);
     * }
     * 
*

* Please notice that the configuration of MaxResults won't limit the number of results you get with the * paginator. It only limits the number of results in each page. *

*

* Note: If you prefer to have control on service calls, use the * {@link #listTables(software.amazon.awssdk.services.redshiftdata.model.ListTablesRequest)} operation. *

* * @param listTablesRequest * @return A custom iterable that can be used to iterate through all the response pages. * @throws ValidationException * The Amazon Redshift Data API operation failed due to invalid input. * @throws InternalServerException * The Amazon Redshift Data API operation failed due to invalid input. * @throws DatabaseConnectionException * Connection to a database failed. * @throws SdkException * Base class for all exceptions that can be thrown by the SDK (both service and client). Can be used for * catch all scenarios. * @throws SdkClientException * If any client side error occurs such as an IO related failure, failure to get credentials, etc. * @throws RedshiftDataException * Base class for all service exceptions. Unknown exceptions will be thrown as an instance of this type. * @sample RedshiftDataClient.ListTables * @see AWS API * Documentation */ @Override public ListTablesIterable listTablesPaginator(ListTablesRequest listTablesRequest) throws ValidationException, InternalServerException, DatabaseConnectionException, AwsServiceException, SdkClientException, RedshiftDataException { return new ListTablesIterable(this, applyPaginatorUserAgent(listTablesRequest)); } private T applyPaginatorUserAgent(T request) { Consumer userAgentApplier = b -> b.addApiName(ApiName.builder() .version(VersionInfo.SDK_VERSION).name("PAGINATED").build()); AwsRequestOverrideConfiguration overrideConfiguration = request.overrideConfiguration() .map(c -> c.toBuilder().applyMutation(userAgentApplier).build()) .orElse((AwsRequestOverrideConfiguration.builder().applyMutation(userAgentApplier).build())); return (T) request.toBuilder().overrideConfiguration(overrideConfiguration).build(); } @Override public final String serviceName() { return SERVICE_NAME; } private static List resolveMetricPublishers(SdkClientConfiguration clientConfiguration, RequestOverrideConfiguration requestOverrideConfiguration) { List publishers = null; if (requestOverrideConfiguration != null) { 
publishers = requestOverrideConfiguration.metricPublishers(); } if (publishers == null || publishers.isEmpty()) { publishers = clientConfiguration.option(SdkClientOption.METRIC_PUBLISHERS); } if (publishers == null) { publishers = Collections.emptyList(); } return publishers; } private HttpResponseHandler createErrorResponseHandler(BaseAwsJsonProtocolFactory protocolFactory, JsonOperationMetadata operationMetadata) { return protocolFactory.createErrorResponseHandler(operationMetadata); } private > T init(T builder) { return builder .clientConfiguration(clientConfiguration) .defaultServiceExceptionSupplier(RedshiftDataException::builder) .protocol(AwsJsonProtocol.AWS_JSON) .protocolVersion("1.1") .registerModeledException( ExceptionMetadata.builder().errorCode("ActiveStatementsExceededException") .exceptionBuilderSupplier(ActiveStatementsExceededException::builder).httpStatusCode(400).build()) .registerModeledException( ExceptionMetadata.builder().errorCode("ValidationException") .exceptionBuilderSupplier(ValidationException::builder).httpStatusCode(400).build()) .registerModeledException( ExceptionMetadata.builder().errorCode("DatabaseConnectionException") .exceptionBuilderSupplier(DatabaseConnectionException::builder).httpStatusCode(500).build()) .registerModeledException( ExceptionMetadata.builder().errorCode("ResourceNotFoundException") .exceptionBuilderSupplier(ResourceNotFoundException::builder).httpStatusCode(400).build()) .registerModeledException( ExceptionMetadata.builder().errorCode("InternalServerException") .exceptionBuilderSupplier(InternalServerException::builder).httpStatusCode(500).build()) .registerModeledException( ExceptionMetadata.builder().errorCode("ExecuteStatementException") .exceptionBuilderSupplier(ExecuteStatementException::builder).httpStatusCode(500).build()) .registerModeledException( ExceptionMetadata.builder().errorCode("BatchExecuteStatementException") 
.exceptionBuilderSupplier(BatchExecuteStatementException::builder).httpStatusCode(500).build()); } @Override public final RedshiftDataServiceClientConfiguration serviceClientConfiguration() { return this.serviceClientConfiguration; } @Override public void close() { clientHandler.close(); } }




© 2015 - 2025 Weber Informatics LLC | Privacy Policy