// The AWS SDK for Kotlin client for CloudWatch Logs
// Code generated by smithy-kotlin-codegen. DO NOT EDIT!
package aws.sdk.kotlin.services.cloudwatchlogs
import aws.sdk.kotlin.runtime.auth.credentials.DefaultChainCredentialsProvider
import aws.sdk.kotlin.runtime.auth.credentials.internal.manage
import aws.sdk.kotlin.runtime.client.AwsSdkClientConfig
import aws.sdk.kotlin.runtime.config.AbstractAwsSdkClientFactory
import aws.sdk.kotlin.runtime.config.endpoints.resolveEndpointUrl
import aws.sdk.kotlin.runtime.config.profile.AwsProfile
import aws.sdk.kotlin.runtime.config.profile.AwsSharedConfig
import aws.sdk.kotlin.runtime.http.retries.AwsRetryPolicy
import aws.sdk.kotlin.services.cloudwatchlogs.auth.CloudWatchLogsAuthSchemeProvider
import aws.sdk.kotlin.services.cloudwatchlogs.auth.DefaultCloudWatchLogsAuthSchemeProvider
import aws.sdk.kotlin.services.cloudwatchlogs.endpoints.CloudWatchLogsEndpointParameters
import aws.sdk.kotlin.services.cloudwatchlogs.endpoints.CloudWatchLogsEndpointProvider
import aws.sdk.kotlin.services.cloudwatchlogs.endpoints.DefaultCloudWatchLogsEndpointProvider
import aws.sdk.kotlin.services.cloudwatchlogs.model.AssociateKmsKeyRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.AssociateKmsKeyResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.CancelExportTaskRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.CancelExportTaskResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.CreateDeliveryRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.CreateDeliveryResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.CreateExportTaskRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.CreateExportTaskResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.CreateLogAnomalyDetectorRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.CreateLogAnomalyDetectorResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.CreateLogGroupRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.CreateLogGroupResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.CreateLogStreamRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.CreateLogStreamResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteAccountPolicyRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteAccountPolicyResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteDataProtectionPolicyRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteDataProtectionPolicyResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteDeliveryDestinationPolicyRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteDeliveryDestinationPolicyResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteDeliveryDestinationRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteDeliveryDestinationResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteDeliveryRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteDeliveryResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteDeliverySourceRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteDeliverySourceResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteDestinationRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteDestinationResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteLogAnomalyDetectorRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteLogAnomalyDetectorResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteLogGroupRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteLogGroupResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteLogStreamRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteLogStreamResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteMetricFilterRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteMetricFilterResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteQueryDefinitionRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteQueryDefinitionResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteResourcePolicyRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteResourcePolicyResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteRetentionPolicyRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteRetentionPolicyResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteSubscriptionFilterRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DeleteSubscriptionFilterResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeAccountPoliciesRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeAccountPoliciesResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeConfigurationTemplatesRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeConfigurationTemplatesResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeDeliveriesRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeDeliveriesResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeDeliveryDestinationsRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeDeliveryDestinationsResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeDeliverySourcesRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeDeliverySourcesResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeDestinationsRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeDestinationsResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeExportTasksRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeExportTasksResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeLogGroupsRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeLogGroupsResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeLogStreamsRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeLogStreamsResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeMetricFiltersRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeMetricFiltersResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeQueriesRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeQueriesResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeQueryDefinitionsRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeQueryDefinitionsResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeResourcePoliciesRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeResourcePoliciesResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeSubscriptionFiltersRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DescribeSubscriptionFiltersResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.DisassociateKmsKeyRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.DisassociateKmsKeyResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.FilterLogEventsRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.FilterLogEventsResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.GetDataProtectionPolicyRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.GetDataProtectionPolicyResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.GetDeliveryDestinationPolicyRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.GetDeliveryDestinationPolicyResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.GetDeliveryDestinationRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.GetDeliveryDestinationResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.GetDeliveryRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.GetDeliveryResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.GetDeliverySourceRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.GetDeliverySourceResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.GetLogAnomalyDetectorRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.GetLogAnomalyDetectorResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.GetLogEventsRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.GetLogEventsResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.GetLogGroupFieldsRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.GetLogGroupFieldsResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.GetLogRecordRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.GetLogRecordResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.GetQueryResultsRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.GetQueryResultsResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.ListAnomaliesRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.ListAnomaliesResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.ListLogAnomalyDetectorsRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.ListLogAnomalyDetectorsResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.ListTagsForResourceRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.ListTagsForResourceResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.ListTagsLogGroupRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.ListTagsLogGroupResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutAccountPolicyRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutAccountPolicyResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutDataProtectionPolicyRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutDataProtectionPolicyResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutDeliveryDestinationPolicyRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutDeliveryDestinationPolicyResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutDeliveryDestinationRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutDeliveryDestinationResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutDeliverySourceRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutDeliverySourceResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutDestinationPolicyRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutDestinationPolicyResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutDestinationRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutDestinationResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutLogEventsRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutLogEventsResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutMetricFilterRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutMetricFilterResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutQueryDefinitionRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutQueryDefinitionResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutResourcePolicyRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutResourcePolicyResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutRetentionPolicyRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutRetentionPolicyResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutSubscriptionFilterRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.PutSubscriptionFilterResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.StartLiveTailRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.StartLiveTailResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.StartQueryRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.StartQueryResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.StopQueryRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.StopQueryResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.TagLogGroupRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.TagLogGroupResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.TagResourceRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.TagResourceResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.TestMetricFilterRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.TestMetricFilterResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.UntagLogGroupRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.UntagLogGroupResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.UntagResourceRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.UntagResourceResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.UpdateAnomalyRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.UpdateAnomalyResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.UpdateDeliveryConfigurationRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.UpdateDeliveryConfigurationResponse
import aws.sdk.kotlin.services.cloudwatchlogs.model.UpdateLogAnomalyDetectorRequest
import aws.sdk.kotlin.services.cloudwatchlogs.model.UpdateLogAnomalyDetectorResponse
import aws.smithy.kotlin.runtime.auth.awscredentials.CredentialsProvider
import aws.smithy.kotlin.runtime.auth.awscredentials.CredentialsProviderConfig
import aws.smithy.kotlin.runtime.awsprotocol.ClockSkewInterceptor
import aws.smithy.kotlin.runtime.client.AbstractSdkClientBuilder
import aws.smithy.kotlin.runtime.client.AbstractSdkClientFactory
import aws.smithy.kotlin.runtime.client.IdempotencyTokenConfig
import aws.smithy.kotlin.runtime.client.IdempotencyTokenProvider
import aws.smithy.kotlin.runtime.client.LogMode
import aws.smithy.kotlin.runtime.client.RetryClientConfig
import aws.smithy.kotlin.runtime.client.RetryStrategyClientConfig
import aws.smithy.kotlin.runtime.client.RetryStrategyClientConfigImpl
import aws.smithy.kotlin.runtime.client.SdkClient
import aws.smithy.kotlin.runtime.client.SdkClientConfig
import aws.smithy.kotlin.runtime.http.auth.AuthScheme
import aws.smithy.kotlin.runtime.http.auth.HttpAuthConfig
import aws.smithy.kotlin.runtime.http.config.HttpClientConfig
import aws.smithy.kotlin.runtime.http.config.HttpEngineConfig
import aws.smithy.kotlin.runtime.http.engine.HttpClientEngine
import aws.smithy.kotlin.runtime.http.engine.HttpEngineConfigImpl
import aws.smithy.kotlin.runtime.http.interceptors.HttpInterceptor
import aws.smithy.kotlin.runtime.net.url.Url
import aws.smithy.kotlin.runtime.retries.RetryStrategy
import aws.smithy.kotlin.runtime.retries.policy.RetryPolicy
import aws.smithy.kotlin.runtime.telemetry.Global
import aws.smithy.kotlin.runtime.telemetry.TelemetryConfig
import aws.smithy.kotlin.runtime.telemetry.TelemetryProvider
import aws.smithy.kotlin.runtime.util.LazyAsyncValue
import kotlin.collections.List
import kotlin.jvm.JvmStatic
public const val ServiceId: String = "CloudWatch Logs"
public const val SdkVersion: String = "1.3.72"
public const val ServiceApiVersion: String = "2014-03-28"
/**
* You can use Amazon CloudWatch Logs to monitor, store, and access your log files from EC2 instances, CloudTrail, and other sources. You can then retrieve the associated log data from CloudWatch Logs using the CloudWatch console. Alternatively, you can use CloudWatch Logs commands in the Amazon Web Services CLI, CloudWatch Logs API, or CloudWatch Logs SDK.
*
* You can use CloudWatch Logs to:
* + **Monitor logs from EC2 instances in real time**: You can use CloudWatch Logs to monitor applications and systems using log data. For example, CloudWatch Logs can track the number of errors that occur in your application logs. Then, it can send you a notification whenever the rate of errors exceeds a threshold that you specify. CloudWatch Logs uses your log data for monitoring so no code changes are required. For example, you can monitor application logs for specific literal terms (such as "NullReferenceException"). You can also count the number of occurrences of a literal term at a particular position in log data (such as "404" status codes in an Apache access log). When the term you are searching for is found, CloudWatch Logs reports the data to a CloudWatch metric that you specify.
* + **Monitor CloudTrail logged events**: You can create alarms in CloudWatch and receive notifications of particular API activity as captured by CloudTrail. You can use the notification to perform troubleshooting.
* + **Archive log data**: You can use CloudWatch Logs to store your log data in highly durable storage. You can change the log retention setting so that any log events earlier than this setting are automatically deleted. The CloudWatch Logs agent helps to quickly send both rotated and non-rotated log data off of a host and into the log service. You can then access the raw log data when you need it.
*/
public interface CloudWatchLogsClient : SdkClient {
/**
* CloudWatchLogsClient's configuration
*/
public override val config: Config
public companion object : AbstractAwsSdkClientFactory<Config, Config.Builder, CloudWatchLogsClient, Builder>()
{
@JvmStatic
override fun builder(): Builder = Builder()
override fun finalizeConfig(builder: Builder) {
super.finalizeConfig(builder)
builder.config.interceptors.add(0, ClockSkewInterceptor())
}
override suspend fun finalizeEnvironmentalConfig(builder: Builder, sharedConfig: LazyAsyncValue<AwsSharedConfig>, activeProfile: LazyAsyncValue<AwsProfile>) {
super.finalizeEnvironmentalConfig(builder, sharedConfig, activeProfile)
builder.config.endpointUrl = builder.config.endpointUrl ?: resolveEndpointUrl(
sharedConfig,
"CloudWatchLogs",
"CLOUDWATCH_LOGS",
"cloudwatch_logs",
)
}
}
public class Builder internal constructor(): AbstractSdkClientBuilder<Config, CloudWatchLogsClient>() {
override val config: Config.Builder = Config.Builder()
override fun newClient(config: Config): CloudWatchLogsClient = DefaultCloudWatchLogsClient(config)
}
public class Config private constructor(builder: Builder) : AwsSdkClientConfig, CredentialsProviderConfig, HttpAuthConfig, HttpClientConfig, HttpEngineConfig by builder.buildHttpEngineConfig(), IdempotencyTokenConfig, RetryClientConfig, RetryStrategyClientConfig by builder.buildRetryStrategyClientConfig(), SdkClientConfig, TelemetryConfig {
override val clientName: String = builder.clientName
override val region: String? = builder.region
override val authSchemes: kotlin.collections.List<AuthScheme> = builder.authSchemes
override val credentialsProvider: CredentialsProvider = builder.credentialsProvider ?: DefaultChainCredentialsProvider(httpClient = httpClient, region = region).manage()
public val endpointProvider: CloudWatchLogsEndpointProvider = builder.endpointProvider ?: DefaultCloudWatchLogsEndpointProvider()
public val endpointUrl: Url? = builder.endpointUrl
override val idempotencyTokenProvider: IdempotencyTokenProvider = builder.idempotencyTokenProvider ?: IdempotencyTokenProvider.Default
override val interceptors: kotlin.collections.List<HttpInterceptor> = builder.interceptors
override val logMode: LogMode = builder.logMode ?: LogMode.Default
override val retryPolicy: RetryPolicy = builder.retryPolicy ?: AwsRetryPolicy.Default
override val telemetryProvider: TelemetryProvider = builder.telemetryProvider ?: TelemetryProvider.Global
override val useDualStack: Boolean = builder.useDualStack ?: false
override val useFips: Boolean = builder.useFips ?: false
override val applicationId: String? = builder.applicationId
public val authSchemeProvider: CloudWatchLogsAuthSchemeProvider = builder.authSchemeProvider ?: DefaultCloudWatchLogsAuthSchemeProvider()
public companion object {
public inline operator fun invoke(block: Builder.() -> kotlin.Unit): Config = Builder().apply(block).build()
}
public fun toBuilder(): Builder = Builder().apply {
clientName = this@Config.clientName
region = this@Config.region
authSchemes = this@Config.authSchemes
credentialsProvider = this@Config.credentialsProvider
endpointProvider = this@Config.endpointProvider
endpointUrl = this@Config.endpointUrl
httpClient = this@Config.httpClient
idempotencyTokenProvider = this@Config.idempotencyTokenProvider
interceptors = this@Config.interceptors.toMutableList()
logMode = this@Config.logMode
retryPolicy = this@Config.retryPolicy
retryStrategy = this@Config.retryStrategy
telemetryProvider = this@Config.telemetryProvider
useDualStack = this@Config.useDualStack
useFips = this@Config.useFips
applicationId = this@Config.applicationId
authSchemeProvider = this@Config.authSchemeProvider
}
public class Builder : AwsSdkClientConfig.Builder, CredentialsProviderConfig.Builder, HttpAuthConfig.Builder, HttpClientConfig.Builder, HttpEngineConfig.Builder by HttpEngineConfigImpl.BuilderImpl(), IdempotencyTokenConfig.Builder, RetryClientConfig.Builder, RetryStrategyClientConfig.Builder by RetryStrategyClientConfigImpl.BuilderImpl(), SdkClientConfig.Builder, TelemetryConfig.Builder {
/**
* A reader-friendly name for the client.
*/
override var clientName: String = "CloudWatch Logs"
/**
* The AWS region (e.g. `us-west-2`) to make requests to. See about AWS
* [global infrastructure](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) for more
* information
*/
override var region: String? = null
/**
* Register new or override default [AuthScheme]s configured for this client. By default, the set
* of auth schemes configured comes from the service model. An auth scheme configured explicitly takes
* precedence over the defaults and can be used to customize identity resolution and signing for specific
* authentication schemes.
*/
override var authSchemes: kotlin.collections.List<AuthScheme> = emptyList()
/**
* The AWS credentials provider to use for authenticating requests. If not provided a
* [aws.sdk.kotlin.runtime.auth.credentials.DefaultChainCredentialsProvider] instance will be used.
* NOTE: The caller is responsible for managing the lifetime of the provider when set. The SDK
* client will not close it when the client is closed.
*/
override var credentialsProvider: CredentialsProvider? = null
/**
* The endpoint provider used to determine where to make service requests. **This is an advanced config
* option.**
*
* Endpoint resolution occurs as part of the workflow for every request made via the service client.
*
* The inputs to endpoint resolution are defined on a per-service basis (see [EndpointParameters]).
*/
public var endpointProvider: CloudWatchLogsEndpointProvider? = null
/**
* A custom endpoint to route requests to. The endpoint set here is passed to the configured
* [endpointProvider], which may inspect and modify it as needed.
*
* Setting a custom endpointUrl should generally be preferred to overriding the [endpointProvider] and is
* the recommended way to route requests to development or preview instances of a service.
*
* **This is an advanced config option.**
*/
public var endpointUrl: Url? = null
/**
* Override the default idempotency token generator. SDK clients will generate tokens for members
* that represent idempotent tokens when not explicitly set by the caller using this generator.
*/
override var idempotencyTokenProvider: IdempotencyTokenProvider? = null
/**
* Add an [aws.smithy.kotlin.runtime.client.Interceptor] that will have access to read and modify
* the request and response objects as they are processed by the SDK.
* Interceptors added using this method are executed in the order they are configured and are always
* later than any added automatically by the SDK.
*/
override var interceptors: kotlin.collections.MutableList<HttpInterceptor> = kotlin.collections.mutableListOf()
/**
* Configure events that will be logged. By default clients will not output
* raw requests or responses. Use this setting to opt-in to additional debug logging.
*
* This can be used to configure logging of requests, responses, retries, etc of SDK clients.
*
* **NOTE**: Logging of raw requests or responses may leak sensitive information! It may also have
* performance considerations when dumping the request/response body. This is primarily a tool for
* debug purposes.
*/
override var logMode: LogMode? = null
/**
* The policy to use for evaluating operation results and determining whether/how to retry.
*/
override var retryPolicy: RetryPolicy? = null
/**
* The telemetry provider used to instrument the SDK operations with. By default, the global telemetry
* provider will be used.
*/
override var telemetryProvider: TelemetryProvider? = null
/**
* Flag to toggle whether to use dual-stack endpoints when making requests.
* See [https://docs.aws.amazon.com/sdkref/latest/guide/feature-endpoints.html] for more information.
* Disabled by default.
*/
override var useDualStack: Boolean? = null
/**
* Flag to toggle whether to use [FIPS](https://aws.amazon.com/compliance/fips/) endpoints when making requests.
* Disabled by default.
*/
override var useFips: Boolean? = null
/**
* An optional application specific identifier.
* When set it will be appended to the User-Agent header of every request in the form of: `app/{applicationId}`.
* When not explicitly set, the value will be loaded from the following locations:
*
* - JVM System Property: `aws.userAgentAppId`
* - Environment variable: `AWS_SDK_UA_APP_ID`
* - Shared configuration profile attribute: `sdk_ua_app_id`
*
* See [shared configuration settings](https://docs.aws.amazon.com/sdkref/latest/guide/settings-reference.html)
* reference for more information on environment variables and shared config settings.
*/
override var applicationId: String? = null
/**
* Configure the provider used to resolve the authentication scheme to use for a particular operation.
*/
public var authSchemeProvider: CloudWatchLogsAuthSchemeProvider? = null
override fun build(): Config = Config(this)
}
}
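/*
 * Sample: constructing a client (an illustrative sketch, not part of the generated source; the
 * region value is an assumption). The companion object factory also supports `fromEnvironment { }`,
 * which resolves region and credentials from the environment and shared config.
 *
 *     CloudWatchLogsClient {
 *         region = "us-east-1"
 *         logMode = LogMode.LogRequest
 *     }.use { logs ->
 *         // invoke operations on `logs` here; the client is closed when the block exits
 *     }
 */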
/**
* Associates the specified KMS key with either one log group in the account, or with all stored CloudWatch Logs query insights results in the account.
*
* When you use `AssociateKmsKey`, you specify either the `logGroupName` parameter or the `resourceIdentifier` parameter. You can't specify both of those parameters in the same operation.
* + Specify the `logGroupName` parameter to cause all log events stored in the log group to be encrypted with that key. Only the log events ingested after the key is associated are encrypted with that key. Associating a KMS key with a log group overrides any existing associations between the log group and a KMS key. After a KMS key is associated with a log group, all newly ingested data for the log group is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested. Associating a key with a log group does not cause the results of queries of that log group to be encrypted with that key. To have query results encrypted with a KMS key, you must use an `AssociateKmsKey` operation with the `resourceIdentifier` parameter that specifies a `query-result` resource.
* + Specify the `resourceIdentifier` parameter with a `query-result` resource, to use that key to encrypt the stored results of all future [StartQuery](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html) operations in the account. The response from a [GetQueryResults](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetQueryResults.html) operation will still return the query results in plain text. Even if you have not associated a key with your query results, the query results are encrypted when stored, using the default CloudWatch Logs method. If you run a query from a monitoring account that queries logs in a source account, the query results key from the monitoring account, if any, is used.
*
* If you delete the key that is used to encrypt log events or log group query results, then all the associated stored log events or query results that were encrypted with that key will be unencryptable and unusable.
*
* CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group or query results. For more information, see [Using Symmetric and Asymmetric Keys](https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html).
*
* It can take up to 5 minutes for this operation to take effect.
*
* If you attempt to associate a KMS key with a log group but the KMS key does not exist or the KMS key is disabled, you receive an `InvalidParameterException` error.
*/
public suspend fun associateKmsKey(input: AssociateKmsKeyRequest): AssociateKmsKeyResponse
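/*
 * Sample call for associateKmsKey (an illustrative sketch; the log group name and key ARN are
 * placeholders, and `logs` is a client built as in the construction sample above):
 *
 *     logs.associateKmsKey(AssociateKmsKeyRequest {
 *         logGroupName = "/my/app"
 *         kmsKeyId = "arn:aws:kms:us-east-1:123456789012:key/EXAMPLE"
 *     })
 */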
/**
* Cancels the specified export task.
*
* The task must be in the `PENDING` or `RUNNING` state.
*/
public suspend fun cancelExportTask(input: CancelExportTaskRequest): CancelExportTaskResponse
/**
* Creates a *delivery*. A delivery is a connection between a logical *delivery source* and a logical *delivery destination* that you have already created.
*
* Only some Amazon Web Services services support being configured as a delivery source using this operation. These services are listed as **Supported [V2 Permissions]** in the table at [Enabling logging from Amazon Web Services services.](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html)
*
* A delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose.
*
* To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
* + Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see [PutDeliverySource](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliverySource.html).
* + Create a *delivery destination*, which is a logical object that represents the actual delivery destination. For more information, see [PutDeliveryDestination](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestination.html).
* + If you are delivering logs cross-account, you must use [PutDeliveryDestinationPolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestinationPolicy.html) in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
* + Use `CreateDelivery` to create a *delivery* by pairing exactly one delivery source and one delivery destination.
*
* You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
*
* To update an existing delivery configuration, use [UpdateDeliveryConfiguration](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_UpdateDeliveryConfiguration.html).
*/
public suspend fun createDelivery(input: CreateDeliveryRequest): CreateDeliveryResponse
/**
* Creates an export task so that you can efficiently export data from a log group to an Amazon S3 bucket. When you perform a `CreateExportTask` operation, you must use credentials that have permission to write to the S3 bucket that you specify as the destination.
*
* Exporting log data to S3 buckets that are encrypted by KMS is supported. Exporting log data to Amazon S3 buckets that have S3 Object Lock enabled with a retention period is also supported.
*
* Exporting to S3 buckets that are encrypted with AES-256 is supported.
*
* This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use [DescribeExportTasks](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeExportTasks.html) to get the status of the export task. Each account can only have one active (`RUNNING` or `PENDING`) export task at a time. To cancel an export task, use [CancelExportTask](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CancelExportTask.html).
*
* You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate log data for each export task, specify a prefix to be used as the Amazon S3 key prefix for all exported objects.
*
* Time-based sorting on chunks of log data inside an exported file is not guaranteed. You can sort the exported log field data by using Linux utilities.
*/
public suspend fun createExportTask(input: CreateExportTaskRequest): CreateExportTaskResponse
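/*
 * Sample call for createExportTask (an illustrative sketch; the bucket, prefix, and the
 * epoch-millisecond time range are placeholders, and `logs` is an assumed client instance):
 *
 *     logs.createExportTask(CreateExportTaskRequest {
 *         logGroupName = "/my/app"
 *         from = 1700000000000L
 *         to = 1700003600000L
 *         destination = "my-export-bucket"
 *         destinationPrefix = "cloudwatch-exports"
 *     })
 */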
/**
* Creates an *anomaly detector* that regularly scans one or more log groups and looks for patterns and anomalies in the logs.
*
* An anomaly detector can help surface issues by automatically discovering anomalies in your log event traffic. An anomaly detector uses machine learning algorithms to scan log events and find *patterns*. A pattern is a shared text structure that recurs among your log fields. Patterns provide a useful tool for analyzing large sets of logs because a large number of log events can often be compressed into a few patterns.
*
* The anomaly detector uses pattern recognition to find `anomalies`, which are unusual log events. It uses the `evaluationFrequency` to compare current log events and patterns with trained baselines.
*
* Fields within a pattern are called *tokens*. Fields that vary within a pattern, such as a request ID or timestamp, are referred to as *dynamic tokens* and represented by `<*>`.
*
* The following is an example of a pattern:
*
* `[INFO] Request time: <*> ms`
*
* This pattern represents log events like `[INFO] Request time: 327 ms` and other similar log events that differ only by the number, in this case 327. When the pattern is displayed, the different numbers are replaced by `<*>`.
*
* Any parts of log events that are masked as sensitive data are not scanned for anomalies. For more information about masking sensitive data, see [Help protect sensitive log data with masking](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data.html).
*/
public suspend fun createLogAnomalyDetector(input: CreateLogAnomalyDetectorRequest): CreateLogAnomalyDetectorResponse
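/*
 * Sample call for createLogAnomalyDetector (an illustrative sketch; the log group ARN and detector
 * name are placeholders, and the field names assume the generated model builders):
 *
 *     logs.createLogAnomalyDetector(CreateLogAnomalyDetectorRequest {
 *         logGroupArnList = listOf("arn:aws:logs:us-east-1:123456789012:log-group:/my/app")
 *         detectorName = "my-app-anomaly-detector"
 *     })
 */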
/**
* Creates a log group with the specified name. You can create up to 1,000,000 log groups per Region per account.
*
* You must use the following guidelines when naming a log group:
* + Log group names must be unique within a Region for an Amazon Web Services account.
* + Log group names can be between 1 and 512 characters long.
* + Log group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and '#' (number sign)
* + Log group names can't start with the string `aws/`
*
* When you create a log group, by default the log events in the log group do not expire. To set a retention policy so that events expire and are deleted after a specified time, use [PutRetentionPolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutRetentionPolicy.html).
*
* If you associate a KMS key with the log group, ingested data is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested.
*
* If you attempt to associate a KMS key with the log group but the KMS key does not exist or the KMS key is disabled, you receive an `InvalidParameterException` error.
*
* CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group. For more information, see [Using Symmetric and Asymmetric Keys](https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html).
*/
public suspend fun createLogGroup(input: CreateLogGroupRequest): CreateLogGroupResponse
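/*
 * Sample call for createLogGroup (an illustrative sketch; the group name and tags are placeholders):
 *
 *     logs.createLogGroup(CreateLogGroupRequest {
 *         logGroupName = "/my/app"
 *         tags = mapOf("team" to "platform")
 *     })
 */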
/**
* Creates a log stream for the specified log group. A log stream is a sequence of log events that originate from a single source, such as an application instance or a resource that is being monitored.
*
* There is no limit on the number of log streams that you can create for a log group. There is a limit of 50 TPS on `CreateLogStream` operations, after which transactions are throttled.
*
* You must use the following guidelines when naming a log stream:
* + Log stream names must be unique within the log group.
* + Log stream names can be between 1 and 512 characters long.
* + Don't use ':' (colon) or '*' (asterisk) characters.
*/
public suspend fun createLogStream(input: CreateLogStreamRequest): CreateLogStreamResponse
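/*
 * Sample call for createLogStream (an illustrative sketch; the names are placeholders):
 *
 *     logs.createLogStream(CreateLogStreamRequest {
 *         logGroupName = "/my/app"
 *         logStreamName = "instance-i-0123456789abcdef0"
 *     })
 */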
/**
* Deletes a CloudWatch Logs account policy. This stops the policy from applying to all log groups or a subset of log groups in the account. Log-group level policies will still be in effect.
*
* To use this operation, you must be signed on with the correct permissions depending on the type of policy that you are deleting.
* + To delete a data protection policy, you must have the `logs:DeleteDataProtectionPolicy` and `logs:DeleteAccountPolicy` permissions.
* + To delete a subscription filter policy, you must have the `logs:DeleteSubscriptionFilter` and `logs:DeleteAccountPolicy` permissions.
*/
public suspend fun deleteAccountPolicy(input: DeleteAccountPolicyRequest): DeleteAccountPolicyResponse
/**
* Deletes the data protection policy from the specified log group.
*
* For more information about data protection policies, see [PutDataProtectionPolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDataProtectionPolicy.html).
*/
public suspend fun deleteDataProtectionPolicy(input: DeleteDataProtectionPolicyRequest): DeleteDataProtectionPolicyResponse
/**
* Deletes a *delivery*. A delivery is a connection between a logical *delivery source* and a logical *delivery destination*. Deleting a delivery only deletes the connection between the delivery source and delivery destination. It does not delete the delivery destination or the delivery source.
*/
public suspend fun deleteDelivery(input: DeleteDeliveryRequest): DeleteDeliveryResponse
/**
* Deletes a *delivery destination*. A delivery is a connection between a logical *delivery source* and a logical *delivery destination*.
*
* You can't delete a delivery destination if any current deliveries are associated with it. To find whether any deliveries are associated with this delivery destination, use the [DescribeDeliveries](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeDeliveries.html) operation and check the `deliveryDestinationArn` field in the results.
*/
public suspend fun deleteDeliveryDestination(input: DeleteDeliveryDestinationRequest): DeleteDeliveryDestinationResponse
/**
* Deletes a delivery destination policy. For more information about these policies, see [PutDeliveryDestinationPolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestinationPolicy.html).
*/
public suspend fun deleteDeliveryDestinationPolicy(input: DeleteDeliveryDestinationPolicyRequest): DeleteDeliveryDestinationPolicyResponse
/**
* Deletes a *delivery source*. A delivery is a connection between a logical *delivery source* and a logical *delivery destination*.
*
* You can't delete a delivery source if any current deliveries are associated with it. To find whether any deliveries are associated with this delivery source, use the [DescribeDeliveries](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeDeliveries.html) operation and check the `deliverySourceName` field in the results.
*/
public suspend fun deleteDeliverySource(input: DeleteDeliverySourceRequest): DeleteDeliverySourceResponse
/**
* Deletes the specified destination, and eventually disables all the subscription filters that publish to it. This operation does not delete the physical resource encapsulated by the destination.
*/
public suspend fun deleteDestination(input: DeleteDestinationRequest): DeleteDestinationResponse
/**
* Deletes the specified CloudWatch Logs anomaly detector.
*/
public suspend fun deleteLogAnomalyDetector(input: DeleteLogAnomalyDetectorRequest): DeleteLogAnomalyDetectorResponse
/**
* Deletes the specified log group and permanently deletes all the archived log events associated with the log group.
*/
public suspend fun deleteLogGroup(input: DeleteLogGroupRequest): DeleteLogGroupResponse
/**
* Deletes the specified log stream and permanently deletes all the archived log events associated with the log stream.
*/
public suspend fun deleteLogStream(input: DeleteLogStreamRequest): DeleteLogStreamResponse
/**
* Deletes the specified metric filter.
*/
public suspend fun deleteMetricFilter(input: DeleteMetricFilterRequest): DeleteMetricFilterResponse
/**
* Deletes a saved CloudWatch Logs Insights query definition. A query definition contains details about a saved CloudWatch Logs Insights query.
*
* Each `DeleteQueryDefinition` operation can delete one query definition.
*
* You must have the `logs:DeleteQueryDefinition` permission to be able to perform this operation.
*/
public suspend fun deleteQueryDefinition(input: DeleteQueryDefinitionRequest): DeleteQueryDefinitionResponse
/**
* Deletes a resource policy from this account. This revokes the access of the identities in that policy to put log events to this account.
*/
public suspend fun deleteResourcePolicy(input: DeleteResourcePolicyRequest = DeleteResourcePolicyRequest { }): DeleteResourcePolicyResponse
/**
* Deletes the specified retention policy.
*
* Log events do not expire if they belong to log groups without a retention policy.
*/
public suspend fun deleteRetentionPolicy(input: DeleteRetentionPolicyRequest): DeleteRetentionPolicyResponse
/**
* Deletes the specified subscription filter.
*/
public suspend fun deleteSubscriptionFilter(input: DeleteSubscriptionFilterRequest): DeleteSubscriptionFilterResponse
/**
* Returns a list of all CloudWatch Logs account policies in the account.
*/
public suspend fun describeAccountPolicies(input: DescribeAccountPoliciesRequest): DescribeAccountPoliciesResponse
/**
* Use this operation to return the valid and default values that are used when creating delivery sources, delivery destinations, and deliveries. For more information about deliveries, see [CreateDelivery](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateDelivery.html).
*/
public suspend fun describeConfigurationTemplates(input: DescribeConfigurationTemplatesRequest = DescribeConfigurationTemplatesRequest { }): DescribeConfigurationTemplatesResponse
/**
* Retrieves a list of the deliveries that have been created in the account.
*
* A *delivery* is a connection between a [*delivery source*](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliverySource.html) and a [*delivery destination*](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestination.html).
*
* A delivery source represents an Amazon Web Services resource that sends logs to a logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in [Enable logging from Amazon Web Services services.](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html)
*/
public suspend fun describeDeliveries(input: DescribeDeliveriesRequest = DescribeDeliveriesRequest { }): DescribeDeliveriesResponse
/**
* Retrieves a list of the delivery destinations that have been created in the account.
*/
public suspend fun describeDeliveryDestinations(input: DescribeDeliveryDestinationsRequest = DescribeDeliveryDestinationsRequest { }): DescribeDeliveryDestinationsResponse
/**
* Retrieves a list of the delivery sources that have been created in the account.
*/
public suspend fun describeDeliverySources(input: DescribeDeliverySourcesRequest = DescribeDeliverySourcesRequest { }): DescribeDeliverySourcesResponse
/**
* Lists all your destinations. The results are ASCII-sorted by destination name.
*/
public suspend fun describeDestinations(input: DescribeDestinationsRequest = DescribeDestinationsRequest { }): DescribeDestinationsResponse
/**
* Lists the specified export tasks. You can list all your export tasks or filter the results based on task ID or task status.
*/
public suspend fun describeExportTasks(input: DescribeExportTasksRequest = DescribeExportTasksRequest { }): DescribeExportTasksResponse
/**
* Lists the specified log groups. You can list all your log groups or filter the results by prefix. The results are ASCII-sorted by log group name.
*
* CloudWatch Logs doesn't support IAM policies that control access to the `DescribeLogGroups` action by using the `aws:ResourceTag/key-name ` condition key. Other CloudWatch Logs actions do support the use of the `aws:ResourceTag/key-name ` condition key to control access. For more information about using tags to control access, see [Controlling access to Amazon Web Services resources using tags](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html).
*
* If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see [CloudWatch cross-account observability](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html).
*/
public suspend fun describeLogGroups(input: DescribeLogGroupsRequest = DescribeLogGroupsRequest { }): DescribeLogGroupsResponse
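/*
 * Sample call for describeLogGroups (an illustrative sketch; the prefix is a placeholder). Page
 * through results by passing the returned `nextToken` back in on the next call:
 *
 *     var token: String? = null
 *     do {
 *         val page = logs.describeLogGroups(DescribeLogGroupsRequest {
 *             logGroupNamePrefix = "/my/"
 *             nextToken = token
 *         })
 *         page.logGroups?.forEach { println(it.logGroupName) }
 *         token = page.nextToken
 *     } while (token != null)
 */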
/**
* Lists the log streams for the specified log group. You can list all the log streams or filter the results by prefix. You can also control how the results are ordered.
*
* You can specify the log group to search by using either `logGroupIdentifier` or `logGroupName`. You must include one of these two parameters, but you can't include both.
*
* This operation has a limit of five transactions per second, after which transactions are throttled.
*
* If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see [CloudWatch cross-account observability](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html).
*/
public suspend fun describeLogStreams(input: DescribeLogStreamsRequest = DescribeLogStreamsRequest { }): DescribeLogStreamsResponse
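/*
 * Sample call for describeLogStreams (an illustrative sketch; the group name is a placeholder and
 * the `OrderBy.LastEventTime` value assumes the generated model enum). As noted above, pass either
 * `logGroupName` or `logGroupIdentifier`, never both:
 *
 *     logs.describeLogStreams(DescribeLogStreamsRequest {
 *         logGroupName = "/my/app"
 *         orderBy = OrderBy.LastEventTime
 *         descending = true
 *     })
 */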
/**
* Lists the specified metric filters. You can list all of the metric filters or filter the results by log name, prefix, metric name, or metric namespace. The results are ASCII-sorted by filter name.
*/
public suspend fun describeMetricFilters(input: DescribeMetricFiltersRequest = DescribeMetricFiltersRequest { }): DescribeMetricFiltersResponse
/**
* Returns a list of CloudWatch Logs Insights queries that are scheduled, running, or have been run recently in this account. You can request all queries or limit it to queries of a specific log group or queries with a certain status.
*/
public suspend fun describeQueries(input: DescribeQueriesRequest = DescribeQueriesRequest { }): DescribeQueriesResponse
/**
* This operation returns a paginated list of your saved CloudWatch Logs Insights query definitions. You can retrieve query definitions from the current account or from a source account that is linked to the current account.
*
* You can use the `queryDefinitionNamePrefix` parameter to limit the results to only the query definitions that have names that start with a certain string.
*/
public suspend fun describeQueryDefinitions(input: DescribeQueryDefinitionsRequest = DescribeQueryDefinitionsRequest { }): DescribeQueryDefinitionsResponse
/**
* Lists the resource policies in this account.
*/
public suspend fun describeResourcePolicies(input: DescribeResourcePoliciesRequest = DescribeResourcePoliciesRequest { }): DescribeResourcePoliciesResponse
/**
* Lists the subscription filters for the specified log group. You can list all the subscription filters or filter the results by prefix. The results are ASCII-sorted by filter name.
*/
public suspend fun describeSubscriptionFilters(input: DescribeSubscriptionFiltersRequest): DescribeSubscriptionFiltersResponse
/**
* Disassociates the specified KMS key from the specified log group or from all CloudWatch Logs Insights query results in the account.
*
* When you use `DisassociateKmsKey`, you specify either the `logGroupName` parameter or the `resourceIdentifier` parameter. You can't specify both of those parameters in the same operation.
* + Specify the `logGroupName` parameter to stop using the KMS key to encrypt future log events ingested and stored in the log group. Instead, they will be encrypted with the default CloudWatch Logs method. The log events that were ingested while the key was associated with the log group are still encrypted with that key. Therefore, CloudWatch Logs will need permissions for the key whenever that data is accessed.
* + Specify the `resourceIdentifier` parameter with the `query-result` resource to stop using the KMS key to encrypt the results of all future [StartQuery](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html) operations in the account. They will instead be encrypted with the default CloudWatch Logs method. The results from queries that ran while the key was associated with the account are still encrypted with that key. Therefore, CloudWatch Logs will need permissions for the key whenever that data is accessed.
*
* It can take up to 5 minutes for this operation to take effect.
*/
public suspend fun disassociateKmsKey(input: DisassociateKmsKeyRequest = DisassociateKmsKeyRequest { }): DisassociateKmsKeyResponse
/**
* Lists log events from the specified log group. You can list all the log events or filter the results using a filter pattern, a time range, and the name of the log stream.
*
* You must have the `logs:FilterLogEvents` permission to perform this operation.
*
* You can specify the log group to search by using either `logGroupIdentifier` or `logGroupName`. You must include one of these two parameters, but you can't include both.
*
* By default, this operation returns as many log events as can fit in 1 MB (up to 10,000 log events) or all the events found within the specified time range. If the results include a token, that means there are more log events available. You can get additional results by specifying the token in a subsequent call. This operation can return empty results while there are more log events available through the token.
*
* The returned log events are sorted by event timestamp, the timestamp when the event was ingested by CloudWatch Logs, and the ID of the `PutLogEvents` request.
*
* If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see [CloudWatch cross-account observability](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html).
*/
public suspend fun filterLogEvents(input: FilterLogEventsRequest = FilterLogEventsRequest { }): FilterLogEventsResponse
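/*
 * Sample call for filterLogEvents (an illustrative sketch; the group name, filter pattern, and
 * epoch-millisecond times are placeholders):
 *
 *     val events = logs.filterLogEvents(FilterLogEventsRequest {
 *         logGroupName = "/my/app"
 *         filterPattern = "ERROR"
 *         startTime = 1700000000000L
 *         endTime = 1700003600000L
 *     })
 *     events.events?.forEach { println(it.message) }
 */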
/**
* Returns information about a log group data protection policy.
*/
public suspend fun getDataProtectionPolicy(input: GetDataProtectionPolicyRequest): GetDataProtectionPolicyResponse
/**
* Returns complete information about one logical *delivery*. A delivery is a connection between a [*delivery source*](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliverySource.html) and a [*delivery destination*](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestination.html).
*
* A delivery source represents an Amazon Web Services resource that sends logs to a logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in [Enable logging from Amazon Web Services services.](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html)
*
* You need to specify the delivery `id` in this operation. You can find the IDs of the deliveries in your account with the [DescribeDeliveries](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeDeliveries.html) operation.
*/
public suspend fun getDelivery(input: GetDeliveryRequest): GetDeliveryResponse
/**
* Retrieves complete information about one delivery destination.
*/
public suspend fun getDeliveryDestination(input: GetDeliveryDestinationRequest): GetDeliveryDestinationResponse
/**
* Retrieves the delivery destination policy assigned to the delivery destination that you specify. For more information about delivery destinations and their policies, see [PutDeliveryDestinationPolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestinationPolicy.html).
*/
public suspend fun getDeliveryDestinationPolicy(input: GetDeliveryDestinationPolicyRequest): GetDeliveryDestinationPolicyResponse
/**
* Retrieves complete information about one delivery source.
*/
public suspend fun getDeliverySource(input: GetDeliverySourceRequest): GetDeliverySourceResponse
/**
* Retrieves information about the log anomaly detector that you specify.
*/
public suspend fun getLogAnomalyDetector(input: GetLogAnomalyDetectorRequest): GetLogAnomalyDetectorResponse
/**
* Lists log events from the specified log stream. You can list all of the log events or filter using a time range.
*
* By default, this operation returns as many log events as can fit in a response size of 1MB (up to 10,000 log events). You can get additional log events by specifying one of the tokens in a subsequent call. This operation can return empty results while there are more log events available through the token.
*
* If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see [CloudWatch cross-account observability](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html).
*
* You can specify the log group to search by using either `logGroupIdentifier` or `logGroupName`. You must include one of these two parameters, but you can't include both.
*/
public suspend fun getLogEvents(input: GetLogEventsRequest): GetLogEventsResponse
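/*
 * Sample call for getLogEvents (an illustrative sketch; the names are placeholders). Setting
 * `startFromHead = true` reads from the oldest events forward:
 *
 *     val page = logs.getLogEvents(GetLogEventsRequest {
 *         logGroupName = "/my/app"
 *         logStreamName = "instance-i-0123456789abcdef0"
 *         startFromHead = true
 *     })
 *     page.events?.forEach { println("${it.timestamp}: ${it.message}") }
 */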
/**
* Returns a list of the fields that are included in log events in the specified log group. Includes the percentage of log events that contain each field. The search is limited to a time period that you specify.
*
* You can specify the log group to search by using either `logGroupIdentifier` or `logGroupName`. You must specify one of these parameters, but you can't specify both.
*
* In the results, fields that start with `@` are fields generated by CloudWatch Logs. For example, `@timestamp` is the timestamp of each log event. For more information about the fields that are generated by CloudWatch logs, see [Supported Logs and Discovered Fields](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_AnalyzeLogData-discoverable-fields.html).
*
* The response results are sorted by the frequency percentage, starting with the highest percentage.
*
* If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see [CloudWatch cross-account observability](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html).
*/
public suspend fun getLogGroupFields(input: GetLogGroupFieldsRequest = GetLogGroupFieldsRequest { }): GetLogGroupFieldsResponse
/**
* Retrieves all of the fields and values of a single log event. All fields are retrieved, even if the original query that produced the `logRecordPointer` retrieved only a subset of fields. Fields are returned as field name/field value pairs.
*
* The full unparsed log event is returned within `@message`.
*/
public suspend fun getLogRecord(input: GetLogRecordRequest): GetLogRecordResponse
/**
* Returns the results from the specified query.
*
* Only the fields requested in the query are returned, along with a `@ptr` field, which is the identifier for the log record. You can use the value of `@ptr` in a [GetLogRecord](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetLogRecord.html) operation to get the full log record.
*
* `GetQueryResults` does not start running a query. To run a query, use [StartQuery](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html). For more information about how long results of previous queries are available, see [CloudWatch Logs quotas](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/cloudwatch_limits_cwl.html).
*
* If the value of the `Status` field in the output is `Running`, this operation returns only partial results. If you see a value of `Scheduled` or `Running` for the status, you can retry the operation later to see the final results.
*
* If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account to start queries in linked source accounts. For more information, see [CloudWatch cross-account observability](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html).
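*
* A polling sketch (illustrative; it assumes the `QueryStatus` model enum from this package and `kotlinx.coroutines.delay`, and the query ID comes from a prior `StartQuery` call):
*
* ```kotlin
* suspend fun waitForQuery(client: CloudWatchLogsClient, id: String): GetQueryResultsResponse {
*     var results = client.getQueryResults(GetQueryResultsRequest { queryId = id })
*     while (results.status == QueryStatus.Scheduled || results.status == QueryStatus.Running) {
*         delay(1_000)   // back off before polling again
*         results = client.getQueryResults(GetQueryResultsRequest { queryId = id })
*     }
*     return results
* }
* ```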
*/
public suspend fun getQueryResults(input: GetQueryResultsRequest): GetQueryResultsResponse
/**
* Returns a list of anomalies that log anomaly detectors have found. For details about the structure format of each anomaly object that is returned, see the example in this section.
*/
public suspend fun listAnomalies(input: ListAnomaliesRequest = ListAnomaliesRequest { }): ListAnomaliesResponse
/**
* Retrieves a list of the log anomaly detectors in the account.
*/
public suspend fun listLogAnomalyDetectors(input: ListLogAnomalyDetectorsRequest = ListLogAnomalyDetectorsRequest { }): ListLogAnomalyDetectorsResponse
/**
* Displays the tags associated with a CloudWatch Logs resource. Currently, log groups and destinations support tagging.
*/
public suspend fun listTagsForResource(input: ListTagsForResourceRequest): ListTagsForResourceResponse
/**
* The ListTagsLogGroup operation is on the path to deprecation. We recommend that you use [ListTagsForResource](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListTagsForResource.html) instead.
*
* Lists the tags for the specified log group.
*/
@Deprecated("Please use the generic tagging API ListTagsForResource")
public suspend fun listTagsLogGroup(input: ListTagsLogGroupRequest): ListTagsLogGroupResponse
/**
* Creates an account-level data protection policy or subscription filter policy that applies to all log groups or a subset of log groups in the account.
*
* **Data protection policy**
*
* A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy.
*
* Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked.
*
* If you use `PutAccountPolicy` to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.
*
* By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the `logs:Unmask` permission can use a [GetLogEvents](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetLogEvents.html) or [FilterLogEvents](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_FilterLogEvents.html) operation with the `unmask` parameter set to `true` to view the unmasked log events. Users with the `logs:Unmask` permission can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the `unmask` query command.
*
* For more information, including a list of types of data that can be audited and masked, see [Protect sensitive log data with masking](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data.html).
*
* To use the `PutAccountPolicy` operation for a data protection policy, you must be signed on with the `logs:PutDataProtectionPolicy` and `logs:PutAccountPolicy` permissions.
*
* The `PutAccountPolicy` operation applies to all log groups in the account. You can use [PutDataProtectionPolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDataProtectionPolicy.html) to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.
*
* **Subscription filter policy**
*
* A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.
*
* The following destinations are supported for subscription filters:
* + A Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
* + A Firehose data stream in the same account as the subscription policy, for same-account delivery.
* + A Lambda function in the same account as the subscription policy, for same-account delivery.
* + A logical destination in a different account created with [PutDestination](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html), for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.
*
* Each account can have one account-level subscription filter policy per Region. If you are updating an existing filter, you must specify the correct name in `PolicyName`. To perform a `PutAccountPolicy` subscription filter operation for any destination except a Lambda function, you must also have the `iam:PassRole` permission.
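*
* A sketch of creating an account-level data protection policy (illustrative; the policy name is hypothetical, the policy document JSON is built elsewhere, and `PolicyType.DataProtectionPolicy` is the assumed enum member name in this package's model):
*
* ```kotlin
* suspend fun applyAccountDataProtection(client: CloudWatchLogsClient, policyJson: String) {
*     client.putAccountPolicy(PutAccountPolicyRequest {
*         policyName = "account-data-protection"     // hypothetical policy name
*         policyType = PolicyType.DataProtectionPolicy
*         policyDocument = policyJson                // data protection policy document (JSON)
*     })
* }
* ```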
*/
public suspend fun putAccountPolicy(input: PutAccountPolicyRequest): PutAccountPolicyResponse
/**
* Creates a data protection policy for the specified log group. A data protection policy can help safeguard sensitive data that's ingested by the log group by auditing and masking the sensitive log data.
*
* Sensitive data is detected and masked when it is ingested into the log group. When you set a data protection policy, log events ingested into the log group before that time are not masked.
*
* By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the `logs:Unmask` permission can use a [GetLogEvents](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetLogEvents.html) or [FilterLogEvents](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_FilterLogEvents.html) operation with the `unmask` parameter set to `true` to view the unmasked log events. Users with the `logs:Unmask` permission can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the `unmask` query command.
*
* For more information, including a list of types of data that can be audited and masked, see [Protect sensitive log data with masking](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data.html).
*
* The `PutDataProtectionPolicy` operation applies to only the specified log group. You can also use [PutAccountPolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutAccountPolicy.html) to create an account-level data protection policy that applies to all log groups in the account, including both existing log groups and log groups that are created later. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.
*/
public suspend fun putDataProtectionPolicy(input: PutDataProtectionPolicyRequest): PutDataProtectionPolicyResponse
/**
* Creates or updates a logical *delivery destination*. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Firehose are supported as logs delivery destinations.
*
* To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
* + Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see [PutDeliverySource](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliverySource.html).
* + Use `PutDeliveryDestination` to create a *delivery destination*, which is a logical object that represents the actual delivery destination.
* + If you are delivering logs cross-account, you must use [PutDeliveryDestinationPolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestinationPolicy.html) in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
* + Use `CreateDelivery` to create a *delivery* by pairing exactly one delivery source and one delivery destination. For more information, see [CreateDelivery](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateDelivery.html).
*
* You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
*
* Only some Amazon Web Services services support being configured as a delivery source. These services are listed as **Supported [V2 Permissions]** in the table at [Enabling logging from Amazon Web Services services.](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html)
*
* If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify.
*/
public suspend fun putDeliveryDestination(input: PutDeliveryDestinationRequest): PutDeliveryDestinationResponse
/**
* Creates and assigns an IAM policy that grants permissions to CloudWatch Logs to deliver logs cross-account to a specified destination in this account. To configure the delivery of logs from an Amazon Web Services service in another account to a logs delivery destination in the current account, you must do the following:
* + Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see [PutDeliverySource](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliverySource.html).
* + Create a *delivery destination*, which is a logical object that represents the actual delivery destination. For more information, see [PutDeliveryDestination](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestination.html).
* + Use this operation in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
* + Create a *delivery* by pairing exactly one delivery source and one delivery destination. For more information, see [CreateDelivery](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateDelivery.html).
*
* Only some Amazon Web Services services support being configured as a delivery source. These services are listed as **Supported [V2 Permissions]** in the table at [Enabling logging from Amazon Web Services services.](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html)
*
* The contents of the policy must include two statements. One statement enables general logs delivery, and the other allows delivery to the chosen destination. See the examples for the needed policies.
*/
public suspend fun putDeliveryDestinationPolicy(input: PutDeliveryDestinationPolicyRequest): PutDeliveryDestinationPolicyResponse
/**
* Creates or updates a logical *delivery source*. A delivery source represents an Amazon Web Services resource that sends logs to a logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose.
*
* To configure logs delivery between a delivery destination and an Amazon Web Services service that is supported as a delivery source, you must do the following:
* + Use `PutDeliverySource` to create a delivery source, which is a logical object that represents the resource that is actually sending the logs.
* + Use `PutDeliveryDestination` to create a *delivery destination*, which is a logical object that represents the actual delivery destination. For more information, see [PutDeliveryDestination](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestination.html).
* + If you are delivering logs cross-account, you must use [PutDeliveryDestinationPolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestinationPolicy.html) in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
* + Use `CreateDelivery` to create a *delivery* by pairing exactly one delivery source and one delivery destination. For more information, see [CreateDelivery](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateDelivery.html).
*
* You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
*
* Only some Amazon Web Services services support being configured as a delivery source. These services are listed as **Supported [V2 Permissions]** in the table at [Enabling logging from Amazon Web Services services.](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html)
*
* If you use this operation to update an existing delivery source, all the current delivery source parameters are overwritten with the new parameter values that you specify.
*/
public suspend fun putDeliverySource(input: PutDeliverySourceRequest): PutDeliverySourceResponse
/**
* Creates or updates a destination. This operation is used only to create destinations for cross-account subscriptions.
*
* A destination encapsulates a physical resource (such as an Amazon Kinesis stream). With a destination, you can subscribe to a real-time stream of log events for a different account, ingested using [PutLogEvents](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html).
*
* Through an access policy, a destination controls what is written to it. By default, `PutDestination` does not set any access policy with the destination, which means a cross-account user cannot call [PutSubscriptionFilter](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutSubscriptionFilter.html) against this destination. To enable this, the destination owner must call [PutDestinationPolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestinationPolicy.html) after `PutDestination`.
*
* To perform a `PutDestination` operation, you must also have the `iam:PassRole` permission.
*/
public suspend fun putDestination(input: PutDestinationRequest): PutDestinationResponse
/**
* Creates or updates an access policy associated with an existing destination. An access policy is an [IAM policy document](https://docs.aws.amazon.com/IAM/latest/UserGuide/policies_overview.html) that is used to authorize claims to register a subscription filter against a given destination.
*/
public suspend fun putDestinationPolicy(input: PutDestinationPolicyRequest): PutDestinationPolicyResponse
/**
* Uploads a batch of log events to the specified log stream.
*
* The sequence token is now ignored in `PutLogEvents` actions. `PutLogEvents` actions are always accepted and never return `InvalidSequenceTokenException` or `DataAlreadyAcceptedException` even if the sequence token is not valid. You can use parallel `PutLogEvents` actions on the same log stream.
*
* The batch of events must satisfy the following constraints:
* + The maximum batch size is 1,048,576 bytes. This size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event.
* + None of the log events in the batch can be more than 2 hours in the future.
* + None of the log events in the batch can be more than 14 days in the past. Also, none of the log events can be from earlier than the retention period of the log group.
* + The log events in the batch must be in chronological order by their timestamp. The timestamp is the time that the event occurred, expressed as the number of milliseconds after `Jan 1, 1970 00:00:00 UTC`. (In Amazon Web Services Tools for PowerShell and the Amazon Web Services SDK for .NET, the timestamp is specified in .NET format: `yyyy-mm-ddThh:mm:ss`. For example, `2017-09-15T13:45:30`.)
* + A batch of log events in a single request cannot span more than 24 hours. Otherwise, the operation fails.
* + Each log event can be no larger than 256 KB.
* + The maximum number of log events in a batch is 10,000.
* + The quota of five requests per second per log stream has been removed. Instead, `PutLogEvents` actions are throttled based on a per-second per-account quota. You can request an increase to the per-second throttling quota by using the Service Quotas service.
*
* If a call to `PutLogEvents` returns "UnrecognizedClientException", the most likely cause is an invalid Amazon Web Services access key ID or secret key.
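*
* A minimal sketch of uploading a single event (illustrative; the log group and stream names are hypothetical, the timestamp is epoch milliseconds as described above, and the usual builder syntax for the `InputLogEvent` model is assumed):
*
* ```kotlin
* suspend fun logMessage(client: CloudWatchLogsClient, text: String) {
*     client.putLogEvents(PutLogEventsRequest {
*         logGroupName = "/my/app"            // hypothetical log group
*         logStreamName = "instance-1"        // hypothetical log stream
*         logEvents = listOf(InputLogEvent {
*             timestamp = System.currentTimeMillis()
*             message = text
*         })
*     })
* }
* ```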
*/
public suspend fun putLogEvents(input: PutLogEventsRequest): PutLogEventsResponse
/**
* Creates or updates a metric filter and associates it with the specified log group. With metric filters, you can configure rules to extract metric data from log events ingested through [PutLogEvents](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html).
*
* The maximum number of metric filters that can be associated with a log group is 100.
*
* Using regular expressions to create metric filters is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in metric filters, see [ Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html).
*
* When you create a metric filter, you can also optionally assign a unit and dimensions to the metric that is created.
*
* Metrics extracted from log events are charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as `IPAddress` or `requestID` as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate custom metric.
*
* CloudWatch Logs might disable a metric filter if it generates 1,000 different name/value pairs for your specified dimensions within one hour.
*
* You can also set up a billing alarm to alert you if your charges are higher than expected. For more information, see [ Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/monitor_estimated_charges_with_cloudwatch.html).
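*
* A sketch that counts ERROR lines as a custom metric (illustrative; the names, namespace, and filter pattern are hypothetical, and the usual builder syntax for the `MetricTransformation` model is assumed):
*
* ```kotlin
* suspend fun countErrors(client: CloudWatchLogsClient) {
*     client.putMetricFilter(PutMetricFilterRequest {
*         logGroupName = "/my/app"               // hypothetical log group
*         filterName = "ErrorCount"              // hypothetical filter name
*         filterPattern = "ERROR"                // match log events containing ERROR
*         metricTransformations = listOf(MetricTransformation {
*             metricName = "ErrorCount"
*             metricNamespace = "MyApp"          // hypothetical namespace
*             metricValue = "1"                  // emit 1 per matching event
*         })
*     })
* }
* ```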
*/
public suspend fun putMetricFilter(input: PutMetricFilterRequest): PutMetricFilterResponse
/**
* Creates or updates a query definition for CloudWatch Logs Insights. For more information, see [Analyzing Log Data with CloudWatch Logs Insights](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AnalyzingLogData.html).
*
* To update a query definition, specify its `queryDefinitionId` in your request. The values of `name`, `queryString`, and `logGroupNames` are changed to the values that you specify in your update operation. No current values are retained from the current query definition. For example, imagine updating a current query definition that includes log groups. If you don't specify the `logGroupNames` parameter in your update operation, the query definition changes to contain no log groups.
*
* You must have the `logs:PutQueryDefinition` permission to be able to perform this operation.
*/
public suspend fun putQueryDefinition(input: PutQueryDefinitionRequest): PutQueryDefinitionResponse
/**
* Creates or updates a resource policy that allows other Amazon Web Services services, such as Amazon Route 53, to put log events to this account. An account can have up to 10 resource policies per Amazon Web Services Region.
*/
public suspend fun putResourcePolicy(input: PutResourcePolicyRequest = PutResourcePolicyRequest { }): PutResourcePolicyResponse
/**
* Sets the retention of the specified log group. With a retention policy, you can configure the number of days for which to retain log events in the specified log group.
*
* CloudWatch Logs doesn't immediately delete log events when they reach their retention setting. It typically takes up to 72 hours after that before log events are deleted, but in rare situations might take longer.
*
* To illustrate, imagine that you change a log group to have a longer retention setting when it contains log events that are past the expiration date, but haven't been deleted. Those log events will take up to 72 hours to be deleted after the new retention date is reached. To make sure that log data is deleted permanently, keep a log group at its lower retention setting until 72 hours after the previous retention period ends. Alternatively, wait to change the retention setting until you confirm that the earlier log events are deleted.
*
* When log events reach their retention setting they are marked for deletion. After they are marked for deletion, they do not add to your archival storage costs anymore, even if they are not actually deleted until later. These log events marked for deletion are also not included when you use an API to retrieve the `storedBytes` value to see how many bytes a log group is storing.
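*
* A minimal usage sketch (illustrative; the log group name is hypothetical):
*
* ```kotlin
* suspend fun keepThirtyDays(client: CloudWatchLogsClient) {
*     client.putRetentionPolicy(PutRetentionPolicyRequest {
*         logGroupName = "/my/app"    // hypothetical log group
*         retentionInDays = 30        // must be one of the retention values the API accepts
*     })
* }
* ```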
*/
public suspend fun putRetentionPolicy(input: PutRetentionPolicyRequest): PutRetentionPolicyResponse
/**
* Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through [PutLogEvents](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html) and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.
*
* The following destinations are supported for subscription filters:
* + An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery.
* + A logical destination created with [PutDestination](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html) that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Firehose as logical destinations.
* + An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery.
* + A Lambda function that belongs to the same account as the subscription filter, for same-account delivery.
*
* Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in `filterName`.
*
* Using regular expressions to create subscription filters is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in subscription filters, see [ Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html).
*
* To perform a `PutSubscriptionFilter` operation for any destination except a Lambda function, you must also have the `iam:PassRole` permission.
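*
* A sketch that subscribes a log group to a Kinesis data stream (illustrative; the names are hypothetical, and `roleArn` is an IAM role that lets CloudWatch Logs write to the stream):
*
* ```kotlin
* suspend fun subscribeToKinesis(client: CloudWatchLogsClient, streamArn: String, deliveryRoleArn: String) {
*     client.putSubscriptionFilter(PutSubscriptionFilterRequest {
*         logGroupName = "/my/app"        // hypothetical log group
*         filterName = "ship-to-kinesis"  // hypothetical filter name
*         filterPattern = ""              // empty pattern matches every log event
*         destinationArn = streamArn
*         roleArn = deliveryRoleArn
*     })
* }
* ```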
*/
public suspend fun putSubscriptionFilter(input: PutSubscriptionFilterRequest): PutSubscriptionFilterResponse
/**
* Starts a Live Tail streaming session for one or more log groups. A Live Tail session returns a stream of log events that have been recently ingested in the log groups. For more information, see [Use Live Tail to view logs in near real time](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatchLogs_LiveTail.html).
*
* The response to this operation is a response stream, over which the server sends live log events and the client receives them.
*
* The following objects are sent over the stream:
* + A single [LiveTailSessionStart](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_LiveTailSessionStart.html) object is sent at the start of the session.
* + Every second, a [LiveTailSessionUpdate](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_LiveTailSessionUpdate.html) object is sent. Each of these objects contains an array of the actual log events. If no new log events were ingested in the past second, the `LiveTailSessionUpdate` object will contain an empty array. The array of log events contained in a `LiveTailSessionUpdate` can include as many as 500 log events. If the number of log events matching the request exceeds 500 per second, the log events are sampled down to 500 log events to be included in each `LiveTailSessionUpdate` object. If your client consumes the log events slower than the server produces them, CloudWatch Logs buffers up to 10 `LiveTailSessionUpdate` events or 5000 log events, after which it starts dropping the oldest events.
* + A [SessionStreamingException](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartLiveTailResponseStream.html#CWL-Type-StartLiveTailResponseStream-SessionStreamingException) object is returned if an unknown error occurs on the server side.
* + A [SessionTimeoutException](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartLiveTailResponseStream.html#CWL-Type-StartLiveTailResponseStream-SessionTimeoutException) object is returned when the session times out, after it has been kept open for three hours.
*
* You can end a session before it times out by closing the session stream or by closing the client that is receiving the stream. The session also ends if the established connection between the client and the server breaks.
*
* For examples of using an SDK to start a Live Tail session, see [ Start a Live Tail session using an Amazon Web Services SDK](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/example_cloudwatch-logs_StartLiveTail_section.html).
*/
public suspend fun <T> startLiveTail(input: StartLiveTailRequest, block: suspend (StartLiveTailResponse) -> T): T
/**
* Schedules a query of a log group using CloudWatch Logs Insights. You specify the log group and time range to query and the query string to use.
*
* For more information, see [CloudWatch Logs Insights Query Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html).
*
* After you run a query using `StartQuery`, the query results are stored by CloudWatch Logs. You can use [GetQueryResults](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetQueryResults.html) to retrieve the results of a query, using the `queryId` that `StartQuery` returns.
*
* If you have associated a KMS key with the query results in this account, then [StartQuery](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html) uses that key to encrypt the results when it stores them. If no key is associated with query results, the query results are encrypted with the default CloudWatch Logs encryption method.
*
* Queries time out after 60 minutes of runtime. If your queries are timing out, reduce the time range being searched or partition your query into a number of queries.
*
* If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account to start a query in a linked source account. For more information, see [CloudWatch cross-account observability](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html). For a cross-account `StartQuery` operation, the query definition must be defined in the monitoring account.
*
* You can have up to 30 concurrent CloudWatch Logs Insights queries, including queries that have been added to dashboards.
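*
* A sketch that starts a query over the last hour (illustrative; the log group name and query string are hypothetical, and `startTime`/`endTime` are epoch seconds):
*
* ```kotlin
* suspend fun startErrorQuery(client: CloudWatchLogsClient): String? {
*     val now = System.currentTimeMillis() / 1_000
*     val response = client.startQuery(StartQueryRequest {
*         logGroupName = "/my/app"                                    // hypothetical log group
*         startTime = now - 3_600                                     // one hour ago
*         endTime = now
*         queryString = "fields @timestamp, @message | filter @message like /ERROR/"
*     })
*     return response.queryId   // pass to GetQueryResults to retrieve the results
* }
* ```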
*/
public suspend fun startQuery(input: StartQueryRequest): StartQueryResponse
/**
* Stops a CloudWatch Logs Insights query that is in progress. If the query has already ended, the operation returns an error indicating that the specified query is not running.
*/
public suspend fun stopQuery(input: StopQueryRequest): StopQueryResponse
/**
* The TagLogGroup operation is on the path to deprecation. We recommend that you use [TagResource](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_TagResource.html) instead.
*
* Adds or updates the specified tags for the specified log group.
*
* To list the tags for a log group, use [ListTagsForResource](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListTagsForResource.html). To remove tags, use [UntagResource](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_UntagResource.html).
*
* For more information about tags, see [Tag Log Groups in Amazon CloudWatch Logs](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html#log-group-tagging) in the *Amazon CloudWatch Logs User Guide*.
*
* CloudWatch Logs doesn't support IAM policies that prevent users from assigning specified tags to log groups using the `aws:Resource/key-name ` or `aws:TagKeys` condition keys. For more information about using tags to control access, see [Controlling access to Amazon Web Services resources using tags](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html).
*/
@Deprecated("Please use the generic tagging API TagResource")
public suspend fun tagLogGroup(input: TagLogGroupRequest): TagLogGroupResponse
/**
* Assigns one or more tags (key-value pairs) to the specified CloudWatch Logs resource. Currently, the only CloudWatch Logs resources that can be tagged are log groups and destinations.
*
* Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.
*
* Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.
*
* You can use the `TagResource` action with a resource that already has tags. If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.
*
* You can associate as many as 50 tags with a CloudWatch Logs resource.
*/
public suspend fun tagResource(input: TagResourceRequest): TagResourceResponse
/**
* Tests the filter pattern of a metric filter against a sample of log event messages. You can use this operation to validate the correctness of a metric filter pattern.
*/
public suspend fun testMetricFilter(input: TestMetricFilterRequest): TestMetricFilterResponse
/**
* The UntagLogGroup operation is on the path to deprecation. We recommend that you use [UntagResource](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_UntagResource.html) instead.
*
* Removes the specified tags from the specified log group.
*
* To list the tags for a log group, use [ListTagsForResource](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListTagsForResource.html). To add tags, use [TagResource](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_TagResource.html).
*
* CloudWatch Logs doesn't support IAM policies that prevent users from assigning specified tags to log groups using the `aws:Resource/key-name ` or `aws:TagKeys` condition keys.
*/
@Deprecated("Please use the generic tagging API UntagResource")
public suspend fun untagLogGroup(input: UntagLogGroupRequest): UntagLogGroupResponse
/**
* Removes one or more tags from the specified resource.
*/
public suspend fun untagResource(input: UntagResourceRequest): UntagResourceResponse
/**
* Use this operation to *suppress* anomaly detection for a specified anomaly or pattern. If you suppress an anomaly, CloudWatch Logs won't report new occurrences of that anomaly and won't update that anomaly with new data. If you suppress a pattern, CloudWatch Logs won't report any anomalies related to that pattern.
*
* You must specify either `anomalyId` or `patternId`, but you can't specify both parameters in the same operation.
*
* If you have previously used this operation to suppress detection of a pattern or anomaly, you can use it again to cause CloudWatch Logs to end the suppression. To do this, use this operation and specify the anomaly or pattern to stop suppressing, and omit the `suppressionType` and `suppressionPeriod` parameters.
*/
public suspend fun updateAnomaly(input: UpdateAnomalyRequest): UpdateAnomalyResponse
/**
* Use this operation to update the configuration of a [delivery](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_Delivery.html) to change either the S3 path pattern or the format of the delivered logs. You can't use this operation to change the source or destination of the delivery.
*/
public suspend fun updateDeliveryConfiguration(input: UpdateDeliveryConfigurationRequest): UpdateDeliveryConfigurationResponse
/**
* Updates an existing log anomaly detector.
*/
public suspend fun updateLogAnomalyDetector(input: UpdateLogAnomalyDetectorRequest): UpdateLogAnomalyDetectorResponse
}
/**
* Create a copy of the client with one or more configuration values overridden.
* This method allows the caller to perform scoped config overrides for one or more client operations.
*
* Any resources created on your behalf will be shared between clients, and will only be closed when ALL clients using them are closed.
* If you provide a resource (e.g. [HttpClientEngine]) to the SDK, you are responsible for managing the lifetime of that resource.
*/
public fun CloudWatchLogsClient.withConfig(block: CloudWatchLogsClient.Config.Builder.() -> Unit): CloudWatchLogsClient {
val newConfig = config.toBuilder().apply(block).build()
return DefaultCloudWatchLogsClient(newConfig)
}
/**
* Associates the specified KMS key with either one log group in the account, or with all stored CloudWatch Logs query insights results in the account.
*
* When you use `AssociateKmsKey`, you specify either the `logGroupName` parameter or the `resourceIdentifier` parameter. You can't specify both of those parameters in the same operation.
* + Specify the `logGroupName` parameter to cause all log events stored in the log group to be encrypted with that key. Only the log events ingested after the key is associated are encrypted with that key. Associating a KMS key with a log group overrides any existing associations between the log group and a KMS key. After a KMS key is associated with a log group, all newly ingested data for the log group is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested. Associating a key with a log group does not cause the results of queries of that log group to be encrypted with that key. To have query results encrypted with a KMS key, you must use an `AssociateKmsKey` operation with the `resourceIdentifier` parameter that specifies a `query-result` resource.
* + Specify the `resourceIdentifier` parameter with a `query-result` resource, to use that key to encrypt the stored results of all future [StartQuery](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html) operations in the account. The response from a [GetQueryResults](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetQueryResults.html) operation will still return the query results in plain text. Even if you have not associated a key with your query results, the query results are encrypted when stored, using the default CloudWatch Logs method. If you run a query from a monitoring account that queries logs in a source account, the query results key from the monitoring account, if any, is used.
*
* If you delete the key that is used to encrypt log events or log group query results, then all the associated stored log events or query results that were encrypted with that key will be unencryptable and unusable.
*
* CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group or query results. For more information, see [Using Symmetric and Asymmetric Keys](https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html).
*
* It can take up to 5 minutes for this operation to take effect.
*
* If you attempt to associate a KMS key with a log group but the KMS key does not exist or the KMS key is disabled, you receive an `InvalidParameterException` error.
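*
* A minimal sketch of associating a key with one log group (illustrative; the log group name is hypothetical and the key ARN is passed in):
*
* ```kotlin
* suspend fun encryptLogGroup(client: CloudWatchLogsClient, keyArn: String) {
*     client.associateKmsKey {
*         logGroupName = "/my/app"   // hypothetical log group
*         kmsKeyId = keyArn          // ARN of a symmetric KMS key
*     }
* }
* ```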
*/
public suspend inline fun CloudWatchLogsClient.associateKmsKey(crossinline block: AssociateKmsKeyRequest.Builder.() -> Unit): AssociateKmsKeyResponse = associateKmsKey(AssociateKmsKeyRequest.Builder().apply(block).build())
/**
* Cancels the specified export task.
*
* The task must be in the `PENDING` or `RUNNING` state.
*/
public suspend inline fun CloudWatchLogsClient.cancelExportTask(crossinline block: CancelExportTaskRequest.Builder.() -> Unit): CancelExportTaskResponse = cancelExportTask(CancelExportTaskRequest.Builder().apply(block).build())
/**
* Creates a *delivery*. A delivery is a connection between a logical *delivery source* and a logical *delivery destination* that you have already created.
*
* Only some Amazon Web Services services support being configured as a delivery source using this operation. These services are listed as **Supported [V2 Permissions]** in the table at [Enabling logging from Amazon Web Services services.](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html)
*
* A delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose.
*
* To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
* + Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see [PutDeliverySource](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliverySource.html).
* + Create a *delivery destination*, which is a logical object that represents the actual delivery destination. For more information, see [PutDeliveryDestination](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestination.html).
* + If you are delivering logs cross-account, you must use [PutDeliveryDestinationPolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestinationPolicy.html) in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
* + Use `CreateDelivery` to create a *delivery* by pairing exactly one delivery source and one delivery destination.
*
* You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
*
* To update an existing delivery configuration, use [UpdateDeliveryConfiguration](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_UpdateDeliveryConfiguration.html).
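*
* A sketch of the flow described above (illustrative only; the names, ARNs, and log type are hypothetical, and it assumes the corresponding builder-style extensions for `PutDeliverySource` and `PutDeliveryDestination` later in this file together with the `DeliveryDestinationConfiguration` model):
*
* ```kotlin
* suspend fun deliverToS3(client: CloudWatchLogsClient, sourceArn: String, bucketArn: String) {
*     client.putDeliverySource {
*         name = "my-source"             // hypothetical delivery source name
*         resourceArn = sourceArn        // the resource that is actually sending the logs
*         logType = "ACCESS_LOGS"        // hypothetical; valid log types depend on the source service
*     }
*     val destination = client.putDeliveryDestination {
*         name = "my-s3-destination"     // hypothetical delivery destination name
*         deliveryDestinationConfiguration = DeliveryDestinationConfiguration {
*             destinationResourceArn = bucketArn
*         }
*     }
*     client.createDelivery {
*         deliverySourceName = "my-source"
*         deliveryDestinationArn = destination.deliveryDestination?.arn
*     }
* }
* ```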
*/
public suspend inline fun CloudWatchLogsClient.createDelivery(crossinline block: CreateDeliveryRequest.Builder.() -> Unit): CreateDeliveryResponse = createDelivery(CreateDeliveryRequest.Builder().apply(block).build())
/**
* Creates an export task so that you can efficiently export data from a log group to an Amazon S3 bucket. When you perform a `CreateExportTask` operation, you must use credentials that have permission to write to the S3 bucket that you specify as the destination.
*
* Exporting log data to S3 buckets that are encrypted by KMS is supported. Exporting log data to Amazon S3 buckets that have S3 Object Lock enabled with a retention period is also supported.
*
* Exporting to S3 buckets that are encrypted with AES-256 is supported.
*
* This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use [DescribeExportTasks](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeExportTasks.html) to get the status of the export task. Each account can only have one active (`RUNNING` or `PENDING`) export task at a time. To cancel an export task, use [CancelExportTask](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CancelExportTask.html).
*
* You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate log data for each export task, specify a prefix to be used as the Amazon S3 key prefix for all exported objects.
*
* Time-based sorting on chunks of log data inside an exported file is not guaranteed. You can sort the exported log field data by using Linux utilities.
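*
* A sketch that exports the last 24 hours of a log group to an S3 bucket (illustrative; the task name, log group, bucket, and prefix are hypothetical, and `from`/`to` are epoch milliseconds):
*
* ```kotlin
* suspend fun exportLastDay(client: CloudWatchLogsClient): String? {
*     val now = System.currentTimeMillis()
*     val task = client.createExportTask {
*         taskName = "daily-export"                   // hypothetical task name
*         logGroupName = "/my/app"                    // hypothetical log group
*         from = now - 24 * 60 * 60 * 1000L           // 24 hours ago
*         to = now
*         destination = "my-export-bucket"            // hypothetical S3 bucket name
*         destinationPrefix = "cloudwatch-exports"    // hypothetical S3 key prefix
*     }
*     return task.taskId                              // poll DescribeExportTasks with this ID
* }
* ```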
*/
public suspend inline fun CloudWatchLogsClient.createExportTask(crossinline block: CreateExportTaskRequest.Builder.() -> Unit): CreateExportTaskResponse = createExportTask(CreateExportTaskRequest.Builder().apply(block).build())
/**
* Creates an *anomaly detector* that regularly scans one or more log groups and looks for patterns and anomalies in the logs.
*
* An anomaly detector can help surface issues by automatically discovering anomalies in your log event traffic. An anomaly detector uses machine learning algorithms to scan log events and find *patterns*. A pattern is a shared text structure that recurs among your log fields. Patterns provide a useful tool for analyzing large sets of logs because a large number of log events can often be compressed into a few patterns.
*
* The anomaly detector uses pattern recognition to find `anomalies`, which are unusual log events. It uses the `evaluationFrequency` to compare current log events and patterns with trained baselines.
*
* Fields within a pattern are called *tokens*. Fields that vary within a pattern, such as a request ID or timestamp, are referred to as *dynamic tokens* and represented by `<*>`.
*
* The following is an example of a pattern:
*
* `[INFO] Request time: <*> ms`
*
* This pattern represents log events like `[INFO] Request time: 327 ms` and other similar log events that differ only by the number, in this case 327. When the pattern is displayed, the different numbers are replaced by `<*>`.
*
* Any parts of log events that are masked as sensitive data are not scanned for anomalies. For more information about masking sensitive data, see [Help protect sensitive log data with masking](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data.html).
*/
public suspend inline fun CloudWatchLogsClient.createLogAnomalyDetector(crossinline block: CreateLogAnomalyDetectorRequest.Builder.() -> Unit): CreateLogAnomalyDetectorResponse = createLogAnomalyDetector(CreateLogAnomalyDetectorRequest.Builder().apply(block).build())
/**
* Creates a log group with the specified name. You can create up to 1,000,000 log groups per Region per account.
*
* You must use the following guidelines when naming a log group:
* + Log group names must be unique within a Region for an Amazon Web Services account.
* + Log group names can be between 1 and 512 characters long.
* + Log group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and '#' (number sign)
* + Log group names can't start with the string `aws/`
*
* When you create a log group, by default the log events in the log group do not expire. To set a retention policy so that events expire and are deleted after a specified time, use [PutRetentionPolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutRetentionPolicy.html).
*
* If you associate a KMS key with the log group, ingested data is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested.
*
* If you attempt to associate a KMS key with the log group but the KMS key does not exist or the KMS key is disabled, you receive an `InvalidParameterException` error.
*
* CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group. For more information, see [Using Symmetric and Asymmetric Keys](https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html).
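*
* A minimal sketch that creates a log group and then sets a retention policy (illustrative; the name is hypothetical, and the builder-style extension for `PutRetentionPolicy` later in this file is assumed):
*
* ```kotlin
* suspend fun createAppLogGroup(client: CloudWatchLogsClient) {
*     client.createLogGroup {
*         logGroupName = "/my/app"    // hypothetical log group name
*     }
*     client.putRetentionPolicy {
*         logGroupName = "/my/app"
*         retentionInDays = 30        // expire events after 30 days
*     }
* }
* ```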
*/
public suspend inline fun CloudWatchLogsClient.createLogGroup(crossinline block: CreateLogGroupRequest.Builder.() -> Unit): CreateLogGroupResponse = createLogGroup(CreateLogGroupRequest.Builder().apply(block).build())
/**
* Creates a log stream for the specified log group. A log stream is a sequence of log events that originate from a single source, such as an application instance or a resource that is being monitored.
*
* There is no limit on the number of log streams that you can create for a log group. There is a limit of 50 TPS on `CreateLogStream` operations, after which transactions are throttled.
*
* You must use the following guidelines when naming a log stream:
* + Log stream names must be unique within the log group.
* + Log stream names can be between 1 and 512 characters long.
* + Don't use ':' (colon) or '*' (asterisk) characters.
*/
public suspend inline fun CloudWatchLogsClient.createLogStream(crossinline block: CreateLogStreamRequest.Builder.() -> Unit): CreateLogStreamResponse = createLogStream(CreateLogStreamRequest.Builder().apply(block).build())
/**
* Deletes a CloudWatch Logs account policy. This stops the policy from applying to all log groups or a subset of log groups in the account. Log-group level policies will still be in effect.
*
* To use this operation, you must be signed on with the correct permissions depending on the type of policy that you are deleting.
* + To delete a data protection policy, you must have the `logs:DeleteDataProtectionPolicy` and `logs:DeleteAccountPolicy` permissions.
* + To delete a subscription filter policy, you must have the `logs:DeleteSubscriptionFilter` and `logs:DeleteAccountPolicy` permissions.
*/
public suspend inline fun CloudWatchLogsClient.deleteAccountPolicy(crossinline block: DeleteAccountPolicyRequest.Builder.() -> Unit): DeleteAccountPolicyResponse = deleteAccountPolicy(DeleteAccountPolicyRequest.Builder().apply(block).build())
/**
* Deletes the data protection policy from the specified log group.
*
* For more information about data protection policies, see [PutDataProtectionPolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDataProtectionPolicy.html).
*/
public suspend inline fun CloudWatchLogsClient.deleteDataProtectionPolicy(crossinline block: DeleteDataProtectionPolicyRequest.Builder.() -> Unit): DeleteDataProtectionPolicyResponse = deleteDataProtectionPolicy(DeleteDataProtectionPolicyRequest.Builder().apply(block).build())
/**
* Deletes a *delivery*. A delivery is a connection between a logical *delivery source* and a logical *delivery destination*. Deleting a delivery only deletes the connection between the delivery source and delivery destination. It does not delete the delivery destination or the delivery source.
*/
public suspend inline fun CloudWatchLogsClient.deleteDelivery(crossinline block: DeleteDeliveryRequest.Builder.() -> Unit): DeleteDeliveryResponse = deleteDelivery(DeleteDeliveryRequest.Builder().apply(block).build())
/**
* Deletes a *delivery destination*. A delivery is a connection between a logical *delivery source* and a logical *delivery destination*.
*
* You can't delete a delivery destination if any current deliveries are associated with it. To find whether any deliveries are associated with this delivery destination, use the [DescribeDeliveries](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeDeliveries.html) operation and check the `deliveryDestinationArn` field in the results.
*/
public suspend inline fun CloudWatchLogsClient.deleteDeliveryDestination(crossinline block: DeleteDeliveryDestinationRequest.Builder.() -> Unit): DeleteDeliveryDestinationResponse = deleteDeliveryDestination(DeleteDeliveryDestinationRequest.Builder().apply(block).build())
/**
* Deletes a delivery destination policy. For more information about these policies, see [PutDeliveryDestinationPolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestinationPolicy.html).
*/
public suspend inline fun CloudWatchLogsClient.deleteDeliveryDestinationPolicy(crossinline block: DeleteDeliveryDestinationPolicyRequest.Builder.() -> Unit): DeleteDeliveryDestinationPolicyResponse = deleteDeliveryDestinationPolicy(DeleteDeliveryDestinationPolicyRequest.Builder().apply(block).build())
/**
* Deletes a *delivery source*. A delivery is a connection between a logical *delivery source* and a logical *delivery destination*.
*
* You can't delete a delivery source if any current deliveries are associated with it. To find whether any deliveries are associated with this delivery source, use the [DescribeDeliveries](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeDeliveries.html) operation and check the `deliverySourceName` field in the results.
*/
public suspend inline fun CloudWatchLogsClient.deleteDeliverySource(crossinline block: DeleteDeliverySourceRequest.Builder.() -> Unit): DeleteDeliverySourceResponse = deleteDeliverySource(DeleteDeliverySourceRequest.Builder().apply(block).build())
/**
* Deletes the specified destination, and eventually disables all the subscription filters that publish to it. This operation does not delete the physical resource encapsulated by the destination.
*/
public suspend inline fun CloudWatchLogsClient.deleteDestination(crossinline block: DeleteDestinationRequest.Builder.() -> Unit): DeleteDestinationResponse = deleteDestination(DeleteDestinationRequest.Builder().apply(block).build())
/**
* Deletes the specified CloudWatch Logs anomaly detector.
*/
public suspend inline fun CloudWatchLogsClient.deleteLogAnomalyDetector(crossinline block: DeleteLogAnomalyDetectorRequest.Builder.() -> Unit): DeleteLogAnomalyDetectorResponse = deleteLogAnomalyDetector(DeleteLogAnomalyDetectorRequest.Builder().apply(block).build())
/**
* Deletes the specified log group and permanently deletes all the archived log events associated with the log group.
*/
public suspend inline fun CloudWatchLogsClient.deleteLogGroup(crossinline block: DeleteLogGroupRequest.Builder.() -> Unit): DeleteLogGroupResponse = deleteLogGroup(DeleteLogGroupRequest.Builder().apply(block).build())
/**
* Deletes the specified log stream and permanently deletes all the archived log events associated with the log stream.
*/
public suspend inline fun CloudWatchLogsClient.deleteLogStream(crossinline block: DeleteLogStreamRequest.Builder.() -> Unit): DeleteLogStreamResponse = deleteLogStream(DeleteLogStreamRequest.Builder().apply(block).build())
/**
* Deletes the specified metric filter.
*/
public suspend inline fun CloudWatchLogsClient.deleteMetricFilter(crossinline block: DeleteMetricFilterRequest.Builder.() -> Unit): DeleteMetricFilterResponse = deleteMetricFilter(DeleteMetricFilterRequest.Builder().apply(block).build())
/**
* Deletes a saved CloudWatch Logs Insights query definition. A query definition contains details about a saved CloudWatch Logs Insights query.
*
* Each `DeleteQueryDefinition` operation can delete one query definition.
*
* You must have the `logs:DeleteQueryDefinition` permission to be able to perform this operation.
*/
public suspend inline fun CloudWatchLogsClient.deleteQueryDefinition(crossinline block: DeleteQueryDefinitionRequest.Builder.() -> Unit): DeleteQueryDefinitionResponse = deleteQueryDefinition(DeleteQueryDefinitionRequest.Builder().apply(block).build())
/**
* Deletes a resource policy from this account. This revokes the access of the identities in that policy to put log events to this account.
*/
public suspend inline fun CloudWatchLogsClient.deleteResourcePolicy(crossinline block: DeleteResourcePolicyRequest.Builder.() -> Unit): DeleteResourcePolicyResponse = deleteResourcePolicy(DeleteResourcePolicyRequest.Builder().apply(block).build())
/**
* Deletes the specified retention policy.
*
* Log events do not expire if they belong to log groups without a retention policy.
*/
public suspend inline fun CloudWatchLogsClient.deleteRetentionPolicy(crossinline block: DeleteRetentionPolicyRequest.Builder.() -> Unit): DeleteRetentionPolicyResponse = deleteRetentionPolicy(DeleteRetentionPolicyRequest.Builder().apply(block).build())
/**
* Deletes the specified subscription filter.
*/
public suspend inline fun CloudWatchLogsClient.deleteSubscriptionFilter(crossinline block: DeleteSubscriptionFilterRequest.Builder.() -> Unit): DeleteSubscriptionFilterResponse = deleteSubscriptionFilter(DeleteSubscriptionFilterRequest.Builder().apply(block).build())
/**
* Returns a list of all CloudWatch Logs account policies in the account.
*/
public suspend inline fun CloudWatchLogsClient.describeAccountPolicies(crossinline block: DescribeAccountPoliciesRequest.Builder.() -> Unit): DescribeAccountPoliciesResponse = describeAccountPolicies(DescribeAccountPoliciesRequest.Builder().apply(block).build())
/**
* Use this operation to return the valid and default values that are used when creating delivery sources, delivery destinations, and deliveries. For more information about deliveries, see [CreateDelivery](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateDelivery.html).
*/
public suspend inline fun CloudWatchLogsClient.describeConfigurationTemplates(crossinline block: DescribeConfigurationTemplatesRequest.Builder.() -> Unit): DescribeConfigurationTemplatesResponse = describeConfigurationTemplates(DescribeConfigurationTemplatesRequest.Builder().apply(block).build())
/**
* Retrieves a list of the deliveries that have been created in the account.
*
* A *delivery* is a connection between a [*delivery source*](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliverySource.html) and a [*delivery destination*](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestination.html).
*
* A delivery source represents an Amazon Web Services resource that sends logs to a logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in [Enable logging from Amazon Web Services services.](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html)
*/
public suspend inline fun CloudWatchLogsClient.describeDeliveries(crossinline block: DescribeDeliveriesRequest.Builder.() -> Unit): DescribeDeliveriesResponse = describeDeliveries(DescribeDeliveriesRequest.Builder().apply(block).build())
/**
* Retrieves a list of the delivery destinations that have been created in the account.
*/
public suspend inline fun CloudWatchLogsClient.describeDeliveryDestinations(crossinline block: DescribeDeliveryDestinationsRequest.Builder.() -> Unit): DescribeDeliveryDestinationsResponse = describeDeliveryDestinations(DescribeDeliveryDestinationsRequest.Builder().apply(block).build())
/**
* Retrieves a list of the delivery sources that have been created in the account.
*/
public suspend inline fun CloudWatchLogsClient.describeDeliverySources(crossinline block: DescribeDeliverySourcesRequest.Builder.() -> Unit): DescribeDeliverySourcesResponse = describeDeliverySources(DescribeDeliverySourcesRequest.Builder().apply(block).build())
/**
* Lists all your destinations. The results are ASCII-sorted by destination name.
*/
public suspend inline fun CloudWatchLogsClient.describeDestinations(crossinline block: DescribeDestinationsRequest.Builder.() -> Unit): DescribeDestinationsResponse = describeDestinations(DescribeDestinationsRequest.Builder().apply(block).build())
/**
* Lists the specified export tasks. You can list all your export tasks or filter the results based on task ID or task status.
*/
public suspend inline fun CloudWatchLogsClient.describeExportTasks(crossinline block: DescribeExportTasksRequest.Builder.() -> Unit): DescribeExportTasksResponse = describeExportTasks(DescribeExportTasksRequest.Builder().apply(block).build())
/**
* Lists the specified log groups. You can list all your log groups or filter the results by prefix. The results are ASCII-sorted by log group name.
*
 * CloudWatch Logs doesn't support IAM policies that control access to the `DescribeLogGroups` action by using the `aws:ResourceTag/key-name` condition key. Other CloudWatch Logs actions do support the use of the `aws:ResourceTag/key-name` condition key to control access. For more information about using tags to control access, see [Controlling access to Amazon Web Services resources using tags](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html).
*
* If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see [CloudWatch cross-account observability](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html).
*/
public suspend inline fun CloudWatchLogsClient.describeLogGroups(crossinline block: DescribeLogGroupsRequest.Builder.() -> Unit): DescribeLogGroupsResponse = describeLogGroups(DescribeLogGroupsRequest.Builder().apply(block).build())
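// Illustrative usage sketch (not generated code): lists log groups whose names start with a
// hypothetical "/my/app" prefix. The prefix and limit values are assumptions for the example.
private suspend fun exampleDescribeLogGroups(client: CloudWatchLogsClient) {
    val response = client.describeLogGroups {
        logGroupNamePrefix = "/my/app" // assumed prefix
        limit = 10
    }
    response.logGroups?.forEach { group ->
        println("${group.logGroupName}: retention=${group.retentionInDays ?: "never expires"}")
    }
}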
/**
* Lists the log streams for the specified log group. You can list all the log streams or filter the results by prefix. You can also control how the results are ordered.
*
* You can specify the log group to search by using either `logGroupIdentifier` or `logGroupName`. You must include one of these two parameters, but you can't include both.
*
* This operation has a limit of five transactions per second, after which transactions are throttled.
*
* If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see [CloudWatch cross-account observability](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html).
*/
public suspend inline fun CloudWatchLogsClient.describeLogStreams(crossinline block: DescribeLogStreamsRequest.Builder.() -> Unit): DescribeLogStreamsResponse = describeLogStreams(DescribeLogStreamsRequest.Builder().apply(block).build())
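// Illustrative usage sketch (not generated code): lists streams of a hypothetical "/my/app"
// log group whose names start with "2024/". The names are assumptions for the example.
private suspend fun exampleDescribeLogStreams(client: CloudWatchLogsClient) {
    val response = client.describeLogStreams {
        logGroupName = "/my/app"      // assumed log group name
        logStreamNamePrefix = "2024/" // assumed stream name prefix
        limit = 25
    }
    response.logStreams?.forEach { stream ->
        println("${stream.logStreamName} (last event at ${stream.lastEventTimestamp})")
    }
}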
/**
* Lists the specified metric filters. You can list all of the metric filters or filter the results by log name, prefix, metric name, or metric namespace. The results are ASCII-sorted by filter name.
*/
public suspend inline fun CloudWatchLogsClient.describeMetricFilters(crossinline block: DescribeMetricFiltersRequest.Builder.() -> Unit): DescribeMetricFiltersResponse = describeMetricFilters(DescribeMetricFiltersRequest.Builder().apply(block).build())
/**
* Returns a list of CloudWatch Logs Insights queries that are scheduled, running, or have been run recently in this account. You can request all queries or limit it to queries of a specific log group or queries with a certain status.
*/
public suspend inline fun CloudWatchLogsClient.describeQueries(crossinline block: DescribeQueriesRequest.Builder.() -> Unit): DescribeQueriesResponse = describeQueries(DescribeQueriesRequest.Builder().apply(block).build())
/**
* This operation returns a paginated list of your saved CloudWatch Logs Insights query definitions. You can retrieve query definitions from the current account or from a source account that is linked to the current account.
*
* You can use the `queryDefinitionNamePrefix` parameter to limit the results to only the query definitions that have names that start with a certain string.
*/
public suspend inline fun CloudWatchLogsClient.describeQueryDefinitions(crossinline block: DescribeQueryDefinitionsRequest.Builder.() -> Unit): DescribeQueryDefinitionsResponse = describeQueryDefinitions(DescribeQueryDefinitionsRequest.Builder().apply(block).build())
/**
* Lists the resource policies in this account.
*/
public suspend inline fun CloudWatchLogsClient.describeResourcePolicies(crossinline block: DescribeResourcePoliciesRequest.Builder.() -> Unit): DescribeResourcePoliciesResponse = describeResourcePolicies(DescribeResourcePoliciesRequest.Builder().apply(block).build())
/**
* Lists the subscription filters for the specified log group. You can list all the subscription filters or filter the results by prefix. The results are ASCII-sorted by filter name.
*/
public suspend inline fun CloudWatchLogsClient.describeSubscriptionFilters(crossinline block: DescribeSubscriptionFiltersRequest.Builder.() -> Unit): DescribeSubscriptionFiltersResponse = describeSubscriptionFilters(DescribeSubscriptionFiltersRequest.Builder().apply(block).build())
/**
* Disassociates the specified KMS key from the specified log group or from all CloudWatch Logs Insights query results in the account.
*
* When you use `DisassociateKmsKey`, you specify either the `logGroupName` parameter or the `resourceIdentifier` parameter. You can't specify both of those parameters in the same operation.
* + Specify the `logGroupName` parameter to stop using the KMS key to encrypt future log events ingested and stored in the log group. Instead, they will be encrypted with the default CloudWatch Logs method. The log events that were ingested while the key was associated with the log group are still encrypted with that key. Therefore, CloudWatch Logs will need permissions for the key whenever that data is accessed.
* + Specify the `resourceIdentifier` parameter with the `query-result` resource to stop using the KMS key to encrypt the results of all future [StartQuery](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html) operations in the account. They will instead be encrypted with the default CloudWatch Logs method. The results from queries that ran while the key was associated with the account are still encrypted with that key. Therefore, CloudWatch Logs will need permissions for the key whenever that data is accessed.
*
* It can take up to 5 minutes for this operation to take effect.
*/
public suspend inline fun CloudWatchLogsClient.disassociateKmsKey(crossinline block: DisassociateKmsKeyRequest.Builder.() -> Unit): DisassociateKmsKeyResponse = disassociateKmsKey(DisassociateKmsKeyRequest.Builder().apply(block).build())
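// Illustrative usage sketch (not generated code): stops using the associated KMS key for future
// log events of a hypothetical "/my/app" log group. To target query results instead, set
// resourceIdentifier rather than logGroupName, as described above.
private suspend fun exampleDisassociateKmsKey(client: CloudWatchLogsClient) {
    client.disassociateKmsKey {
        logGroupName = "/my/app" // assumed log group name
    }
}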
/**
* Lists log events from the specified log group. You can list all the log events or filter the results using a filter pattern, a time range, and the name of the log stream.
*
* You must have the `logs:FilterLogEvents` permission to perform this operation.
*
* You can specify the log group to search by using either `logGroupIdentifier` or `logGroupName`. You must include one of these two parameters, but you can't include both.
*
* By default, this operation returns as many log events as can fit in 1 MB (up to 10,000 log events) or all the events found within the specified time range. If the results include a token, that means there are more log events available. You can get additional results by specifying the token in a subsequent call. This operation can return empty results while there are more log events available through the token.
*
* The returned log events are sorted by event timestamp, the timestamp when the event was ingested by CloudWatch Logs, and the ID of the `PutLogEvents` request.
*
* If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see [CloudWatch cross-account observability](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html).
*/
public suspend inline fun CloudWatchLogsClient.filterLogEvents(crossinline block: FilterLogEventsRequest.Builder.() -> Unit): FilterLogEventsResponse = filterLogEvents(FilterLogEventsRequest.Builder().apply(block).build())
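// Illustrative usage sketch (not generated code): searches the last hour of a hypothetical
// "/my/app" log group for events containing "ERROR". Times are epoch milliseconds.
private suspend fun exampleFilterLogEvents(client: CloudWatchLogsClient) {
    val now = System.currentTimeMillis()
    val response = client.filterLogEvents {
        logGroupName = "/my/app"         // assumed log group name
        filterPattern = "ERROR"
        startTime = now - 60 * 60 * 1000 // one hour ago
        endTime = now
        limit = 100
    }
    response.events?.forEach { event ->
        println("${event.timestamp}: ${event.message}")
    }
    // A non-null nextToken means more matching events can be fetched in a follow-up call.
    if (response.nextToken != null) println("more events available")
}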
/**
* Returns information about a log group data protection policy.
*/
public suspend inline fun CloudWatchLogsClient.getDataProtectionPolicy(crossinline block: GetDataProtectionPolicyRequest.Builder.() -> Unit): GetDataProtectionPolicyResponse = getDataProtectionPolicy(GetDataProtectionPolicyRequest.Builder().apply(block).build())
/**
 * Returns complete information about one logical *delivery*. A delivery is a connection between a [*delivery source*](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliverySource.html) and a [*delivery destination*](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestination.html).
*
 * A delivery source represents an Amazon Web Services resource that sends logs to a logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in [Enable logging from Amazon Web Services services](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html).
*
* You need to specify the delivery `id` in this operation. You can find the IDs of the deliveries in your account with the [DescribeDeliveries](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeDeliveries.html) operation.
*/
public suspend inline fun CloudWatchLogsClient.getDelivery(crossinline block: GetDeliveryRequest.Builder.() -> Unit): GetDeliveryResponse = getDelivery(GetDeliveryRequest.Builder().apply(block).build())
/**
* Retrieves complete information about one delivery destination.
*/
public suspend inline fun CloudWatchLogsClient.getDeliveryDestination(crossinline block: GetDeliveryDestinationRequest.Builder.() -> Unit): GetDeliveryDestinationResponse = getDeliveryDestination(GetDeliveryDestinationRequest.Builder().apply(block).build())
/**
* Retrieves the delivery destination policy assigned to the delivery destination that you specify. For more information about delivery destinations and their policies, see [PutDeliveryDestinationPolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestinationPolicy.html).
*/
public suspend inline fun CloudWatchLogsClient.getDeliveryDestinationPolicy(crossinline block: GetDeliveryDestinationPolicyRequest.Builder.() -> Unit): GetDeliveryDestinationPolicyResponse = getDeliveryDestinationPolicy(GetDeliveryDestinationPolicyRequest.Builder().apply(block).build())
/**
* Retrieves complete information about one delivery source.
*/
public suspend inline fun CloudWatchLogsClient.getDeliverySource(crossinline block: GetDeliverySourceRequest.Builder.() -> Unit): GetDeliverySourceResponse = getDeliverySource(GetDeliverySourceRequest.Builder().apply(block).build())
/**
* Retrieves information about the log anomaly detector that you specify.
*/
public suspend inline fun CloudWatchLogsClient.getLogAnomalyDetector(crossinline block: GetLogAnomalyDetectorRequest.Builder.() -> Unit): GetLogAnomalyDetectorResponse = getLogAnomalyDetector(GetLogAnomalyDetectorRequest.Builder().apply(block).build())
/**
* Lists log events from the specified log stream. You can list all of the log events or filter using a time range.
*
 * By default, this operation returns as many log events as can fit in a response size of 1 MB (up to 10,000 log events). You can get additional log events by specifying one of the tokens in a subsequent call. This operation can return empty results while there are more log events available through the token.
*
* If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see [CloudWatch cross-account observability](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html).
*
* You can specify the log group to search by using either `logGroupIdentifier` or `logGroupName`. You must include one of these two parameters, but you can't include both.
*/
public suspend inline fun CloudWatchLogsClient.getLogEvents(crossinline block: GetLogEventsRequest.Builder.() -> Unit): GetLogEventsResponse = getLogEvents(GetLogEventsRequest.Builder().apply(block).build())
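// Illustrative usage sketch (not generated code): reads events from the oldest end of a
// hypothetical stream. The group and stream names are assumptions for the example.
private suspend fun exampleGetLogEvents(client: CloudWatchLogsClient) {
    val response = client.getLogEvents {
        logGroupName = "/my/app"               // assumed log group name
        logStreamName = "2024/11/01/instance1" // assumed log stream name
        startFromHead = true                   // read oldest events first
        limit = 50
    }
    response.events?.forEach { event ->
        println("${event.timestamp}: ${event.message}")
    }
}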
/**
* Returns a list of the fields that are included in log events in the specified log group. Includes the percentage of log events that contain each field. The search is limited to a time period that you specify.
*
* You can specify the log group to search by using either `logGroupIdentifier` or `logGroupName`. You must specify one of these parameters, but you can't specify both.
*
 * In the results, fields that start with `@` are fields generated by CloudWatch Logs. For example, `@timestamp` is the timestamp of each log event. For more information about the fields that are generated by CloudWatch Logs, see [Supported Logs and Discovered Fields](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_AnalyzeLogData-discoverable-fields.html).
*
* The response results are sorted by the frequency percentage, starting with the highest percentage.
*
* If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see [CloudWatch cross-account observability](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html).
*/
public suspend inline fun CloudWatchLogsClient.getLogGroupFields(crossinline block: GetLogGroupFieldsRequest.Builder.() -> Unit): GetLogGroupFieldsResponse = getLogGroupFields(GetLogGroupFieldsRequest.Builder().apply(block).build())
/**
* Retrieves all of the fields and values of a single log event. All fields are retrieved, even if the original query that produced the `logRecordPointer` retrieved only a subset of fields. Fields are returned as field name/field value pairs.
*
* The full unparsed log event is returned within `@message`.
*/
public suspend inline fun CloudWatchLogsClient.getLogRecord(crossinline block: GetLogRecordRequest.Builder.() -> Unit): GetLogRecordResponse = getLogRecord(GetLogRecordRequest.Builder().apply(block).build())
/**
* Returns the results from the specified query.
*
* Only the fields requested in the query are returned, along with a `@ptr` field, which is the identifier for the log record. You can use the value of `@ptr` in a [GetLogRecord](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetLogRecord.html) operation to get the full log record.
*
* `GetQueryResults` does not start running a query. To run a query, use [StartQuery](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html). For more information about how long results of previous queries are available, see [CloudWatch Logs quotas](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/cloudwatch_limits_cwl.html).
*
* If the value of the `Status` field in the output is `Running`, this operation returns only partial results. If you see a value of `Scheduled` or `Running` for the status, you can retry the operation later to see the final results.
*
* If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account to start queries in linked source accounts. For more information, see [CloudWatch cross-account observability](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html).
*/
public suspend inline fun CloudWatchLogsClient.getQueryResults(crossinline block: GetQueryResultsRequest.Builder.() -> Unit): GetQueryResultsResponse = getQueryResults(GetQueryResultsRequest.Builder().apply(block).build())
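// Illustrative usage sketch (not generated code): polls for the results of a query started
// earlier with startQuery, waiting while the status is Scheduled or Running. Assumes the
// generated QueryStatus enum exposes its raw string through `value`.
private suspend fun exampleGetQueryResults(client: CloudWatchLogsClient, queryId: String) {
    while (true) {
        val response = client.getQueryResults { this.queryId = queryId }
        val status = response.status?.value
        if (status == "Scheduled" || status == "Running") {
            kotlinx.coroutines.delay(1_000) // only partial results so far; wait and poll again
            continue
        }
        println("query finished with status $status")
        response.results?.forEach { row ->
            println(row.joinToString(separator = ", ") { "${it.field}=${it.value}" })
        }
        return
    }
}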
/**
* Returns a list of anomalies that log anomaly detectors have found. For details about the structure format of each anomaly object that is returned, see the example in this section.
*/
public suspend inline fun CloudWatchLogsClient.listAnomalies(crossinline block: ListAnomaliesRequest.Builder.() -> Unit): ListAnomaliesResponse = listAnomalies(ListAnomaliesRequest.Builder().apply(block).build())
/**
* Retrieves a list of the log anomaly detectors in the account.
*/
public suspend inline fun CloudWatchLogsClient.listLogAnomalyDetectors(crossinline block: ListLogAnomalyDetectorsRequest.Builder.() -> Unit): ListLogAnomalyDetectorsResponse = listLogAnomalyDetectors(ListLogAnomalyDetectorsRequest.Builder().apply(block).build())
/**
* Displays the tags associated with a CloudWatch Logs resource. Currently, log groups and destinations support tagging.
*/
public suspend inline fun CloudWatchLogsClient.listTagsForResource(crossinline block: ListTagsForResourceRequest.Builder.() -> Unit): ListTagsForResourceResponse = listTagsForResource(ListTagsForResourceRequest.Builder().apply(block).build())
/**
* The ListTagsLogGroup operation is on the path to deprecation. We recommend that you use [ListTagsForResource](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListTagsForResource.html) instead.
*
* Lists the tags for the specified log group.
*/
@Deprecated("Please use the generic tagging API ListTagsForResource")
public suspend inline fun CloudWatchLogsClient.listTagsLogGroup(crossinline block: ListTagsLogGroupRequest.Builder.() -> Unit): ListTagsLogGroupResponse = listTagsLogGroup(ListTagsLogGroupRequest.Builder().apply(block).build())
/**
* Creates an account-level data protection policy or subscription filter policy that applies to all log groups or a subset of log groups in the account.
*
* **Data protection policy**
*
* A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy.
*
* Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked.
*
* If you use `PutAccountPolicy` to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.
*
 * By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the `logs:Unmask` permission can use a [GetLogEvents](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetLogEvents.html) or [FilterLogEvents](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_FilterLogEvents.html) operation with the `unmask` parameter set to `true` to view the unmasked log events. Users with the `logs:Unmask` permission can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the `unmask` query command.
*
* For more information, including a list of types of data that can be audited and masked, see [Protect sensitive log data with masking](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data.html).
*
* To use the `PutAccountPolicy` operation for a data protection policy, you must be signed on with the `logs:PutDataProtectionPolicy` and `logs:PutAccountPolicy` permissions.
*
* The `PutAccountPolicy` operation applies to all log groups in the account. You can use [PutDataProtectionPolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDataProtectionPolicy.html) to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.
*
* **Subscription filter policy**
*
* A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.
*
* The following destinations are supported for subscription filters:
 * + A Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.
 * + A Firehose data stream in the same account as the subscription policy, for same-account delivery.
* + A Lambda function in the same account as the subscription policy, for same-account delivery.
* + A logical destination in a different account created with [PutDestination](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html), for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.
*
* Each account can have one account-level subscription filter policy per Region. If you are updating an existing filter, you must specify the correct name in `PolicyName`. To perform a `PutAccountPolicy` subscription filter operation for any destination except a Lambda function, you must also have the `iam:PassRole` permission.
*/
public suspend inline fun CloudWatchLogsClient.putAccountPolicy(crossinline block: PutAccountPolicyRequest.Builder.() -> Unit): PutAccountPolicyResponse = putAccountPolicy(PutAccountPolicyRequest.Builder().apply(block).build())
/**
* Creates a data protection policy for the specified log group. A data protection policy can help safeguard sensitive data that's ingested by the log group by auditing and masking the sensitive log data.
*
* Sensitive data is detected and masked when it is ingested into the log group. When you set a data protection policy, log events ingested into the log group before that time are not masked.
*
 * By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the `logs:Unmask` permission can use a [GetLogEvents](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetLogEvents.html) or [FilterLogEvents](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_FilterLogEvents.html) operation with the `unmask` parameter set to `true` to view the unmasked log events. Users with the `logs:Unmask` permission can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the `unmask` query command.
*
* For more information, including a list of types of data that can be audited and masked, see [Protect sensitive log data with masking](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data.html).
*
 * The `PutDataProtectionPolicy` operation applies to only the specified log group. You can also use [PutAccountPolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutAccountPolicy.html) to create an account-level data protection policy that applies to all log groups in the account, including both existing log groups and log groups that are created later. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.
*/
public suspend inline fun CloudWatchLogsClient.putDataProtectionPolicy(crossinline block: PutDataProtectionPolicyRequest.Builder.() -> Unit): PutDataProtectionPolicyResponse = putDataProtectionPolicy(PutDataProtectionPolicyRequest.Builder().apply(block).build())
/**
* Creates or updates a logical *delivery destination*. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Firehose are supported as logs delivery destinations.
*
* To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following:
* + Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see [PutDeliverySource](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliverySource.html).
* + Use `PutDeliveryDestination` to create a *delivery destination*, which is a logical object that represents the actual delivery destination.
* + If you are delivering logs cross-account, you must use [PutDeliveryDestinationPolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestinationPolicy.html) in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
* + Use `CreateDelivery` to create a *delivery* by pairing exactly one delivery source and one delivery destination. For more information, see [CreateDelivery](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateDelivery.html).
*
* You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
*
 * Only some Amazon Web Services services support being configured as a delivery source. These services are listed as **Supported [V2 Permissions]** in the table at [Enabling logging from Amazon Web Services services](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html).
*
* If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify.
*/
public suspend inline fun CloudWatchLogsClient.putDeliveryDestination(crossinline block: PutDeliveryDestinationRequest.Builder.() -> Unit): PutDeliveryDestinationResponse = putDeliveryDestination(PutDeliveryDestinationRequest.Builder().apply(block).build())
/**
* Creates and assigns an IAM policy that grants permissions to CloudWatch Logs to deliver logs cross-account to a specified destination in this account. To configure the delivery of logs from an Amazon Web Services service in another account to a logs delivery destination in the current account, you must do the following:
* + Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see [PutDeliverySource](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliverySource.html).
* + Create a *delivery destination*, which is a logical object that represents the actual delivery destination. For more information, see [PutDeliveryDestination](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestination.html).
* + Use this operation in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
* + Create a *delivery* by pairing exactly one delivery source and one delivery destination. For more information, see [CreateDelivery](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateDelivery.html).
*
 * Only some Amazon Web Services services support being configured as a delivery source. These services are listed as **Supported [V2 Permissions]** in the table at [Enabling logging from Amazon Web Services services](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html).
*
* The contents of the policy must include two statements. One statement enables general logs delivery, and the other allows delivery to the chosen destination. See the examples for the needed policies.
*/
public suspend inline fun CloudWatchLogsClient.putDeliveryDestinationPolicy(crossinline block: PutDeliveryDestinationPolicyRequest.Builder.() -> Unit): PutDeliveryDestinationPolicyResponse = putDeliveryDestinationPolicy(PutDeliveryDestinationPolicyRequest.Builder().apply(block).build())
/**
 * Creates or updates a logical *delivery source*. A delivery source represents an Amazon Web Services resource that sends logs to a logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose.
*
* To configure logs delivery between a delivery destination and an Amazon Web Services service that is supported as a delivery source, you must do the following:
* + Use `PutDeliverySource` to create a delivery source, which is a logical object that represents the resource that is actually sending the logs.
* + Use `PutDeliveryDestination` to create a *delivery destination*, which is a logical object that represents the actual delivery destination. For more information, see [PutDeliveryDestination](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestination.html).
* + If you are delivering logs cross-account, you must use [PutDeliveryDestinationPolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDeliveryDestinationPolicy.html) in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination.
* + Use `CreateDelivery` to create a *delivery* by pairing exactly one delivery source and one delivery destination. For more information, see [CreateDelivery](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateDelivery.html).
*
* You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination.
*
 * Only some Amazon Web Services services support being configured as a delivery source. These services are listed as **Supported [V2 Permissions]** in the table at [Enabling logging from Amazon Web Services services](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html).
*
* If you use this operation to update an existing delivery source, all the current delivery source parameters are overwritten with the new parameter values that you specify.
*/
public suspend inline fun CloudWatchLogsClient.putDeliverySource(crossinline block: PutDeliverySourceRequest.Builder.() -> Unit): PutDeliverySourceResponse = putDeliverySource(PutDeliverySourceRequest.Builder().apply(block).build())
/**
* Creates or updates a destination. This operation is used only to create destinations for cross-account subscriptions.
*
* A destination encapsulates a physical resource (such as an Amazon Kinesis stream). With a destination, you can subscribe to a real-time stream of log events for a different account, ingested using [PutLogEvents](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html).
*
* Through an access policy, a destination controls what is written to it. By default, `PutDestination` does not set any access policy with the destination, which means a cross-account user cannot call [PutSubscriptionFilter](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutSubscriptionFilter.html) against this destination. To enable this, the destination owner must call [PutDestinationPolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestinationPolicy.html) after `PutDestination`.
*
* To perform a `PutDestination` operation, you must also have the `iam:PassRole` permission.
*/
public suspend inline fun CloudWatchLogsClient.putDestination(crossinline block: PutDestinationRequest.Builder.() -> Unit): PutDestinationResponse = putDestination(PutDestinationRequest.Builder().apply(block).build())
/**
* Creates or updates an access policy associated with an existing destination. An access policy is an [IAM policy document](https://docs.aws.amazon.com/IAM/latest/UserGuide/policies_overview.html) that is used to authorize claims to register a subscription filter against a given destination.
*/
public suspend inline fun CloudWatchLogsClient.putDestinationPolicy(crossinline block: PutDestinationPolicyRequest.Builder.() -> Unit): PutDestinationPolicyResponse = putDestinationPolicy(PutDestinationPolicyRequest.Builder().apply(block).build())
/**
* Uploads a batch of log events to the specified log stream.
*
* The sequence token is now ignored in `PutLogEvents` actions. `PutLogEvents` actions are always accepted and never return `InvalidSequenceTokenException` or `DataAlreadyAcceptedException` even if the sequence token is not valid. You can use parallel `PutLogEvents` actions on the same log stream.
*
* The batch of events must satisfy the following constraints:
* + The maximum batch size is 1,048,576 bytes. This size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event.
* + None of the log events in the batch can be more than 2 hours in the future.
* + None of the log events in the batch can be more than 14 days in the past. Also, none of the log events can be from earlier than the retention period of the log group.
* + The log events in the batch must be in chronological order by their timestamp. The timestamp is the time that the event occurred, expressed as the number of milliseconds after `Jan 1, 1970 00:00:00 UTC`. (In Amazon Web Services Tools for PowerShell and the Amazon Web Services SDK for .NET, the timestamp is specified in .NET format: `yyyy-mm-ddThh:mm:ss`. For example, `2017-09-15T13:45:30`.)
* + A batch of log events in a single request cannot span more than 24 hours. Otherwise, the operation fails.
* + Each log event can be no larger than 256 KB.
* + The maximum number of log events in a batch is 10,000.
* + The quota of five requests per second per log stream has been removed. Instead, `PutLogEvents` actions are throttled based on a per-second per-account quota. You can request an increase to the per-second throttling quota by using the Service Quotas service.
*
 * If a call to `PutLogEvents` returns "UnrecognizedClientException", the most likely cause is a non-valid Amazon Web Services access key ID or secret key.
*/
public suspend inline fun CloudWatchLogsClient.putLogEvents(crossinline block: PutLogEventsRequest.Builder.() -> Unit): PutLogEventsResponse = putLogEvents(PutLogEventsRequest.Builder().apply(block).build())
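// Illustrative usage sketch (not generated code): uploads a single log event to a hypothetical
// stream. InputLogEvent is referenced by its fully qualified name because this file does not
// import it; the group, stream, and message values are assumptions for the example.
private suspend fun examplePutLogEvents(client: CloudWatchLogsClient) {
    val event = aws.sdk.kotlin.services.cloudwatchlogs.model.InputLogEvent {
        timestamp = System.currentTimeMillis() // epoch milliseconds
        message = "application started"
    }
    client.putLogEvents {
        logGroupName = "/my/app"               // assumed log group name
        logStreamName = "2024/11/01/instance1" // assumed log stream name
        logEvents = listOf(event)
    }
}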
/**
* Creates or updates a metric filter and associates it with the specified log group. With metric filters, you can configure rules to extract metric data from log events ingested through [PutLogEvents](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html).
*
* The maximum number of metric filters that can be associated with a log group is 100.
*
 * Using regular expressions to create metric filters is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in metric filters, see [Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html).
*
* When you create a metric filter, you can also optionally assign a unit and dimensions to the metric that is created.
*
* Metrics extracted from log events are charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as `IPAddress` or `requestID` as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate custom metric.
*
* CloudWatch Logs might disable a metric filter if it generates 1,000 different name/value pairs for your specified dimensions within one hour.
*
* You can also set up a billing alarm to alert you if your charges are higher than expected. For more information, see [ Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/monitor_estimated_charges_with_cloudwatch.html).
*/
public suspend inline fun CloudWatchLogsClient.putMetricFilter(crossinline block: PutMetricFilterRequest.Builder.() -> Unit): PutMetricFilterResponse = putMetricFilter(PutMetricFilterRequest.Builder().apply(block).build())
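// Illustrative usage sketch (not generated code): emits a custom metric that counts log events
// matching an "ERROR" pattern. MetricTransformation is referenced by its fully qualified name
// because this file does not import it; metric and filter names are assumptions.
private suspend fun examplePutMetricFilter(client: CloudWatchLogsClient) {
    val transformation = aws.sdk.kotlin.services.cloudwatchlogs.model.MetricTransformation {
        metricName = "ErrorCount" // assumed metric name
        metricNamespace = "MyApp" // assumed namespace
        metricValue = "1"         // count 1 per matching event
    }
    client.putMetricFilter {
        logGroupName = "/my/app"  // assumed log group name
        filterName = "errors"
        filterPattern = "ERROR"
        metricTransformations = listOf(transformation)
    }
}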
/**
* Creates or updates a query definition for CloudWatch Logs Insights. For more information, see [Analyzing Log Data with CloudWatch Logs Insights](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AnalyzingLogData.html).
*
* To update a query definition, specify its `queryDefinitionId` in your request. The values of `name`, `queryString`, and `logGroupNames` are changed to the values that you specify in your update operation. No current values are retained from the current query definition. For example, imagine updating a current query definition that includes log groups. If you don't specify the `logGroupNames` parameter in your update operation, the query definition changes to contain no log groups.
*
* You must have the `logs:PutQueryDefinition` permission to be able to perform this operation.
*/
public suspend inline fun CloudWatchLogsClient.putQueryDefinition(crossinline block: PutQueryDefinitionRequest.Builder.() -> Unit): PutQueryDefinitionResponse = putQueryDefinition(PutQueryDefinitionRequest.Builder().apply(block).build())
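// Illustrative usage sketch (not generated code): saves a query definition that returns the 20
// most recent error messages from a hypothetical log group.
private suspend fun examplePutQueryDefinition(client: CloudWatchLogsClient) {
    val response = client.putQueryDefinition {
        name = "recent-errors"            // assumed definition name
        logGroupNames = listOf("/my/app") // assumed log group
        queryString = "fields @timestamp, @message | filter @message like /ERROR/ | sort @timestamp desc | limit 20"
    }
    println("saved query definition ${response.queryDefinitionId}")
}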
/**
* Creates or updates a resource policy allowing other Amazon Web Services services to put log events to this account, such as Amazon Route 53. An account can have up to 10 resource policies per Amazon Web Services Region.
*/
public suspend inline fun CloudWatchLogsClient.putResourcePolicy(crossinline block: PutResourcePolicyRequest.Builder.() -> Unit): PutResourcePolicyResponse = putResourcePolicy(PutResourcePolicyRequest.Builder().apply(block).build())
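// Illustrative usage sketch (not generated code): allows Route 53 to deliver DNS query logs to
// log groups under a hypothetical prefix. The policy document is a simplified example; the
// account ID and Region are placeholders.
private suspend fun examplePutResourcePolicy(client: CloudWatchLogsClient) {
    val document = """
        {
          "Version": "2012-10-17",
          "Statement": [{
            "Effect": "Allow",
            "Principal": { "Service": "route53.amazonaws.com" },
            "Action": ["logs:CreateLogStream", "logs:PutLogEvents"],
            "Resource": "arn:aws:logs:us-east-1:111122223333:log-group:/aws/route53/*"
          }]
        }
    """.trimIndent()
    client.putResourcePolicy {
        policyName = "route53-query-logging" // assumed policy name
        policyDocument = document
    }
}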
/**
* Sets the retention of the specified log group. With a retention policy, you can configure the number of days for which to retain log events in the specified log group.
*
* CloudWatch Logs doesn't immediately delete log events when they reach their retention setting. It typically takes up to 72 hours after that before log events are deleted, but in rare situations might take longer.
*
* To illustrate, imagine that you change a log group to have a longer retention setting when it contains log events that are past the expiration date, but haven't been deleted. Those log events will take up to 72 hours to be deleted after the new retention date is reached. To make sure that log data is deleted permanently, keep a log group at its lower retention setting until 72 hours after the previous retention period ends. Alternatively, wait to change the retention setting until you confirm that the earlier log events are deleted.
*
* When log events reach their retention setting they are marked for deletion. After they are marked for deletion, they do not add to your archival storage costs anymore, even if they are not actually deleted until later. These log events marked for deletion are also not included when you use an API to retrieve the `storedBytes` value to see how many bytes a log group is storing.
*/
public suspend inline fun CloudWatchLogsClient.putRetentionPolicy(crossinline block: PutRetentionPolicyRequest.Builder.() -> Unit): PutRetentionPolicyResponse = putRetentionPolicy(PutRetentionPolicyRequest.Builder().apply(block).build())
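// Illustrative usage sketch (not generated code): retains log events in a hypothetical log
// group for 30 days. retentionInDays accepts only the values listed in the API reference.
private suspend fun examplePutRetentionPolicy(client: CloudWatchLogsClient) {
    client.putRetentionPolicy {
        logGroupName = "/my/app" // assumed log group name
        retentionInDays = 30
    }
}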
/**
* Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through [PutLogEvents](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html) and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.
*
* The following destinations are supported for subscription filters:
* + An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery.
* + A logical destination created with [PutDestination](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html) that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Firehose as logical destinations.
* + An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery.
 * + A Lambda function that belongs to the same account as the subscription filter, for same-account delivery.
*
* Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in `filterName`.
*
 * Using regular expressions to create subscription filters is supported. For these filters, there is a quota of two regular expression patterns within a single filter pattern. There is also a quota of five regular expression patterns per log group. For more information about using regular expressions in subscription filters, see [Filter pattern syntax for metric filters, subscription filters, filter log events, and Live Tail](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html).
*
* To perform a `PutSubscriptionFilter` operation for any destination except a Lambda function, you must also have the `iam:PassRole` permission.
*/
public suspend inline fun CloudWatchLogsClient.putSubscriptionFilter(crossinline block: PutSubscriptionFilterRequest.Builder.() -> Unit): PutSubscriptionFilterResponse = putSubscriptionFilter(PutSubscriptionFilterRequest.Builder().apply(block).build())
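// Illustrative usage sketch (not generated code): streams events matching an "ERROR" pattern to
// a Kinesis data stream. The ARNs are placeholders; the role must allow CloudWatch Logs to
// write to the destination.
private suspend fun examplePutSubscriptionFilter(client: CloudWatchLogsClient) {
    client.putSubscriptionFilter {
        logGroupName = "/my/app"         // assumed log group name
        filterName = "errors-to-kinesis" // assumed filter name
        filterPattern = "ERROR"
        destinationArn = "arn:aws:kinesis:us-east-1:111122223333:stream/my-stream" // placeholder
        roleArn = "arn:aws:iam::111122223333:role/CWLtoKinesisRole"                // placeholder
    }
}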
/**
* Schedules a query of a log group using CloudWatch Logs Insights. You specify the log group and time range to query and the query string to use.
*
* For more information, see [CloudWatch Logs Insights Query Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html).
*
* After you run a query using `StartQuery`, the query results are stored by CloudWatch Logs. You can use [GetQueryResults](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetQueryResults.html) to retrieve the results of a query, using the `queryId` that `StartQuery` returns.
*
* If you have associated a KMS key with the query results in this account, then [StartQuery](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html) uses that key to encrypt the results when it stores them. If no key is associated with query results, the query results are encrypted with the default CloudWatch Logs encryption method.
*
* Queries time out after 60 minutes of runtime. If your queries are timing out, reduce the time range being searched or partition your query into a number of queries.
*
* If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account to start a query in a linked source account. For more information, see [CloudWatch cross-account observability](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html). For a cross-account `StartQuery` operation, the query definition must be defined in the monitoring account.
*
 * You can have up to 30 concurrent CloudWatch Logs Insights queries, including queries that have been added to dashboards.
*/
public suspend inline fun CloudWatchLogsClient.startQuery(crossinline block: StartQueryRequest.Builder.() -> Unit): StartQueryResponse = startQuery(StartQueryRequest.Builder().apply(block).build())
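// Illustrative usage sketch (not generated code): starts a Logs Insights query over the last
// hour of a hypothetical log group and returns the queryId to poll with getQueryResults.
// StartQuery expects epoch seconds for the time range.
private suspend fun exampleStartQuery(client: CloudWatchLogsClient): String? {
    val now = System.currentTimeMillis() / 1000
    val response = client.startQuery {
        logGroupNames = listOf("/my/app") // assumed log group
        queryString = "fields @timestamp, @message | sort @timestamp desc | limit 20"
        startTime = now - 3600
        endTime = now
    }
    return response.queryId
}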
/**
* Stops a CloudWatch Logs Insights query that is in progress. If the query has already ended, the operation returns an error indicating that the specified query is not running.
*/
public suspend inline fun CloudWatchLogsClient.stopQuery(crossinline block: StopQueryRequest.Builder.() -> Unit): StopQueryResponse = stopQuery(StopQueryRequest.Builder().apply(block).build())
/**
* The TagLogGroup operation is on the path to deprecation. We recommend that you use [TagResource](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_TagResource.html) instead.
*
* Adds or updates the specified tags for the specified log group.
*
* To list the tags for a log group, use [ListTagsForResource](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListTagsForResource.html). To remove tags, use [UntagResource](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_UntagResource.html).
*
* For more information about tags, see [Tag Log Groups in Amazon CloudWatch Logs](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html#log-group-tagging) in the *Amazon CloudWatch Logs User Guide*.
*
 * CloudWatch Logs doesn't support IAM policies that prevent users from assigning specified tags to log groups using the `aws:Resource/key-name` or `aws:TagKeys` condition keys. For more information about using tags to control access, see [Controlling access to Amazon Web Services resources using tags](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html).
*/
@Deprecated("Please use the generic tagging API TagResource")
public suspend inline fun CloudWatchLogsClient.tagLogGroup(crossinline block: TagLogGroupRequest.Builder.() -> Unit): TagLogGroupResponse = tagLogGroup(TagLogGroupRequest.Builder().apply(block).build())
/**
* Assigns one or more tags (key-value pairs) to the specified CloudWatch Logs resource. Currently, the only CloudWatch Logs resources that can be tagged are log groups and destinations.
*
* Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.
*
* Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.
*
 * You can use the `TagResource` action with a resource that already has tags. If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.
*
* You can associate as many as 50 tags with a CloudWatch Logs resource.
*/
public suspend inline fun CloudWatchLogsClient.tagResource(crossinline block: TagResourceRequest.Builder.() -> Unit): TagResourceResponse = tagResource(TagResourceRequest.Builder().apply(block).build())
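// Illustrative usage sketch (not generated code): tags a log group by ARN. The ARN and tag
// values are placeholders.
private suspend fun exampleTagResource(client: CloudWatchLogsClient) {
    client.tagResource {
        resourceArn = "arn:aws:logs:us-east-1:111122223333:log-group:/my/app" // placeholder ARN
        tags = mapOf("Team" to "platform", "Environment" to "production")
    }
}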
/**
* Tests the filter pattern of a metric filter against a sample of log event messages. You can use this operation to validate the correctness of a metric filter pattern.
*/
public suspend inline fun CloudWatchLogsClient.testMetricFilter(crossinline block: TestMetricFilterRequest.Builder.() -> Unit): TestMetricFilterResponse = testMetricFilter(TestMetricFilterRequest.Builder().apply(block).build())
/**
* The UntagLogGroup operation is on the path to deprecation. We recommend that you use [UntagResource](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_UntagResource.html) instead.
*
* Removes the specified tags from the specified log group.
*
* To list the tags for a log group, use [ListTagsForResource](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListTagsForResource.html). To add tags, use [TagResource](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_TagResource.html).
*
 * CloudWatch Logs doesn't support IAM policies that prevent users from assigning specified tags to log groups using the `aws:Resource/key-name` or `aws:TagKeys` condition keys.
*/
@Deprecated("Please use the generic tagging API UntagResource")
public suspend inline fun CloudWatchLogsClient.untagLogGroup(crossinline block: UntagLogGroupRequest.Builder.() -> Unit): UntagLogGroupResponse = untagLogGroup(UntagLogGroupRequest.Builder().apply(block).build())
/**
* Removes one or more tags from the specified resource.
*/
public suspend inline fun CloudWatchLogsClient.untagResource(crossinline block: UntagResourceRequest.Builder.() -> Unit): UntagResourceResponse = untagResource(UntagResourceRequest.Builder().apply(block).build())
/**
* Use this operation to *suppress* anomaly detection for a specified anomaly or pattern. If you suppress an anomaly, CloudWatch Logs won't report new occurrences of that anomaly and won't update that anomaly with new data. If you suppress a pattern, CloudWatch Logs won't report any anomalies related to that pattern.
*
* You must specify either `anomalyId` or `patternId`, but you can't specify both parameters in the same operation.
*
* If you have previously used this operation to suppress detection of a pattern or anomaly, you can use it again to cause CloudWatch Logs to end the suppression. To do this, use this operation and specify the anomaly or pattern to stop suppressing, and omit the `suppressionType` and `suppressionPeriod` parameters.
*/
public suspend inline fun CloudWatchLogsClient.updateAnomaly(crossinline block: UpdateAnomalyRequest.Builder.() -> Unit): UpdateAnomalyResponse = updateAnomaly(UpdateAnomalyRequest.Builder().apply(block).build())
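// Illustrative usage sketch (not generated code): ends suppression of a previously suppressed
// anomaly by specifying it without suppressionType or suppressionPeriod, as described above.
// The detector ARN and anomaly ID are placeholders.
private suspend fun exampleUpdateAnomaly(client: CloudWatchLogsClient) {
    client.updateAnomaly {
        anomalyDetectorArn = "arn:aws:logs:us-east-1:111122223333:anomaly-detector:example" // placeholder
        anomalyId = "example-anomaly-id"                                                    // placeholder
    }
}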
/**
* Use this operation to update the configuration of a [delivery](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_Delivery.html) to change either the S3 path pattern or the format of the delivered logs. You can't use this operation to change the source or destination of the delivery.
*/
public suspend inline fun CloudWatchLogsClient.updateDeliveryConfiguration(crossinline block: UpdateDeliveryConfigurationRequest.Builder.() -> Unit): UpdateDeliveryConfigurationResponse = updateDeliveryConfiguration(UpdateDeliveryConfigurationRequest.Builder().apply(block).build())
/**
* Updates an existing log anomaly detector.
*/
public suspend inline fun CloudWatchLogsClient.updateLogAnomalyDetector(crossinline block: UpdateLogAnomalyDetectorRequest.Builder.() -> Unit): UpdateLogAnomalyDetectorResponse = updateLogAnomalyDetector(UpdateLogAnomalyDetectorRequest.Builder().apply(block).build())