// Code generated by smithy-kotlin-codegen. DO NOT EDIT!

package aws.sdk.kotlin.services.bedrockagent.model

import aws.smithy.kotlin.runtime.SdkDsl

/**
 * The configuration of web URLs that you want to crawl. You should be authorized to crawl the URLs.
 */
public class WebCrawlerConfiguration private constructor(builder: Builder) {
    /**
     * The configuration of crawl limits for the web URLs.
     */
    public val crawlerLimits: aws.sdk.kotlin.services.bedrockagent.model.WebCrawlerLimits? = builder.crawlerLimits
    /**
     * A list of one or more exclusion regular expression patterns to exclude certain URLs. If you specify an inclusion and exclusion filter/pattern and both match a URL, the exclusion filter takes precedence and the web content of the URL isn’t crawled.
     */
    public val exclusionFilters: List<String>? = builder.exclusionFilters
    /**
     * A list of one or more inclusion regular expression patterns to include certain URLs. If you specify an inclusion and exclusion filter/pattern and both match a URL, the exclusion filter takes precedence and the web content of the URL isn’t crawled.
     */
    public val inclusionFilters: List<String>? = builder.inclusionFilters
    /**
     * The scope of what is crawled for your URLs.
     *
     * You can choose to crawl only web pages that belong to the same host or primary domain. For example, only web pages that contain the seed URL "https://docs.aws.amazon.com/bedrock/latest/userguide/" and no other domains. You can choose to include subdomains in addition to the host or primary domain. For example, web pages that contain "aws.amazon.com" can also include the subdomain "docs.aws.amazon.com".
     */
    public val scope: aws.sdk.kotlin.services.bedrockagent.model.WebScopeType? = builder.scope

    public companion object {
        public operator fun invoke(block: Builder.() -> kotlin.Unit): aws.sdk.kotlin.services.bedrockagent.model.WebCrawlerConfiguration = Builder().apply(block).build()
    }

    override fun toString(): kotlin.String = buildString {
        append("WebCrawlerConfiguration(")
        append("crawlerLimits=$crawlerLimits,")
        append("exclusionFilters=*** Sensitive Data Redacted ***,")
        append("inclusionFilters=*** Sensitive Data Redacted ***,")
        append("scope=$scope")
        append(")")
    }

    override fun hashCode(): kotlin.Int {
        var result = crawlerLimits?.hashCode() ?: 0
        result = 31 * result + (exclusionFilters?.hashCode() ?: 0)
        result = 31 * result + (inclusionFilters?.hashCode() ?: 0)
        result = 31 * result + (scope?.hashCode() ?: 0)
        return result
    }

    override fun equals(other: kotlin.Any?): kotlin.Boolean {
        if (this === other) return true
        if (other == null || this::class != other::class) return false

        other as WebCrawlerConfiguration

        if (crawlerLimits != other.crawlerLimits) return false
        if (exclusionFilters != other.exclusionFilters) return false
        if (inclusionFilters != other.inclusionFilters) return false
        if (scope != other.scope) return false

        return true
    }

    /**
     * Create a copy of this configuration, optionally overriding values by applying [block] to a builder
     * pre-populated with the current values.
     */
    public inline fun copy(block: Builder.() -> kotlin.Unit = {}): aws.sdk.kotlin.services.bedrockagent.model.WebCrawlerConfiguration = Builder(this).apply(block).build()

    @SdkDsl
    public class Builder {
        /**
         * The configuration of crawl limits for the web URLs.
         */
        public var crawlerLimits: aws.sdk.kotlin.services.bedrockagent.model.WebCrawlerLimits? = null
        /**
         * A list of one or more exclusion regular expression patterns to exclude certain URLs. If you specify an inclusion and exclusion filter/pattern and both match a URL, the exclusion filter takes precedence and the web content of the URL isn’t crawled.
         */
        public var exclusionFilters: List<String>? = null
        /**
         * A list of one or more inclusion regular expression patterns to include certain URLs. If you specify an inclusion and exclusion filter/pattern and both match a URL, the exclusion filter takes precedence and the web content of the URL isn’t crawled.
         */
        public var inclusionFilters: List<String>? = null
        /**
         * The scope of what is crawled for your URLs.
         *
         * You can choose to crawl only web pages that belong to the same host or primary domain. For example, only web pages that contain the seed URL "https://docs.aws.amazon.com/bedrock/latest/userguide/" and no other domains. You can choose to include subdomains in addition to the host or primary domain. For example, web pages that contain "aws.amazon.com" can also include the subdomain "docs.aws.amazon.com".
         */
        public var scope: aws.sdk.kotlin.services.bedrockagent.model.WebScopeType? = null

        @PublishedApi
        internal constructor()
        @PublishedApi
        internal constructor(x: aws.sdk.kotlin.services.bedrockagent.model.WebCrawlerConfiguration) : this() {
            this.crawlerLimits = x.crawlerLimits
            this.exclusionFilters = x.exclusionFilters
            this.inclusionFilters = x.inclusionFilters
            this.scope = x.scope
        }

        @PublishedApi
        internal fun build(): aws.sdk.kotlin.services.bedrockagent.model.WebCrawlerConfiguration = WebCrawlerConfiguration(this)

        /**
         * construct an [aws.sdk.kotlin.services.bedrockagent.model.WebCrawlerLimits] inside the given [block]
         */
        public fun crawlerLimits(block: aws.sdk.kotlin.services.bedrockagent.model.WebCrawlerLimits.Builder.() -> kotlin.Unit) {
            this.crawlerLimits = aws.sdk.kotlin.services.bedrockagent.model.WebCrawlerLimits.invoke(block)
        }

        internal fun correctErrors(): Builder {
            return this
        }
    }
}
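
// Example usage (illustrative sketch, not part of the generated model): constructing a
// WebCrawlerConfiguration through the companion `invoke` DSL. The regular expression
// patterns below are hypothetical placeholders.
private fun exampleWebCrawlerConfiguration(): WebCrawlerConfiguration =
    WebCrawlerConfiguration {
        // Only crawl Bedrock user guide pages (assumed inclusion pattern).
        inclusionFilters = listOf(".*docs\\.aws\\.amazon\\.com/bedrock/.*")
        // Skip PDF documents (assumed exclusion pattern); exclusion takes precedence when both match.
        exclusionFilters = listOf(".*\\.pdf$")
        // crawlerLimits and scope are optional; see WebCrawlerLimits and WebScopeType in this package.
    }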



