All Downloads are FREE. Search and download functionalities are using the official Maven repository.

commonMain.aws.sdk.kotlin.services.rekognition.model.DetectModerationLabelsResponse.kt Maven / Gradle / Ivy

// Code generated by smithy-kotlin-codegen. DO NOT EDIT!

package aws.sdk.kotlin.services.rekognition.model

import aws.smithy.kotlin.runtime.SdkDsl

public class DetectModerationLabelsResponse private constructor(builder: Builder) {
    /**
     * A list of predicted results for the type of content an image contains. For example, the image content might be from animation, sports, or a video game.
     */
    // FIX: generic type argument was missing (raw `List?` does not compile); restored per the
    // Rekognition DetectModerationLabels response shape (ContentTypes: list of ContentType).
    public val contentTypes: List<aws.sdk.kotlin.services.rekognition.model.ContentType>? = builder.contentTypes
    /**
     * Shows the results of the human in the loop evaluation.
     */
    public val humanLoopActivationOutput: aws.sdk.kotlin.services.rekognition.model.HumanLoopActivationOutput? = builder.humanLoopActivationOutput
    /**
     * Array of detected Moderation labels. For video operations, this includes the time, in milliseconds from the start of the video, they were detected.
     */
    // FIX: generic type argument was missing (raw `List?` does not compile); restored per the
    // Rekognition DetectModerationLabels response shape (ModerationLabels: list of ModerationLabel).
    public val moderationLabels: List<aws.sdk.kotlin.services.rekognition.model.ModerationLabel>? = builder.moderationLabels
    /**
     * Version number of the base moderation detection model that was used to detect unsafe content.
     */
    public val moderationModelVersion: kotlin.String? = builder.moderationModelVersion
    /**
     * Identifier of the custom adapter that was used during inference. If during inference the adapter was EXPIRED, then the parameter will not be returned, indicating that a base moderation detection project version was used.
     */
    public val projectVersion: kotlin.String? = builder.projectVersion

    public companion object {
        public operator fun invoke(block: Builder.() -> kotlin.Unit): aws.sdk.kotlin.services.rekognition.model.DetectModerationLabelsResponse = Builder().apply(block).build()
    }

    override fun toString(): kotlin.String = buildString {
        append("DetectModerationLabelsResponse(")
        append("contentTypes=$contentTypes,")
        append("humanLoopActivationOutput=$humanLoopActivationOutput,")
        append("moderationLabels=$moderationLabels,")
        append("moderationModelVersion=$moderationModelVersion,")
        append("projectVersion=$projectVersion")
        append(")")
    }

    override fun hashCode(): kotlin.Int {
        var result = contentTypes?.hashCode() ?: 0
        result = 31 * result + (humanLoopActivationOutput?.hashCode() ?: 0)
        result = 31 * result + (moderationLabels?.hashCode() ?: 0)
        result = 31 * result + (moderationModelVersion?.hashCode() ?: 0)
        result = 31 * result + (projectVersion?.hashCode() ?: 0)
        return result
    }

    override fun equals(other: kotlin.Any?): kotlin.Boolean {
        if (this === other) return true
        if (other == null || this::class != other::class) return false

        other as DetectModerationLabelsResponse

        if (contentTypes != other.contentTypes) return false
        if (humanLoopActivationOutput != other.humanLoopActivationOutput) return false
        if (moderationLabels != other.moderationLabels) return false
        if (moderationModelVersion != other.moderationModelVersion) return false
        if (projectVersion != other.projectVersion) return false

        return true
    }

    public inline fun copy(block: Builder.() -> kotlin.Unit = {}): aws.sdk.kotlin.services.rekognition.model.DetectModerationLabelsResponse = Builder(this).apply(block).build()

    @SdkDsl
    public class Builder {
        /**
         * A list of predicted results for the type of content an image contains. For example, the image content might be from animation, sports, or a video game.
         */
        // FIX: restored missing generic type argument (see corresponding property above).
        public var contentTypes: List<aws.sdk.kotlin.services.rekognition.model.ContentType>? = null
        /**
         * Shows the results of the human in the loop evaluation.
         */
        public var humanLoopActivationOutput: aws.sdk.kotlin.services.rekognition.model.HumanLoopActivationOutput? = null
        /**
         * Array of detected Moderation labels. For video operations, this includes the time, in milliseconds from the start of the video, they were detected.
         */
        // FIX: restored missing generic type argument (see corresponding property above).
        public var moderationLabels: List<aws.sdk.kotlin.services.rekognition.model.ModerationLabel>? = null
        /**
         * Version number of the base moderation detection model that was used to detect unsafe content.
         */
        public var moderationModelVersion: kotlin.String? = null
        /**
         * Identifier of the custom adapter that was used during inference. If during inference the adapter was EXPIRED, then the parameter will not be returned, indicating that a base moderation detection project version was used.
         */
        public var projectVersion: kotlin.String? = null

        @PublishedApi
        internal constructor()
        @PublishedApi
        internal constructor(x: aws.sdk.kotlin.services.rekognition.model.DetectModerationLabelsResponse) : this() {
            this.contentTypes = x.contentTypes
            this.humanLoopActivationOutput = x.humanLoopActivationOutput
            this.moderationLabels = x.moderationLabels
            this.moderationModelVersion = x.moderationModelVersion
            this.projectVersion = x.projectVersion
        }

        @PublishedApi
        internal fun build(): aws.sdk.kotlin.services.rekognition.model.DetectModerationLabelsResponse = DetectModerationLabelsResponse(this)

        /**
         * construct an [aws.sdk.kotlin.services.rekognition.model.HumanLoopActivationOutput] inside the given [block]
         */
        public fun humanLoopActivationOutput(block: aws.sdk.kotlin.services.rekognition.model.HumanLoopActivationOutput.Builder.() -> kotlin.Unit) {
            this.humanLoopActivationOutput = aws.sdk.kotlin.services.rekognition.model.HumanLoopActivationOutput.invoke(block)
        }

        internal fun correctErrors(): Builder {
            return this
        }
    }
}




© 2015 - 2025 Weber Informatics LLC | Privacy Policy