
commonMain.aws.sdk.kotlin.services.rekognition.model.ModerationLabel.kt
// Code generated by smithy-kotlin-codegen. DO NOT EDIT!
package aws.sdk.kotlin.services.rekognition.model
import aws.smithy.kotlin.runtime.SdkDsl
/**
* Provides information about a single type of inappropriate, unwanted, or offensive content found in an image or video. Each type of moderated content has a label within a hierarchical taxonomy. For more information, see Content moderation in the Amazon Rekognition Developer Guide.
*/
public class ModerationLabel private constructor(builder: Builder) {
    /**
     * Specifies the confidence that Amazon Rekognition has that the label has been correctly identified.
     *
     * If you don't specify the `MinConfidence` parameter in the call to `DetectModerationLabels`, the operation returns labels with a confidence value greater than or equal to 50 percent.
     */
    public val confidence: kotlin.Float? = builder.confidence

    /**
     * The label name for the type of unsafe content detected in the image.
     */
    public val name: kotlin.String? = builder.name

    /**
     * The name for the parent label. Labels at the top level of the hierarchy have the parent label `""`.
     */
    public val parentName: kotlin.String? = builder.parentName

    /**
     * The level of the moderation label with regard to its taxonomy, from 1 to 3.
     */
    public val taxonomyLevel: kotlin.Int? = builder.taxonomyLevel

    public companion object {
        public operator fun invoke(block: Builder.() -> kotlin.Unit): aws.sdk.kotlin.services.rekognition.model.ModerationLabel = Builder().apply(block).build()
    }
    override fun toString(): kotlin.String = buildString {
        append("ModerationLabel(")
        append("confidence=$confidence,")
        append("name=$name,")
        append("parentName=$parentName,")
        append("taxonomyLevel=$taxonomyLevel")
        append(")")
    }

    override fun hashCode(): kotlin.Int {
        var result = confidence?.hashCode() ?: 0
        result = 31 * result + (name?.hashCode() ?: 0)
        result = 31 * result + (parentName?.hashCode() ?: 0)
        result = 31 * result + (taxonomyLevel ?: 0)
        return result
    }
    override fun equals(other: kotlin.Any?): kotlin.Boolean {
        if (this === other) return true
        if (other == null || this::class != other::class) return false

        other as ModerationLabel

        if (!(confidence?.equals(other.confidence) ?: (other.confidence == null))) return false
        if (name != other.name) return false
        if (parentName != other.parentName) return false
        if (taxonomyLevel != other.taxonomyLevel) return false

        return true
    }

    public inline fun copy(block: Builder.() -> kotlin.Unit = {}): aws.sdk.kotlin.services.rekognition.model.ModerationLabel = Builder(this).apply(block).build()
    @SdkDsl
    public class Builder {
        /**
         * Specifies the confidence that Amazon Rekognition has that the label has been correctly identified.
         *
         * If you don't specify the `MinConfidence` parameter in the call to `DetectModerationLabels`, the operation returns labels with a confidence value greater than or equal to 50 percent.
         */
        public var confidence: kotlin.Float? = null

        /**
         * The label name for the type of unsafe content detected in the image.
         */
        public var name: kotlin.String? = null

        /**
         * The name for the parent label. Labels at the top level of the hierarchy have the parent label `""`.
         */
        public var parentName: kotlin.String? = null

        /**
         * The level of the moderation label with regard to its taxonomy, from 1 to 3.
         */
        public var taxonomyLevel: kotlin.Int? = null

        @PublishedApi
        internal constructor()

        @PublishedApi
        internal constructor(x: aws.sdk.kotlin.services.rekognition.model.ModerationLabel) : this() {
            this.confidence = x.confidence
            this.name = x.name
            this.parentName = x.parentName
            this.taxonomyLevel = x.taxonomyLevel
        }

        @PublishedApi
        internal fun build(): aws.sdk.kotlin.services.rekognition.model.ModerationLabel = ModerationLabel(this)

        internal fun correctErrors(): Builder {
            return this
        }
    }
}
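
A minimal usage sketch (not part of the generated file): the companion `invoke` operator lets callers construct a `ModerationLabel` with a DSL-style builder block, and `copy` rebuilds an instance with selective overrides. The label name and values below are illustrative only, not taken from a real `DetectModerationLabels` response.

// Hypothetical example showing DSL construction and copy; assumes the Rekognition SDK is on the classpath.
import aws.sdk.kotlin.services.rekognition.model.ModerationLabel

fun moderationLabelUsageExample() {
    // Build a label via the companion `invoke` operator and the @SdkDsl Builder.
    val label = ModerationLabel {
        name = "Explicit Nudity"   // illustrative label name
        parentName = ""            // top-level labels have an empty parent name
        confidence = 92.5f
        taxonomyLevel = 1
    }

    // `copy` seeds a Builder from `label`, applies the block, and builds a new instance;
    // fields not mentioned in the block keep their original values.
    val adjusted = label.copy {
        confidence = 95.0f
    }

    println(label)    // e.g. ModerationLabel(confidence=92.5,name=Explicit Nudity,parentName=,taxonomyLevel=1)
    println(adjusted)
}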