commonMain.aws.sdk.kotlin.services.rekognition.model.DetectFacesResponse.kt Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of rekognition-jvm Show documentation
The AWS SDK for Kotlin client for Rekognition
// Code generated by smithy-kotlin-codegen. DO NOT EDIT!
package aws.sdk.kotlin.services.rekognition.model
import aws.smithy.kotlin.runtime.SdkDsl
/**
 * Response object for the Amazon Rekognition `DetectFaces` operation.
 *
 * Immutable; instances are created via the DSL entry point
 * `DetectFacesResponse { ... }` or via [copy]. All fields are nullable
 * because the service may omit them from the response.
 */
public class DetectFacesResponse private constructor(builder: Builder) {
    /**
     * Details of each face found in the image.
     */
    // NOTE(review): the generic argument was missing (raw `List?`, a compile
    // error) — restored to `FaceDetail` per the DetectFaces API contract.
    public val faceDetails: List<FaceDetail>? = builder.faceDetails
    /**
     * The value of `OrientationCorrection` is always null.
     *
     * If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's orientation. Amazon Rekognition uses this orientation information to perform image correction. The bounding box coordinates are translated to represent object locations after the orientation information in the Exif metadata is used to correct the image orientation. Images in .png format don't contain Exif metadata.
     *
     * Amazon Rekognition doesn’t perform image correction for images in .png format and .jpeg images without orientation information in the image Exif metadata. The bounding box coordinates aren't translated and represent the object locations before the image is rotated.
     */
    public val orientationCorrection: aws.sdk.kotlin.services.rekognition.model.OrientationCorrection? = builder.orientationCorrection

    public companion object {
        /** DSL factory: `DetectFacesResponse { faceDetails = ... }`. */
        public operator fun invoke(block: Builder.() -> kotlin.Unit): aws.sdk.kotlin.services.rekognition.model.DetectFacesResponse = Builder().apply(block).build()
    }

    /** Renders all properties for logging/debugging. */
    override fun toString(): kotlin.String = buildString {
        append("DetectFacesResponse(")
        append("faceDetails=$faceDetails,")
        append("orientationCorrection=$orientationCorrection")
        append(")")
    }

    /** Hash over both properties, consistent with [equals]. */
    override fun hashCode(): kotlin.Int {
        var result = faceDetails?.hashCode() ?: 0
        result = 31 * result + (orientationCorrection?.hashCode() ?: 0)
        return result
    }

    /** Structural equality: same runtime class and equal property values. */
    override fun equals(other: kotlin.Any?): kotlin.Boolean {
        if (this === other) return true
        if (other == null || this::class != other::class) return false

        other as DetectFacesResponse

        if (faceDetails != other.faceDetails) return false
        if (orientationCorrection != other.orientationCorrection) return false

        return true
    }

    /** Returns a copy with the changes applied via the builder [block]. */
    public inline fun copy(block: Builder.() -> kotlin.Unit = {}): aws.sdk.kotlin.services.rekognition.model.DetectFacesResponse = Builder(this).apply(block).build()

    /** Mutable builder; use the companion `invoke` DSL to construct responses. */
    @SdkDsl
    public class Builder {
        /**
         * Details of each face found in the image.
         */
        // NOTE(review): generic argument restored here as well (was raw `List?`).
        public var faceDetails: List<FaceDetail>? = null
        /**
         * The value of `OrientationCorrection` is always null.
         *
         * If the input image is in .jpeg format, it might contain exchangeable image file format (Exif) metadata that includes the image's orientation. Amazon Rekognition uses this orientation information to perform image correction. The bounding box coordinates are translated to represent object locations after the orientation information in the Exif metadata is used to correct the image orientation. Images in .png format don't contain Exif metadata.
         *
         * Amazon Rekognition doesn’t perform image correction for images in .png format and .jpeg images without orientation information in the image Exif metadata. The bounding box coordinates aren't translated and represent the object locations before the image is rotated.
         */
        public var orientationCorrection: aws.sdk.kotlin.services.rekognition.model.OrientationCorrection? = null

        @PublishedApi
        internal constructor()

        /** Seeds the builder from an existing response (used by [copy]). */
        @PublishedApi
        internal constructor(x: aws.sdk.kotlin.services.rekognition.model.DetectFacesResponse) : this() {
            this.faceDetails = x.faceDetails
            this.orientationCorrection = x.orientationCorrection
        }

        @PublishedApi
        internal fun build(): aws.sdk.kotlin.services.rekognition.model.DetectFacesResponse = DetectFacesResponse(this)

        /** Codegen hook for post-deserialization fix-ups; no-op for this shape. */
        internal fun correctErrors(): Builder {
            return this
        }
    }
}