
// commonMain/aws/sdk/kotlin/services/rekognition/model/GetSegmentDetectionResponse.kt (Maven / Gradle / Ivy artifact source)
// Code generated by smithy-kotlin-codegen. DO NOT EDIT!
package aws.sdk.kotlin.services.rekognition.model
import aws.smithy.kotlin.runtime.SdkDsl
public class GetSegmentDetectionResponse private constructor(builder: Builder) {
    /**
     * An array of objects. There can be multiple audio streams. Each `AudioMetadata` object contains metadata for a single audio stream. Audio information in an `AudioMetadata` objects includes the audio codec, the number of audio channels, the duration of the audio stream, and the sample rate. Audio metadata is returned in each page of information returned by `GetSegmentDetection`.
     */
    public val audioMetadata: List<aws.sdk.kotlin.services.rekognition.model.AudioMetadata>? = builder.audioMetadata
    /**
     * Job identifier for the segment detection operation for which you want to obtain results. The job identifer is returned by an initial call to StartSegmentDetection.
     */
    public val jobId: kotlin.String? = builder.jobId
    /**
     * Current status of the segment detection job.
     */
    public val jobStatus: aws.sdk.kotlin.services.rekognition.model.VideoJobStatus? = builder.jobStatus
    /**
     * A job identifier specified in the call to StartSegmentDetection and returned in the job completion notification sent to your Amazon Simple Notification Service topic.
     */
    public val jobTag: kotlin.String? = builder.jobTag
    /**
     * If the previous response was incomplete (because there are more labels to retrieve), Amazon Rekognition Video returns a pagination token in the response. You can use this pagination token to retrieve the next set of text.
     */
    public val nextToken: kotlin.String? = builder.nextToken
    /**
     * An array of segments detected in a video. The array is sorted by the segment types (TECHNICAL_CUE or SHOT) specified in the `SegmentTypes` input parameter of `StartSegmentDetection`. Within each segment type the array is sorted by timestamp values.
     */
    public val segments: List<aws.sdk.kotlin.services.rekognition.model.SegmentDetection>? = builder.segments
    /**
     * An array containing the segment types requested in the call to `StartSegmentDetection`.
     */
    public val selectedSegmentTypes: List<aws.sdk.kotlin.services.rekognition.model.SegmentTypeInfo>? = builder.selectedSegmentTypes
    /**
     * If the job fails, `StatusMessage` provides a descriptive error message.
     */
    public val statusMessage: kotlin.String? = builder.statusMessage
    /**
     * Video file stored in an Amazon S3 bucket. Amazon Rekognition video start operations such as StartLabelDetection use `Video` to specify a video for analysis. The supported file formats are .mp4, .mov and .avi.
     */
    public val video: aws.sdk.kotlin.services.rekognition.model.Video? = builder.video
    /**
     * Currently, Amazon Rekognition Video returns a single object in the `VideoMetadata` array. The object contains information about the video stream in the input file that Amazon Rekognition Video chose to analyze. The `VideoMetadata` object includes the video codec, video format and other information. Video metadata is returned in each page of information returned by `GetSegmentDetection`.
     */
    public val videoMetadata: List<aws.sdk.kotlin.services.rekognition.model.VideoMetadata>? = builder.videoMetadata

    public companion object {
        /**
         * DSL-style factory: `GetSegmentDetectionResponse { jobId = "..." }`.
         */
        public operator fun invoke(block: Builder.() -> kotlin.Unit): aws.sdk.kotlin.services.rekognition.model.GetSegmentDetectionResponse = Builder().apply(block).build()
    }

    override fun toString(): kotlin.String = buildString {
        append("GetSegmentDetectionResponse(")
        append("audioMetadata=$audioMetadata,")
        append("jobId=$jobId,")
        append("jobStatus=$jobStatus,")
        append("jobTag=$jobTag,")
        append("nextToken=$nextToken,")
        append("segments=$segments,")
        append("selectedSegmentTypes=$selectedSegmentTypes,")
        append("statusMessage=$statusMessage,")
        append("video=$video,")
        append("videoMetadata=$videoMetadata")
        append(")")
    }

    override fun hashCode(): kotlin.Int {
        var result = audioMetadata?.hashCode() ?: 0
        result = 31 * result + (jobId?.hashCode() ?: 0)
        result = 31 * result + (jobStatus?.hashCode() ?: 0)
        result = 31 * result + (jobTag?.hashCode() ?: 0)
        result = 31 * result + (nextToken?.hashCode() ?: 0)
        result = 31 * result + (segments?.hashCode() ?: 0)
        result = 31 * result + (selectedSegmentTypes?.hashCode() ?: 0)
        result = 31 * result + (statusMessage?.hashCode() ?: 0)
        result = 31 * result + (video?.hashCode() ?: 0)
        result = 31 * result + (videoMetadata?.hashCode() ?: 0)
        return result
    }

    override fun equals(other: kotlin.Any?): kotlin.Boolean {
        if (this === other) return true
        if (other == null || this::class != other::class) return false

        other as GetSegmentDetectionResponse

        if (audioMetadata != other.audioMetadata) return false
        if (jobId != other.jobId) return false
        if (jobStatus != other.jobStatus) return false
        if (jobTag != other.jobTag) return false
        if (nextToken != other.nextToken) return false
        if (segments != other.segments) return false
        if (selectedSegmentTypes != other.selectedSegmentTypes) return false
        if (statusMessage != other.statusMessage) return false
        if (video != other.video) return false
        if (videoMetadata != other.videoMetadata) return false

        return true
    }

    /**
     * Create a copy of this response, optionally overriding fields via the builder [block].
     */
    public inline fun copy(block: Builder.() -> kotlin.Unit = {}): aws.sdk.kotlin.services.rekognition.model.GetSegmentDetectionResponse = Builder(this).apply(block).build()

    @SdkDsl
    public class Builder {
        /**
         * An array of objects. There can be multiple audio streams. Each `AudioMetadata` object contains metadata for a single audio stream. Audio information in an `AudioMetadata` objects includes the audio codec, the number of audio channels, the duration of the audio stream, and the sample rate. Audio metadata is returned in each page of information returned by `GetSegmentDetection`.
         */
        public var audioMetadata: List<aws.sdk.kotlin.services.rekognition.model.AudioMetadata>? = null
        /**
         * Job identifier for the segment detection operation for which you want to obtain results. The job identifer is returned by an initial call to StartSegmentDetection.
         */
        public var jobId: kotlin.String? = null
        /**
         * Current status of the segment detection job.
         */
        public var jobStatus: aws.sdk.kotlin.services.rekognition.model.VideoJobStatus? = null
        /**
         * A job identifier specified in the call to StartSegmentDetection and returned in the job completion notification sent to your Amazon Simple Notification Service topic.
         */
        public var jobTag: kotlin.String? = null
        /**
         * If the previous response was incomplete (because there are more labels to retrieve), Amazon Rekognition Video returns a pagination token in the response. You can use this pagination token to retrieve the next set of text.
         */
        public var nextToken: kotlin.String? = null
        /**
         * An array of segments detected in a video. The array is sorted by the segment types (TECHNICAL_CUE or SHOT) specified in the `SegmentTypes` input parameter of `StartSegmentDetection`. Within each segment type the array is sorted by timestamp values.
         */
        public var segments: List<aws.sdk.kotlin.services.rekognition.model.SegmentDetection>? = null
        /**
         * An array containing the segment types requested in the call to `StartSegmentDetection`.
         */
        public var selectedSegmentTypes: List<aws.sdk.kotlin.services.rekognition.model.SegmentTypeInfo>? = null
        /**
         * If the job fails, `StatusMessage` provides a descriptive error message.
         */
        public var statusMessage: kotlin.String? = null
        /**
         * Video file stored in an Amazon S3 bucket. Amazon Rekognition video start operations such as StartLabelDetection use `Video` to specify a video for analysis. The supported file formats are .mp4, .mov and .avi.
         */
        public var video: aws.sdk.kotlin.services.rekognition.model.Video? = null
        /**
         * Currently, Amazon Rekognition Video returns a single object in the `VideoMetadata` array. The object contains information about the video stream in the input file that Amazon Rekognition Video chose to analyze. The `VideoMetadata` object includes the video codec, video format and other information. Video metadata is returned in each page of information returned by `GetSegmentDetection`.
         */
        public var videoMetadata: List<aws.sdk.kotlin.services.rekognition.model.VideoMetadata>? = null

        @PublishedApi
        internal constructor()

        // Copy-constructor used by copy(); seeds every builder field from an existing response.
        @PublishedApi
        internal constructor(x: aws.sdk.kotlin.services.rekognition.model.GetSegmentDetectionResponse) : this() {
            this.audioMetadata = x.audioMetadata
            this.jobId = x.jobId
            this.jobStatus = x.jobStatus
            this.jobTag = x.jobTag
            this.nextToken = x.nextToken
            this.segments = x.segments
            this.selectedSegmentTypes = x.selectedSegmentTypes
            this.statusMessage = x.statusMessage
            this.video = x.video
            this.videoMetadata = x.videoMetadata
        }

        @PublishedApi
        internal fun build(): aws.sdk.kotlin.services.rekognition.model.GetSegmentDetectionResponse = GetSegmentDetectionResponse(this)

        /**
         * construct an [aws.sdk.kotlin.services.rekognition.model.Video] inside the given [block]
         */
        public fun video(block: aws.sdk.kotlin.services.rekognition.model.Video.Builder.() -> kotlin.Unit) {
            this.video = aws.sdk.kotlin.services.rekognition.model.Video.invoke(block)
        }

        // Hook used by codegen'd deserializers to repair required members; no-op for this shape.
        internal fun correctErrors(): Builder {
            return this
        }
    }
}
// © 2015 - 2025 Weber Informatics LLC | Privacy Policy