All downloads are free. Search and download functionalities use the official Maven repository.

com.pulumi.aws.fsx.kotlin.DataRepositoryAssociationArgs.kt Maven / Gradle / Ivy

Go to download

Build cloud applications and infrastructure by combining the safety and reliability of infrastructure as code with the power of the Kotlin programming language.

There is a newer version: 6.57.0.0
Show newest version
@file:Suppress("NAME_SHADOWING", "DEPRECATION")

package com.pulumi.aws.fsx.kotlin

import com.pulumi.aws.fsx.DataRepositoryAssociationArgs.builder
import com.pulumi.aws.fsx.kotlin.inputs.DataRepositoryAssociationS3Args
import com.pulumi.aws.fsx.kotlin.inputs.DataRepositoryAssociationS3ArgsBuilder
import com.pulumi.core.Output
import com.pulumi.core.Output.of
import com.pulumi.kotlin.ConvertibleToJava
import com.pulumi.kotlin.PulumiTagMarker
import com.pulumi.kotlin.applySuspend
import kotlin.Boolean
import kotlin.Int
import kotlin.Pair
import kotlin.String
import kotlin.Suppress
import kotlin.Unit
import kotlin.collections.Map
import kotlin.jvm.JvmName

/**
 * Manages a FSx for Lustre Data Repository Association. See [Linking your file system to an S3 bucket](https://docs.aws.amazon.com/fsx/latest/LustreGuide/create-dra-linked-data-repo.html) for more information.
 * > **NOTE:** Data Repository Associations are only compatible with AWS FSx for Lustre File Systems and `PERSISTENT_2` deployment type.
 * ## Example Usage
 * 
 * ```typescript
 * import * as pulumi from "@pulumi/pulumi";
 * import * as aws from "@pulumi/aws";
 * const example = new aws.s3.BucketV2("example", {bucket: "my-bucket"});
 * const exampleBucketAclV2 = new aws.s3.BucketAclV2("example", {
 *     bucket: example.id,
 *     acl: "private",
 * });
 * const exampleLustreFileSystem = new aws.fsx.LustreFileSystem("example", {
 *     storageCapacity: 1200,
 *     subnetIds: exampleAwsSubnet.id,
 *     deploymentType: "PERSISTENT_2",
 *     perUnitStorageThroughput: 125,
 * });
 * const exampleDataRepositoryAssociation = new aws.fsx.DataRepositoryAssociation("example", {
 *     fileSystemId: exampleLustreFileSystem.id,
 *     dataRepositoryPath: pulumi.interpolate`s3://${example.id}`,
 *     fileSystemPath: "/my-bucket",
 *     s3: {
 *         autoExportPolicy: {
 *             events: [
 *                 "NEW",
 *                 "CHANGED",
 *                 "DELETED",
 *             ],
 *         },
 *         autoImportPolicy: {
 *             events: [
 *                 "NEW",
 *                 "CHANGED",
 *                 "DELETED",
 *             ],
 *         },
 *     },
 * });
 * ```
 * ```python
 * import pulumi
 * import pulumi_aws as aws
 * example = aws.s3.BucketV2("example", bucket="my-bucket")
 * example_bucket_acl_v2 = aws.s3.BucketAclV2("example",
 *     bucket=example.id,
 *     acl="private")
 * example_lustre_file_system = aws.fsx.LustreFileSystem("example",
 *     storage_capacity=1200,
 *     subnet_ids=example_aws_subnet["id"],
 *     deployment_type="PERSISTENT_2",
 *     per_unit_storage_throughput=125)
 * example_data_repository_association = aws.fsx.DataRepositoryAssociation("example",
 *     file_system_id=example_lustre_file_system.id,
 *     data_repository_path=example.id.apply(lambda id: f"s3://{id}"),
 *     file_system_path="/my-bucket",
 *     s3={
 *         "auto_export_policy": {
 *             "events": [
 *                 "NEW",
 *                 "CHANGED",
 *                 "DELETED",
 *             ],
 *         },
 *         "auto_import_policy": {
 *             "events": [
 *                 "NEW",
 *                 "CHANGED",
 *                 "DELETED",
 *             ],
 *         },
 *     })
 * ```
 * ```csharp
 * using System.Collections.Generic;
 * using System.Linq;
 * using Pulumi;
 * using Aws = Pulumi.Aws;
 * return await Deployment.RunAsync(() =>
 * {
 *     var example = new Aws.S3.BucketV2("example", new()
 *     {
 *         Bucket = "my-bucket",
 *     });
 *     var exampleBucketAclV2 = new Aws.S3.BucketAclV2("example", new()
 *     {
 *         Bucket = example.Id,
 *         Acl = "private",
 *     });
 *     var exampleLustreFileSystem = new Aws.Fsx.LustreFileSystem("example", new()
 *     {
 *         StorageCapacity = 1200,
 *         SubnetIds = exampleAwsSubnet.Id,
 *         DeploymentType = "PERSISTENT_2",
 *         PerUnitStorageThroughput = 125,
 *     });
 *     var exampleDataRepositoryAssociation = new Aws.Fsx.DataRepositoryAssociation("example", new()
 *     {
 *         FileSystemId = exampleLustreFileSystem.Id,
 *         DataRepositoryPath = example.Id.Apply(id => $"s3://{id}"),
 *         FileSystemPath = "/my-bucket",
 *         S3 = new Aws.Fsx.Inputs.DataRepositoryAssociationS3Args
 *         {
 *             AutoExportPolicy = new Aws.Fsx.Inputs.DataRepositoryAssociationS3AutoExportPolicyArgs
 *             {
 *                 Events = new[]
 *                 {
 *                     "NEW",
 *                     "CHANGED",
 *                     "DELETED",
 *                 },
 *             },
 *             AutoImportPolicy = new Aws.Fsx.Inputs.DataRepositoryAssociationS3AutoImportPolicyArgs
 *             {
 *                 Events = new[]
 *                 {
 *                     "NEW",
 *                     "CHANGED",
 *                     "DELETED",
 *                 },
 *             },
 *         },
 *     });
 * });
 * ```
 * ```go
 * package main
 * import (
 * 	"fmt"
 * 	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/fsx"
 * 	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3"
 * 	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
 * )
 * func main() {
 * 	pulumi.Run(func(ctx *pulumi.Context) error {
 * 		example, err := s3.NewBucketV2(ctx, "example", &s3.BucketV2Args{
 * 			Bucket: pulumi.String("my-bucket"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		_, err = s3.NewBucketAclV2(ctx, "example", &s3.BucketAclV2Args{
 * 			Bucket: example.ID(),
 * 			Acl:    pulumi.String("private"),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		exampleLustreFileSystem, err := fsx.NewLustreFileSystem(ctx, "example", &fsx.LustreFileSystemArgs{
 * 			StorageCapacity:          pulumi.Int(1200),
 * 			SubnetIds:                pulumi.Any(exampleAwsSubnet.Id),
 * 			DeploymentType:           pulumi.String("PERSISTENT_2"),
 * 			PerUnitStorageThroughput: pulumi.Int(125),
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		_, err = fsx.NewDataRepositoryAssociation(ctx, "example", &fsx.DataRepositoryAssociationArgs{
 * 			FileSystemId: exampleLustreFileSystem.ID(),
 * 			DataRepositoryPath: example.ID().ApplyT(func(id string) (string, error) {
 * 				return fmt.Sprintf("s3://%v", id), nil
 * 			}).(pulumi.StringOutput),
 * 			FileSystemPath: pulumi.String("/my-bucket"),
 * 			S3: &fsx.DataRepositoryAssociationS3Args{
 * 				AutoExportPolicy: &fsx.DataRepositoryAssociationS3AutoExportPolicyArgs{
 * 					Events: pulumi.StringArray{
 * 						pulumi.String("NEW"),
 * 						pulumi.String("CHANGED"),
 * 						pulumi.String("DELETED"),
 * 					},
 * 				},
 * 				AutoImportPolicy: &fsx.DataRepositoryAssociationS3AutoImportPolicyArgs{
 * 					Events: pulumi.StringArray{
 * 						pulumi.String("NEW"),
 * 						pulumi.String("CHANGED"),
 * 						pulumi.String("DELETED"),
 * 					},
 * 				},
 * 			},
 * 		})
 * 		if err != nil {
 * 			return err
 * 		}
 * 		return nil
 * 	})
 * }
 * ```
 * ```java
 * package generated_program;
 * import com.pulumi.Context;
 * import com.pulumi.Pulumi;
 * import com.pulumi.core.Output;
 * import com.pulumi.aws.s3.BucketV2;
 * import com.pulumi.aws.s3.BucketV2Args;
 * import com.pulumi.aws.s3.BucketAclV2;
 * import com.pulumi.aws.s3.BucketAclV2Args;
 * import com.pulumi.aws.fsx.LustreFileSystem;
 * import com.pulumi.aws.fsx.LustreFileSystemArgs;
 * import com.pulumi.aws.fsx.DataRepositoryAssociation;
 * import com.pulumi.aws.fsx.DataRepositoryAssociationArgs;
 * import com.pulumi.aws.fsx.inputs.DataRepositoryAssociationS3Args;
 * import com.pulumi.aws.fsx.inputs.DataRepositoryAssociationS3AutoExportPolicyArgs;
 * import com.pulumi.aws.fsx.inputs.DataRepositoryAssociationS3AutoImportPolicyArgs;
 * import java.util.List;
 * import java.util.ArrayList;
 * import java.util.Map;
 * import java.io.File;
 * import java.nio.file.Files;
 * import java.nio.file.Paths;
 * public class App {
 *     public static void main(String[] args) {
 *         Pulumi.run(App::stack);
 *     }
 *     public static void stack(Context ctx) {
 *         var example = new BucketV2("example", BucketV2Args.builder()
 *             .bucket("my-bucket")
 *             .build());
 *         var exampleBucketAclV2 = new BucketAclV2("exampleBucketAclV2", BucketAclV2Args.builder()
 *             .bucket(example.id())
 *             .acl("private")
 *             .build());
 *         var exampleLustreFileSystem = new LustreFileSystem("exampleLustreFileSystem", LustreFileSystemArgs.builder()
 *             .storageCapacity(1200)
 *             .subnetIds(exampleAwsSubnet.id())
 *             .deploymentType("PERSISTENT_2")
 *             .perUnitStorageThroughput(125)
 *             .build());
 *         var exampleDataRepositoryAssociation = new DataRepositoryAssociation("exampleDataRepositoryAssociation", DataRepositoryAssociationArgs.builder()
 *             .fileSystemId(exampleLustreFileSystem.id())
 *             .dataRepositoryPath(example.id().applyValue(id -> String.format("s3://%s", id)))
 *             .fileSystemPath("/my-bucket")
 *             .s3(DataRepositoryAssociationS3Args.builder()
 *                 .autoExportPolicy(DataRepositoryAssociationS3AutoExportPolicyArgs.builder()
 *                     .events(
 *                         "NEW",
 *                         "CHANGED",
 *                         "DELETED")
 *                     .build())
 *                 .autoImportPolicy(DataRepositoryAssociationS3AutoImportPolicyArgs.builder()
 *                     .events(
 *                         "NEW",
 *                         "CHANGED",
 *                         "DELETED")
 *                     .build())
 *                 .build())
 *             .build());
 *     }
 * }
 * ```
 * ```yaml
 * resources:
 *   example:
 *     type: aws:s3:BucketV2
 *     properties:
 *       bucket: my-bucket
 *   exampleBucketAclV2:
 *     type: aws:s3:BucketAclV2
 *     name: example
 *     properties:
 *       bucket: ${example.id}
 *       acl: private
 *   exampleLustreFileSystem:
 *     type: aws:fsx:LustreFileSystem
 *     name: example
 *     properties:
 *       storageCapacity: 1200
 *       subnetIds: ${exampleAwsSubnet.id}
 *       deploymentType: PERSISTENT_2
 *       perUnitStorageThroughput: 125
 *   exampleDataRepositoryAssociation:
 *     type: aws:fsx:DataRepositoryAssociation
 *     name: example
 *     properties:
 *       fileSystemId: ${exampleLustreFileSystem.id}
 *       dataRepositoryPath: s3://${example.id}
 *       fileSystemPath: /my-bucket
 *       s3:
 *         autoExportPolicy:
 *           events:
 *             - NEW
 *             - CHANGED
 *             - DELETED
 *         autoImportPolicy:
 *           events:
 *             - NEW
 *             - CHANGED
 *             - DELETED
 * ```
 * 
 * ## Import
 * Using `pulumi import`, import FSx Data Repository Associations using the `id`. For example:
 * ```sh
 * $ pulumi import aws:fsx/dataRepositoryAssociation:DataRepositoryAssociation example dra-0b1cfaeca11088b10
 * ```
 * @property batchImportMetaDataOnCreate Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to `false`.
 * @property dataRepositoryPath The path to the Amazon S3 data repository that will be linked to the file system. The path must be an S3 bucket s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. The same S3 bucket cannot be linked more than once to the same file system.
 * @property deleteDataInFilesystem Set to true to delete files from the file system upon deleting this data repository association. Defaults to `false`.
 * @property fileSystemId The ID of the Amazon FSx file system to on which to create a data repository association.
 * @property fileSystemPath A path on the file system that points to a high-level directory (such as `/ns1/`) or subdirectory (such as `/ns1/subdir/`) that will be mapped 1-1 with `data_repository_path`. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path `/ns1/`, then you cannot link another data repository with file system path `/ns1/ns2`. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.
 * @property importedFileChunkSize For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.
 * @property s3 See the `s3` configuration block. Max of 1.
 * The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository.
 * @property tags A map of tags to assign to the data repository association. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
 */
// NOTE: generic type parameters below were stripped during HTML extraction and have been
// restored from the builder's typed setters and the documented property types above.
public data class DataRepositoryAssociationArgs(
    public val batchImportMetaDataOnCreate: Output<Boolean>? = null,
    public val dataRepositoryPath: Output<String>? = null,
    public val deleteDataInFilesystem: Output<Boolean>? = null,
    public val fileSystemId: Output<String>? = null,
    public val fileSystemPath: Output<String>? = null,
    public val importedFileChunkSize: Output<Int>? = null,
    public val s3: Output<DataRepositoryAssociationS3Args>? = null,
    public val tags: Output<Map<String, String>>? = null,
) : ConvertibleToJava<com.pulumi.aws.fsx.DataRepositoryAssociationArgs> {
    /** Converts this Kotlin args wrapper into the underlying Java SDK args object. */
    override fun toJava(): com.pulumi.aws.fsx.DataRepositoryAssociationArgs =
        com.pulumi.aws.fsx.DataRepositoryAssociationArgs.builder()
            .batchImportMetaDataOnCreate(batchImportMetaDataOnCreate?.applyValue({ args0 -> args0 }))
            .dataRepositoryPath(dataRepositoryPath?.applyValue({ args0 -> args0 }))
            .deleteDataInFilesystem(deleteDataInFilesystem?.applyValue({ args0 -> args0 }))
            .fileSystemId(fileSystemId?.applyValue({ args0 -> args0 }))
            .fileSystemPath(fileSystemPath?.applyValue({ args0 -> args0 }))
            .importedFileChunkSize(importedFileChunkSize?.applyValue({ args0 -> args0 }))
            .s3(s3?.applyValue({ args0 -> args0.let({ args0 -> args0.toJava() }) }))
            .tags(
                tags?.applyValue({ args0 ->
                    args0.map({ args0 ->
                        args0.key.to(args0.value)
                    }).toMap()
                }),
            ).build()
}

/**
 * Builder for [DataRepositoryAssociationArgs].
 */
@PulumiTagMarker
public class DataRepositoryAssociationArgsBuilder internal constructor() {
    // NOTE: generic type parameters in this class were stripped during HTML extraction and
    // have been restored to match the typed setter overloads and the args data class.
    private var batchImportMetaDataOnCreate: Output<Boolean>? = null

    private var dataRepositoryPath: Output<String>? = null

    private var deleteDataInFilesystem: Output<Boolean>? = null

    private var fileSystemId: Output<String>? = null

    private var fileSystemPath: Output<String>? = null

    private var importedFileChunkSize: Output<Int>? = null

    private var s3: Output<DataRepositoryAssociationS3Args>? = null

    private var tags: Output<Map<String, String>>? = null

    /**
     * @param value Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to `false`.
     */
    @JvmName("ppxbjwltfekeiirk")
    public suspend fun batchImportMetaDataOnCreate(`value`: Output<Boolean>) {
        this.batchImportMetaDataOnCreate = value
    }

    /**
     * @param value The path to the Amazon S3 data repository that will be linked to the file system. The path must be an S3 bucket s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. The same S3 bucket cannot be linked more than once to the same file system.
     */
    @JvmName("stpqutcioimbktbe")
    public suspend fun dataRepositoryPath(`value`: Output<String>) {
        this.dataRepositoryPath = value
    }

    /**
     * @param value Set to true to delete files from the file system upon deleting this data repository association. Defaults to `false`.
     */
    @JvmName("jfyxkekdasjchidw")
    public suspend fun deleteDataInFilesystem(`value`: Output<Boolean>) {
        this.deleteDataInFilesystem = value
    }

    /**
     * @param value The ID of the Amazon FSx file system to on which to create a data repository association.
     */
    @JvmName("yblkmemjmywwahhs")
    public suspend fun fileSystemId(`value`: Output<String>) {
        this.fileSystemId = value
    }

    /**
     * @param value A path on the file system that points to a high-level directory (such as `/ns1/`) or subdirectory (such as `/ns1/subdir/`) that will be mapped 1-1 with `data_repository_path`. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path `/ns1/`, then you cannot link another data repository with file system path `/ns1/ns2`. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.
     */
    @JvmName("qogtbbjcpigrxxbn")
    public suspend fun fileSystemPath(`value`: Output<String>) {
        this.fileSystemPath = value
    }

    /**
     * @param value For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.
     */
    @JvmName("eifotlwcfhcjgbul")
    public suspend fun importedFileChunkSize(`value`: Output<Int>) {
        this.importedFileChunkSize = value
    }

    /**
     * @param value See the `s3` configuration block. Max of 1.
     * The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository.
     */
    @JvmName("lkhcibwyqkorkrfh")
    public suspend fun s3(`value`: Output<DataRepositoryAssociationS3Args>) {
        this.s3 = value
    }

    /**
     * @param value A map of tags to assign to the data repository association. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
     */
    @JvmName("ftbuohugvgaiuler")
    public suspend fun tags(`value`: Output<Map<String, String>>) {
        this.tags = value
    }

    /**
     * @param value Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Defaults to `false`.
     */
    @JvmName("wjxypwhqdokshita")
    public suspend fun batchImportMetaDataOnCreate(`value`: Boolean?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.batchImportMetaDataOnCreate = mapped
    }

    /**
     * @param value The path to the Amazon S3 data repository that will be linked to the file system. The path must be an S3 bucket s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. The same S3 bucket cannot be linked more than once to the same file system.
     */
    @JvmName("asgsgwgsqkioixvi")
    public suspend fun dataRepositoryPath(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.dataRepositoryPath = mapped
    }

    /**
     * @param value Set to true to delete files from the file system upon deleting this data repository association. Defaults to `false`.
     */
    @JvmName("tpteakikjiedkvvh")
    public suspend fun deleteDataInFilesystem(`value`: Boolean?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.deleteDataInFilesystem = mapped
    }

    /**
     * @param value The ID of the Amazon FSx file system to on which to create a data repository association.
     */
    @JvmName("qgynccgskfqwtpmw")
    public suspend fun fileSystemId(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.fileSystemId = mapped
    }

    /**
     * @param value A path on the file system that points to a high-level directory (such as `/ns1/`) or subdirectory (such as `/ns1/subdir/`) that will be mapped 1-1 with `data_repository_path`. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path `/ns1/`, then you cannot link another data repository with file system path `/ns1/ns2`. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.
     */
    @JvmName("ivvyupmbqxpqgoci")
    public suspend fun fileSystemPath(`value`: String?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.fileSystemPath = mapped
    }

    /**
     * @param value For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.
     */
    @JvmName("otdgqpeyuilfoucp")
    public suspend fun importedFileChunkSize(`value`: Int?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.importedFileChunkSize = mapped
    }

    /**
     * @param value See the `s3` configuration block. Max of 1.
     * The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository.
     */
    @JvmName("pgxhfnskitbyofaa")
    public suspend fun s3(`value`: DataRepositoryAssociationS3Args?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.s3 = mapped
    }

    /**
     * @param argument See the `s3` configuration block. Max of 1.
     * The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository.
     */
    @JvmName("ixkrrtvutmeymmos")
    public suspend fun s3(argument: suspend DataRepositoryAssociationS3ArgsBuilder.() -> Unit) {
        val toBeMapped = DataRepositoryAssociationS3ArgsBuilder().applySuspend { argument() }.build()
        val mapped = of(toBeMapped)
        this.s3 = mapped
    }

    /**
     * @param value A map of tags to assign to the data repository association. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
     */
    @JvmName("obmbseqjrkhigayf")
    public suspend fun tags(`value`: Map<String, String>?) {
        val toBeMapped = value
        val mapped = toBeMapped?.let({ args0 -> of(args0) })
        this.tags = mapped
    }

    /**
     * @param values A map of tags to assign to the data repository association. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
     */
    @JvmName("syoyicfdtmgdtgjw")
    public fun tags(vararg values: Pair<String, String>) {
        val toBeMapped = values.toMap()
        val mapped = toBeMapped.let({ args0 -> of(args0) })
        this.tags = mapped
    }

    /** Assembles the accumulated values into an immutable [DataRepositoryAssociationArgs]. */
    internal fun build(): DataRepositoryAssociationArgs = DataRepositoryAssociationArgs(
        batchImportMetaDataOnCreate = batchImportMetaDataOnCreate,
        dataRepositoryPath = dataRepositoryPath,
        deleteDataInFilesystem = deleteDataInFilesystem,
        fileSystemId = fileSystemId,
        fileSystemPath = fileSystemPath,
        importedFileChunkSize = importedFileChunkSize,
        s3 = s3,
        tags = tags,
    )
}




© 2015 - 2024 Weber Informatics LLC | Privacy Policy