
com.azure.resourcemanager.datafactory.models.AzureDataLakeStoreSink Maven / Gradle / Ivy
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.datafactory.models;
import com.azure.core.annotation.Fluent;
import com.azure.json.JsonReader;
import com.azure.json.JsonToken;
import com.azure.json.JsonWriter;
import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.Map;
/**
* A copy activity Azure Data Lake Store sink.
*/
@Fluent
public final class AzureDataLakeStoreSink extends CopySink {
    /*
     * Copy sink type.
     */
    private String type = "AzureDataLakeStoreSink";

    /*
     * The type of copy behavior for copy sink. Type: string (or Expression with resultType string).
     */
    private Object copyBehavior;

    /*
     * Single File Parallel.
     */
    private Object enableAdlsSingleFileParallel;

    /**
     * Creates an instance of AzureDataLakeStoreSink class.
     */
    public AzureDataLakeStoreSink() {
    }

    /**
     * Get the type property: Copy sink type.
     *
     * @return the type value.
     */
    @Override
    public String type() {
        return this.type;
    }

    /**
     * Get the copyBehavior property: The type of copy behavior for copy sink. Type: string (or Expression with
     * resultType string).
     *
     * @return the copyBehavior value.
     */
    public Object copyBehavior() {
        return this.copyBehavior;
    }

    /**
     * Set the copyBehavior property: The type of copy behavior for copy sink. Type: string (or Expression with
     * resultType string).
     *
     * @param copyBehavior the copyBehavior value to set.
     * @return the AzureDataLakeStoreSink object itself.
     */
    public AzureDataLakeStoreSink withCopyBehavior(Object copyBehavior) {
        this.copyBehavior = copyBehavior;
        return this;
    }

    /**
     * Get the enableAdlsSingleFileParallel property: Single File Parallel.
     *
     * @return the enableAdlsSingleFileParallel value.
     */
    public Object enableAdlsSingleFileParallel() {
        return this.enableAdlsSingleFileParallel;
    }

    /**
     * Set the enableAdlsSingleFileParallel property: Single File Parallel.
     *
     * @param enableAdlsSingleFileParallel the enableAdlsSingleFileParallel value to set.
     * @return the AzureDataLakeStoreSink object itself.
     */
    public AzureDataLakeStoreSink withEnableAdlsSingleFileParallel(Object enableAdlsSingleFileParallel) {
        this.enableAdlsSingleFileParallel = enableAdlsSingleFileParallel;
        return this;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public AzureDataLakeStoreSink withWriteBatchSize(Object writeBatchSize) {
        super.withWriteBatchSize(writeBatchSize);
        return this;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public AzureDataLakeStoreSink withWriteBatchTimeout(Object writeBatchTimeout) {
        super.withWriteBatchTimeout(writeBatchTimeout);
        return this;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public AzureDataLakeStoreSink withSinkRetryCount(Object sinkRetryCount) {
        super.withSinkRetryCount(sinkRetryCount);
        return this;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public AzureDataLakeStoreSink withSinkRetryWait(Object sinkRetryWait) {
        super.withSinkRetryWait(sinkRetryWait);
        return this;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public AzureDataLakeStoreSink withMaxConcurrentConnections(Object maxConcurrentConnections) {
        super.withMaxConcurrentConnections(maxConcurrentConnections);
        return this;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public AzureDataLakeStoreSink withDisableMetricsCollection(Object disableMetricsCollection) {
        super.withDisableMetricsCollection(disableMetricsCollection);
        return this;
    }

    /**
     * Validates the instance.
     *
     * @throws IllegalArgumentException thrown if the instance is not valid.
     */
    @Override
    public void validate() {
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
        jsonWriter.writeStartObject();
        jsonWriter.writeUntypedField("writeBatchSize", writeBatchSize());
        jsonWriter.writeUntypedField("writeBatchTimeout", writeBatchTimeout());
        jsonWriter.writeUntypedField("sinkRetryCount", sinkRetryCount());
        jsonWriter.writeUntypedField("sinkRetryWait", sinkRetryWait());
        jsonWriter.writeUntypedField("maxConcurrentConnections", maxConcurrentConnections());
        jsonWriter.writeUntypedField("disableMetricsCollection", disableMetricsCollection());
        jsonWriter.writeStringField("type", this.type);
        jsonWriter.writeUntypedField("copyBehavior", this.copyBehavior);
        jsonWriter.writeUntypedField("enableAdlsSingleFileParallel", this.enableAdlsSingleFileParallel);
        if (additionalProperties() != null) {
            // Fixed: was a raw Map.Entry (generics stripped by the HTML scrape); the generated
            // original is parameterized, which also avoids an unchecked getKey()/getValue() use.
            for (Map.Entry<String, Object> additionalProperty : additionalProperties().entrySet()) {
                jsonWriter.writeUntypedField(additionalProperty.getKey(), additionalProperty.getValue());
            }
        }
        return jsonWriter.writeEndObject();
    }

    /**
     * Reads an instance of AzureDataLakeStoreSink from the JsonReader.
     *
     * @param jsonReader The JsonReader being read.
     * @return An instance of AzureDataLakeStoreSink if the JsonReader was pointing to an instance of it, or null if it
     * was pointing to JSON null.
     * @throws IOException If an error occurs while reading the AzureDataLakeStoreSink.
     */
    public static AzureDataLakeStoreSink fromJson(JsonReader jsonReader) throws IOException {
        return jsonReader.readObject(reader -> {
            AzureDataLakeStoreSink deserializedAzureDataLakeStoreSink = new AzureDataLakeStoreSink();
            // Fixed: was a raw Map (generics stripped by the HTML scrape); the parameterized type
            // matches the LinkedHashMap<> diamond below and CopySink.withAdditionalProperties.
            Map<String, Object> additionalProperties = null;
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                String fieldName = reader.getFieldName();
                reader.nextToken();

                if ("writeBatchSize".equals(fieldName)) {
                    deserializedAzureDataLakeStoreSink.withWriteBatchSize(reader.readUntyped());
                } else if ("writeBatchTimeout".equals(fieldName)) {
                    deserializedAzureDataLakeStoreSink.withWriteBatchTimeout(reader.readUntyped());
                } else if ("sinkRetryCount".equals(fieldName)) {
                    deserializedAzureDataLakeStoreSink.withSinkRetryCount(reader.readUntyped());
                } else if ("sinkRetryWait".equals(fieldName)) {
                    deserializedAzureDataLakeStoreSink.withSinkRetryWait(reader.readUntyped());
                } else if ("maxConcurrentConnections".equals(fieldName)) {
                    deserializedAzureDataLakeStoreSink.withMaxConcurrentConnections(reader.readUntyped());
                } else if ("disableMetricsCollection".equals(fieldName)) {
                    deserializedAzureDataLakeStoreSink.withDisableMetricsCollection(reader.readUntyped());
                } else if ("type".equals(fieldName)) {
                    deserializedAzureDataLakeStoreSink.type = reader.getString();
                } else if ("copyBehavior".equals(fieldName)) {
                    deserializedAzureDataLakeStoreSink.copyBehavior = reader.readUntyped();
                } else if ("enableAdlsSingleFileParallel".equals(fieldName)) {
                    deserializedAzureDataLakeStoreSink.enableAdlsSingleFileParallel = reader.readUntyped();
                } else {
                    if (additionalProperties == null) {
                        additionalProperties = new LinkedHashMap<>();
                    }
                    additionalProperties.put(fieldName, reader.readUntyped());
                }
            }
            deserializedAzureDataLakeStoreSink.withAdditionalProperties(additionalProperties);

            return deserializedAzureDataLakeStoreSink;
        });
    }
}
// © 2015 - 2025 Weber Informatics LLC (artifact-hosting page footer; not part of the original source)