/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.bigquery.model;
/**
* JobConfigurationLoad contains the configuration properties for loading data into a destination
* table.
*
* This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the BigQuery API. For a detailed explanation see:
* https://developers.google.com/api-client-library/java/google-http-java-client/json
*
*
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public final class JobConfigurationLoad extends com.google.api.client.json.GenericJson {
/**
* Optional. Accept rows that are missing trailing optional columns. The missing values are
* treated as nulls. If false, records with missing trailing columns are treated as bad records,
* and if there are too many bad records, an invalid error is returned in the job result. The
* default value is false. Only applicable to CSV, ignored for other formats.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Boolean allowJaggedRows;
/**
* Indicates if BigQuery should allow quoted data sections that contain newline characters in a
* CSV file. The default value is false.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Boolean allowQuotedNewlines;
/**
* Optional. Indicates if we should automatically infer the options and schema for CSV and JSON
* sources.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Boolean autodetect;
/**
* Clustering specification for the destination table.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private Clustering clustering;
/**
* Optional. Character map supported for column names in CSV/Parquet loads. Defaults to STRICT and
* can be overridden by Project Config Service. Using this option with unsupported load formats
* will result in an error.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String columnNameCharacterMap;
/**
* Optional. Connection properties which can modify the load job behavior. Currently, only the
* 'session_id' connection property is supported, and is used to resolve _SESSION appearing as the
* dataset id.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<ConnectionProperty> connectionProperties;
static {
// hack to force ProGuard to consider ConnectionProperty used, since otherwise it would be stripped out
// see https://github.com/google/google-api-java-client/issues/543
com.google.api.client.util.Data.nullOf(ConnectionProperty.class);
}
/**
* Optional. [Experimental] Configures the load job to copy files directly to the destination
* BigLake managed table, bypassing file content reading and rewriting. Copying files only is
* supported when all the following are true: * `source_uris` are located in the same Cloud
* Storage location as the destination table's `storage_uri` location. * `source_format` is
* `PARQUET`. * `destination_table` is an existing BigLake managed table. The table's schema does
* not have flexible column names. The table's columns do not have type parameters other than
* precision and scale. * No options other than the above are specified.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Boolean copyFilesOnly;
/**
* Optional. Specifies whether the job is allowed to create new tables. The following values are
* supported: * CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. *
* CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in
* the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions
* occur as one atomic update upon job completion.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String createDisposition;
/**
* Optional. If this property is true, the job creates a new session using a randomly generated
* session_id. To continue using a created session with subsequent queries, pass the existing
* session identifier as a `ConnectionProperty` value. The session identifier is returned as part
* of the `SessionInfo` message within the query statistics. The new session's location will be
* set to `Job.JobReference.location` if it is present, otherwise it's set to the default location
* based on existing routing logic.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Boolean createSession;
/**
* Defines the list of possible SQL data types to which the source decimal values are converted.
* This list and the precision and the scale parameters of the decimal field determine the target
* type. In the order of NUMERIC, BIGNUMERIC, and STRING, a type is picked if it is in the
* specified list and if it supports the precision and the scale. STRING supports all precision
* and scale values. If none of the listed types supports the precision and the scale, the type
* supporting the widest range in the specified list is picked, and if a value exceeds the
* supported range when reading the data, an error will be thrown. Example: Suppose the value of
* this field is ["NUMERIC", "BIGNUMERIC"]. If (precision,scale) is: * (38,9) -> NUMERIC; * (39,9)
* -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits); * (38,10) -> BIGNUMERIC (NUMERIC cannot
* hold 10 fractional digits); * (76,38) -> BIGNUMERIC; * (77,38) -> BIGNUMERIC (error if value
* exceeds supported range). This field cannot contain duplicate types. The order of the types in
* this field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as ["NUMERIC",
* "BIGNUMERIC"] and NUMERIC always takes precedence over BIGNUMERIC. Defaults to ["NUMERIC",
* "STRING"] for ORC and ["NUMERIC"] for the other file formats.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<java.lang.String> decimalTargetTypes;
/**
* Custom encryption configuration (e.g., Cloud KMS keys)
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private EncryptionConfiguration destinationEncryptionConfiguration;
/**
* [Required] The destination table to load the data into.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private TableReference destinationTable;
/**
* Optional. [Experimental] Properties with which to create the destination table if it is new.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private DestinationTableProperties destinationTableProperties;
/**
* Optional. The character encoding of the data. The supported values are UTF-8, ISO-8859-1,
* UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8. BigQuery decodes the
* data after the raw, binary data has been split using the values of the `quote` and
* `fieldDelimiter` properties. If you don't specify an encoding, or if you specify a UTF-8
* encoding when the CSV file is not UTF-8 encoded, BigQuery attempts to convert the data to
* UTF-8. Generally, your data loads successfully, but it may not match byte-for-byte what you
* expect. To avoid this, specify the correct encoding by using the `--encoding` flag. If BigQuery
* can't convert a character other than the ASCII `0` character, BigQuery converts the character
* to the standard Unicode replacement character: �.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String encoding;
/**
* Optional. The separator character for fields in a CSV file. The separator is interpreted as a
* single byte. For files encoded in ISO-8859-1, any single character can be used as a separator.
* For files encoded in UTF-8, characters represented in decimal range 1-127 (U+0001-U+007F) can
* be used without any modification. UTF-8 characters encoded with multiple bytes (i.e. U+0080 and
* above) will have only the first byte used for separating fields. The remaining bytes will be
* treated as a part of the field. BigQuery also supports the escape sequence "\t" (U+0009) to
* specify a tab separator. The default value is comma (",", U+002C).
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String fieldDelimiter;
/**
* Optional. Specifies how source URIs are interpreted for constructing the file set to load. By
* default, source URIs are expanded against the underlying storage. You can also specify manifest
* files to control how the file set is constructed. This option is only applicable to object
* storage systems.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String fileSetSpecType;
/**
* Optional. When set, configures hive partitioning support. Not all storage formats support hive
* partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as
* will providing an invalid specification.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private HivePartitioningOptions hivePartitioningOptions;
/**
* Optional. Indicates if BigQuery should allow extra values that are not represented in the table
* schema. If true, the extra values are ignored. If false, records with extra columns are treated
* as bad records, and if there are too many bad records, an invalid error is returned in the job
* result. The default value is false. The sourceFormat property determines what BigQuery treats
* as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names
* in the table schema Avro, Parquet, ORC: Fields in the file schema that don't exist in the table
* schema.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Boolean ignoreUnknownValues;
/**
* Optional. Load option to be used together with source_format newline-delimited JSON to indicate
* that a variant of JSON is being loaded. To load newline-delimited GeoJSON, specify GEOJSON (and
* source_format must be set to NEWLINE_DELIMITED_JSON).
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String jsonExtension;
/**
* Optional. The maximum number of bad records that BigQuery can ignore when running the job. If
* the number of bad records exceeds this value, an invalid error is returned in the job result.
* The default value is 0, which requires that all records are valid. This is only supported for
* CSV and NEWLINE_DELIMITED_JSON file formats.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Integer maxBadRecords;
/**
* Optional. Specifies a string that represents a null value in a CSV file. For example, if you
* specify "\N", BigQuery interprets "\N" as a null value when loading a CSV file. The default
* value is the empty string. If you set this property to a custom value, BigQuery throws an error
* if an empty string is present for all data types except for STRING and BYTE. For STRING and
* BYTE columns, BigQuery interprets the empty string as an empty value.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String nullMarker;
/**
* Optional. Additional properties to set if sourceFormat is set to PARQUET.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private ParquetOptions parquetOptions;
/**
* Optional. When sourceFormat is set to "CSV", this indicates whether the embedded ASCII control
* characters (the first 32 characters in the ASCII-table, from '\x00' to '\x1F') are preserved.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Boolean preserveAsciiControlCharacters;
/**
* If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into
* BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level
* properties. If no properties are specified, BigQuery loads all properties. If any named
* property isn't found in the Cloud Datastore backup, an invalid error is returned in the job
* result.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<java.lang.String> projectionFields;
/**
* Optional. The value that is used to quote data sections in a CSV file. BigQuery converts the
* string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the
* data in its raw, binary state. The default value is a double-quote ('"'). If your data does not
* contain quoted sections, set the property value to an empty string. If your data contains
* quoted newline characters, you must also set the allowQuotedNewlines property to true. To
* include the specific quote character within a quoted value, precede it with an additional
* matching quote character. For example, if you want to escape the default character ' " ', use '
* "" '. @default "
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String quote;
/**
* Range partitioning specification for the destination table. Only one of timePartitioning and
* rangePartitioning should be specified.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private RangePartitioning rangePartitioning;
/**
* Optional. The user can provide a reference file with the reader schema. This file is only
* loaded if it is part of source URIs, but is not loaded otherwise. It is enabled for the
* following formats: AVRO, PARQUET, ORC.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String referenceFileSchemaUri;
/**
* Optional. The schema for the destination table. The schema can be omitted if the destination
* table already exists, or if you're loading data from Google Cloud Datastore.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private TableSchema schema;
/**
* [Deprecated] The inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For
* example, "foo:STRING, bar:INTEGER, baz:FLOAT".
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String schemaInline;
/**
* [Deprecated] The format of the schemaInline property.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String schemaInlineFormat;
/**
* Allows the schema of the destination table to be updated as a side effect of the load job if a
* schema is autodetected or supplied in the job configuration. Schema update options are
* supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is
* WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition
* decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of
* the following values are specified: * ALLOW_FIELD_ADDITION: allow adding a nullable field to
* the schema. * ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to
* nullable.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<java.lang.String> schemaUpdateOptions;
/**
* Optional. The number of rows at the top of a CSV file that BigQuery will skip when loading the
* data. The default value is 0. This property is useful if you have header rows in the file that
* should be skipped. When autodetect is on, the behavior is the following: * skipLeadingRows
* unspecified - Autodetect tries to detect headers in the first row. If they are not detected,
* the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows
* is 0 - Instructs autodetect that there are no headers and data should be read starting from the
* first row. * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in
* row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract
* column names for the detected schema.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Integer skipLeadingRows;
/**
* Optional. The format of the data files. For CSV files, specify "CSV". For datastore backups,
* specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For
* Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". The default value
* is CSV.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String sourceFormat;
/**
* [Required] The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud
* Storage URIs: Each URI can contain one '*' wildcard character and it must come after the
* 'bucket' name. Size limits related to load jobs apply to external data sources. For Google
* Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid
* HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one
* URI can be specified. Also, the '*' wildcard character is not allowed.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.util.List<java.lang.String> sourceUris;
/**
* Time-based partitioning specification for the destination table. Only one of timePartitioning
* and rangePartitioning should be specified.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private TimePartitioning timePartitioning;
/**
* Optional. If sourceFormat is set to "AVRO", indicates whether to interpret logical types as the
* corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for
* example, INTEGER).
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.Boolean useAvroLogicalTypes;
/**
* Optional. Specifies the action that occurs if the destination table already exists. The
* following values are supported: * WRITE_TRUNCATE: If the table already exists, BigQuery
* overwrites the data, removes the constraints and uses the schema from the load job. *
* WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. *
* WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in
* the job result. The default value is WRITE_APPEND. Each action is atomic and only occurs if
* BigQuery is able to complete the job successfully. Creation, truncation and append actions
* occur as one atomic update upon job completion.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String writeDisposition;
/**
* Optional. Accept rows that are missing trailing optional columns. The missing values are
* treated as nulls. If false, records with missing trailing columns are treated as bad records,
* and if there are too many bad records, an invalid error is returned in the job result. The
* default value is false. Only applicable to CSV, ignored for other formats.
* @return value or {@code null} for none
*/
public java.lang.Boolean getAllowJaggedRows() {
return allowJaggedRows;
}
/**
* Optional. Accept rows that are missing trailing optional columns. The missing values are
* treated as nulls. If false, records with missing trailing columns are treated as bad records,
* and if there are too many bad records, an invalid error is returned in the job result. The
* default value is false. Only applicable to CSV, ignored for other formats.
* @param allowJaggedRows allowJaggedRows or {@code null} for none
*/
public JobConfigurationLoad setAllowJaggedRows(java.lang.Boolean allowJaggedRows) {
this.allowJaggedRows = allowJaggedRows;
return this;
}
/**
* Indicates if BigQuery should allow quoted data sections that contain newline characters in a
* CSV file. The default value is false.
* @return value or {@code null} for none
*/
public java.lang.Boolean getAllowQuotedNewlines() {
return allowQuotedNewlines;
}
/**
* Indicates if BigQuery should allow quoted data sections that contain newline characters in a
* CSV file. The default value is false.
* @param allowQuotedNewlines allowQuotedNewlines or {@code null} for none
*/
public JobConfigurationLoad setAllowQuotedNewlines(java.lang.Boolean allowQuotedNewlines) {
this.allowQuotedNewlines = allowQuotedNewlines;
return this;
}
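/*
 * Illustrative sketch, not part of the generated API surface: the setters return
 * `this`, so the CSV tolerance options above can be chained fluently. Later
 * sketches in this file reuse the resulting `load` instance.
 *
 *   JobConfigurationLoad load = new JobConfigurationLoad()
 *       .setAllowJaggedRows(true)      // pad missing trailing columns with nulls
 *       .setAllowQuotedNewlines(true); // permit newlines inside quoted sections
 */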
/**
* Optional. Indicates if we should automatically infer the options and schema for CSV and JSON
* sources.
* @return value or {@code null} for none
*/
public java.lang.Boolean getAutodetect() {
return autodetect;
}
/**
* Optional. Indicates if we should automatically infer the options and schema for CSV and JSON
* sources.
* @param autodetect autodetect or {@code null} for none
*/
public JobConfigurationLoad setAutodetect(java.lang.Boolean autodetect) {
this.autodetect = autodetect;
return this;
}
/**
* Clustering specification for the destination table.
* @return value or {@code null} for none
*/
public Clustering getClustering() {
return clustering;
}
/**
* Clustering specification for the destination table.
* @param clustering clustering or {@code null} for none
*/
public JobConfigurationLoad setClustering(Clustering clustering) {
this.clustering = clustering;
return this;
}
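/*
 * Illustrative sketch: clustering keys are supplied as an ordered field-name list.
 * The column names here are placeholders.
 *
 *   load.setClustering(new Clustering()
 *       .setFields(java.util.Arrays.asList("customer_id", "event_date")));
 */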
/**
* Optional. Character map supported for column names in CSV/Parquet loads. Defaults to STRICT and
* can be overridden by Project Config Service. Using this option with unsupported load formats
* will result in an error.
* @return value or {@code null} for none
*/
public java.lang.String getColumnNameCharacterMap() {
return columnNameCharacterMap;
}
/**
* Optional. Character map supported for column names in CSV/Parquet loads. Defaults to STRICT and
* can be overridden by Project Config Service. Using this option with unsupported load formats
* will result in an error.
* @param columnNameCharacterMap columnNameCharacterMap or {@code null} for none
*/
public JobConfigurationLoad setColumnNameCharacterMap(java.lang.String columnNameCharacterMap) {
this.columnNameCharacterMap = columnNameCharacterMap;
return this;
}
/**
* Optional. Connection properties which can modify the load job behavior. Currently, only the
* 'session_id' connection property is supported, and is used to resolve _SESSION appearing as the
* dataset id.
* @return value or {@code null} for none
*/
public java.util.List<ConnectionProperty> getConnectionProperties() {
return connectionProperties;
}
/**
* Optional. Connection properties which can modify the load job behavior. Currently, only the
* 'session_id' connection property is supported, and is used to resolve _SESSION appearing as the
* dataset id.
* @param connectionProperties connectionProperties or {@code null} for none
*/
public JobConfigurationLoad setConnectionProperties(java.util.List<ConnectionProperty> connectionProperties) {
this.connectionProperties = connectionProperties;
return this;
}
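/*
 * Illustrative sketch: attaching the 'session_id' connection property described
 * above so that _SESSION resolves as the dataset id. `existingSessionId` is a
 * placeholder for an id returned in a prior job's SessionInfo.
 *
 *   ConnectionProperty sessionProp = new ConnectionProperty()
 *       .setKey("session_id")
 *       .setValue(existingSessionId);
 *   load.setConnectionProperties(java.util.Collections.singletonList(sessionProp));
 */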
/**
* Optional. [Experimental] Configures the load job to copy files directly to the destination
* BigLake managed table, bypassing file content reading and rewriting. Copying files only is
* supported when all the following are true: * `source_uris` are located in the same Cloud
* Storage location as the destination table's `storage_uri` location. * `source_format` is
* `PARQUET`. * `destination_table` is an existing BigLake managed table. The table's schema does
* not have flexible column names. The table's columns do not have type parameters other than
* precision and scale. * No options other than the above are specified.
* @return value or {@code null} for none
*/
public java.lang.Boolean getCopyFilesOnly() {
return copyFilesOnly;
}
/**
* Optional. [Experimental] Configures the load job to copy files directly to the destination
* BigLake managed table, bypassing file content reading and rewriting. Copying files only is
* supported when all the following are true: * `source_uris` are located in the same Cloud
* Storage location as the destination table's `storage_uri` location. * `source_format` is
* `PARQUET`. * `destination_table` is an existing BigLake managed table. The table's schema does
* not have flexible column names. The table's columns do not have type parameters other than
* precision and scale. * No options other than the above are specified.
* @param copyFilesOnly copyFilesOnly or {@code null} for none
*/
public JobConfigurationLoad setCopyFilesOnly(java.lang.Boolean copyFilesOnly) {
this.copyFilesOnly = copyFilesOnly;
return this;
}
/**
* Optional. Specifies whether the job is allowed to create new tables. The following values are
* supported: * CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. *
* CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in
* the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions
* occur as one atomic update upon job completion.
* @return value or {@code null} for none
*/
public java.lang.String getCreateDisposition() {
return createDisposition;
}
/**
* Optional. Specifies whether the job is allowed to create new tables. The following values are
* supported: * CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. *
* CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in
* the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions
* occur as one atomic update upon job completion.
* @param createDisposition createDisposition or {@code null} for none
*/
public JobConfigurationLoad setCreateDisposition(java.lang.String createDisposition) {
this.createDisposition = createDisposition;
return this;
}
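/*
 * Illustrative sketch: dispositions are plain strings rather than enums, so a typo
 * is only caught server-side; copying the documented constant verbatim avoids that.
 *
 *   load.setCreateDisposition("CREATE_IF_NEEDED");
 */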
/**
* Optional. If this property is true, the job creates a new session using a randomly generated
* session_id. To continue using a created session with subsequent queries, pass the existing
* session identifier as a `ConnectionProperty` value. The session identifier is returned as part
* of the `SessionInfo` message within the query statistics. The new session's location will be
* set to `Job.JobReference.location` if it is present, otherwise it's set to the default location
* based on existing routing logic.
* @return value or {@code null} for none
*/
public java.lang.Boolean getCreateSession() {
return createSession;
}
/**
* Optional. If this property is true, the job creates a new session using a randomly generated
* session_id. To continue using a created session with subsequent queries, pass the existing
* session identifier as a `ConnectionProperty` value. The session identifier is returned as part
* of the `SessionInfo` message within the query statistics. The new session's location will be
* set to `Job.JobReference.location` if it is present, otherwise it's set to the default location
* based on existing routing logic.
* @param createSession createSession or {@code null} for none
*/
public JobConfigurationLoad setCreateSession(java.lang.Boolean createSession) {
this.createSession = createSession;
return this;
}
/**
* Defines the list of possible SQL data types to which the source decimal values are converted.
* This list and the precision and the scale parameters of the decimal field determine the target
* type. In the order of NUMERIC, BIGNUMERIC, and STRING, a type is picked if it is in the
* specified list and if it supports the precision and the scale. STRING supports all precision
* and scale values. If none of the listed types supports the precision and the scale, the type
* supporting the widest range in the specified list is picked, and if a value exceeds the
* supported range when reading the data, an error will be thrown. Example: Suppose the value of
* this field is ["NUMERIC", "BIGNUMERIC"]. If (precision,scale) is: * (38,9) -> NUMERIC; * (39,9)
* -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits); * (38,10) -> BIGNUMERIC (NUMERIC cannot
* hold 10 fractional digits); * (76,38) -> BIGNUMERIC; * (77,38) -> BIGNUMERIC (error if value
* exceeds supported range). This field cannot contain duplicate types. The order of the types in
* this field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as ["NUMERIC",
* "BIGNUMERIC"] and NUMERIC always takes precedence over BIGNUMERIC. Defaults to ["NUMERIC",
* "STRING"] for ORC and ["NUMERIC"] for the other file formats.
* @return value or {@code null} for none
*/
public java.util.List<java.lang.String> getDecimalTargetTypes() {
return decimalTargetTypes;
}
/**
* Defines the list of possible SQL data types to which the source decimal values are converted.
* This list and the precision and the scale parameters of the decimal field determine the target
* type. In the order of NUMERIC, BIGNUMERIC, and STRING, a type is picked if it is in the
* specified list and if it supports the precision and the scale. STRING supports all precision
* and scale values. If none of the listed types supports the precision and the scale, the type
* supporting the widest range in the specified list is picked, and if a value exceeds the
* supported range when reading the data, an error will be thrown. Example: Suppose the value of
* this field is ["NUMERIC", "BIGNUMERIC"]. If (precision,scale) is: * (38,9) -> NUMERIC; * (39,9)
* -> BIGNUMERIC (NUMERIC cannot hold 30 integer digits); * (38,10) -> BIGNUMERIC (NUMERIC cannot
* hold 10 fractional digits); * (76,38) -> BIGNUMERIC; * (77,38) -> BIGNUMERIC (error if value
* exceeds supported range). This field cannot contain duplicate types. The order of the types in
* this field is ignored. For example, ["BIGNUMERIC", "NUMERIC"] is the same as ["NUMERIC",
* "BIGNUMERIC"] and NUMERIC always takes precedence over BIGNUMERIC. Defaults to ["NUMERIC",
* "STRING"] for ORC and ["NUMERIC"] for the other file formats.
* @param decimalTargetTypes decimalTargetTypes or {@code null} for none
*/
public JobConfigurationLoad setDecimalTargetTypes(java.util.List<java.lang.String> decimalTargetTypes) {
this.decimalTargetTypes = decimalTargetTypes;
return this;
}
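/*
 * Illustrative sketch, mirroring the example in the javadoc above: prefer NUMERIC
 * and fall back to BIGNUMERIC when precision or scale exceeds NUMERIC's range.
 *
 *   load.setDecimalTargetTypes(java.util.Arrays.asList("NUMERIC", "BIGNUMERIC"));
 */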
/**
* Custom encryption configuration (e.g., Cloud KMS keys)
* @return value or {@code null} for none
*/
public EncryptionConfiguration getDestinationEncryptionConfiguration() {
return destinationEncryptionConfiguration;
}
/**
* Custom encryption configuration (e.g., Cloud KMS keys)
* @param destinationEncryptionConfiguration destinationEncryptionConfiguration or {@code null} for none
*/
public JobConfigurationLoad setDestinationEncryptionConfiguration(EncryptionConfiguration destinationEncryptionConfiguration) {
this.destinationEncryptionConfiguration = destinationEncryptionConfiguration;
return this;
}
/**
* [Required] The destination table to load the data into.
* @return value or {@code null} for none
*/
public TableReference getDestinationTable() {
return destinationTable;
}
/**
* [Required] The destination table to load the data into.
* @param destinationTable destinationTable or {@code null} for none
*/
public JobConfigurationLoad setDestinationTable(TableReference destinationTable) {
this.destinationTable = destinationTable;
return this;
}
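/*
 * Illustrative sketch: the destination is addressed through a TableReference; the
 * project, dataset and table ids are placeholders.
 *
 *   load.setDestinationTable(new TableReference()
 *       .setProjectId("my-project")
 *       .setDatasetId("my_dataset")
 *       .setTableId("my_table"));
 */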
/**
* Optional. [Experimental] Properties with which to create the destination table if it is new.
* @return value or {@code null} for none
*/
public DestinationTableProperties getDestinationTableProperties() {
return destinationTableProperties;
}
/**
* Optional. [Experimental] Properties with which to create the destination table if it is new.
* @param destinationTableProperties destinationTableProperties or {@code null} for none
*/
public JobConfigurationLoad setDestinationTableProperties(DestinationTableProperties destinationTableProperties) {
this.destinationTableProperties = destinationTableProperties;
return this;
}
/**
* Optional. The character encoding of the data. The supported values are UTF-8, ISO-8859-1,
* UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8. BigQuery decodes the
* data after the raw, binary data has been split using the values of the `quote` and
* `fieldDelimiter` properties. If you don't specify an encoding, or if you specify a UTF-8
* encoding when the CSV file is not UTF-8 encoded, BigQuery attempts to convert the data to
* UTF-8. Generally, your data loads successfully, but it may not match byte-for-byte what you
* expect. To avoid this, specify the correct encoding by using the `--encoding` flag. If BigQuery
* can't convert a character other than the ASCII `0` character, BigQuery converts the character
* to the standard Unicode replacement character: �.
* @return value or {@code null} for none
*/
public java.lang.String getEncoding() {
return encoding;
}
/**
* Optional. The character encoding of the data. The supported values are UTF-8, ISO-8859-1,
* UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. The default value is UTF-8. BigQuery decodes the
* data after the raw, binary data has been split using the values of the `quote` and
* `fieldDelimiter` properties. If you don't specify an encoding, or if you specify a UTF-8
* encoding when the CSV file is not UTF-8 encoded, BigQuery attempts to convert the data to
* UTF-8. Generally, your data loads successfully, but it may not match byte-for-byte what you
* expect. To avoid this, specify the correct encoding by using the `--encoding` flag. If BigQuery
* can't convert a character other than the ASCII `0` character, BigQuery converts the character
* to the standard Unicode replacement character: �.
* @param encoding encoding or {@code null} for none
*/
public JobConfigurationLoad setEncoding(java.lang.String encoding) {
this.encoding = encoding;
return this;
}
/**
* Optional. The separator character for fields in a CSV file. The separator is interpreted as a
* single byte. For files encoded in ISO-8859-1, any single character can be used as a separator.
* For files encoded in UTF-8, characters represented in decimal range 1-127 (U+0001-U+007F) can
* be used without any modification. UTF-8 characters encoded with multiple bytes (i.e. U+0080 and
* above) will have only the first byte used for separating fields. The remaining bytes will be
* treated as a part of the field. BigQuery also supports the escape sequence "\t" (U+0009) to
* specify a tab separator. The default value is comma (",", U+002C).
* @return value or {@code null} for none
*/
public java.lang.String getFieldDelimiter() {
return fieldDelimiter;
}
/**
* Optional. The separator character for fields in a CSV file. The separator is interpreted as a
* single byte. For files encoded in ISO-8859-1, any single character can be used as a separator.
* For files encoded in UTF-8, characters represented in decimal range 1-127 (U+0001-U+007F) can
* be used without any modification. UTF-8 characters encoded with multiple bytes (i.e. U+0080 and
* above) will have only the first byte used for separating fields. The remaining bytes will be
* treated as a part of the field. BigQuery also supports the escape sequence "\t" (U+0009) to
* specify a tab separator. The default value is comma (",", U+002C).
* @param fieldDelimiter fieldDelimiter or {@code null} for none
*/
public JobConfigurationLoad setFieldDelimiter(java.lang.String fieldDelimiter) {
this.fieldDelimiter = fieldDelimiter;
return this;
}
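/*
 * Illustrative sketch: in Java source, "\t" denotes a literal tab byte, which is a
 * valid single-byte separator on its own; writing "\\t" instead would send the
 * two-character escape sequence that the javadoc above says BigQuery also accepts.
 *
 *   load.setFieldDelimiter("\t"); // tab-separated input
 */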
/**
* Optional. Specifies how source URIs are interpreted for constructing the file set to load. By
* default, source URIs are expanded against the underlying storage. You can also specify manifest
* files to control how the file set is constructed. This option is only applicable to object
* storage systems.
* @return value or {@code null} for none
*/
public java.lang.String getFileSetSpecType() {
return fileSetSpecType;
}
/**
* Optional. Specifies how source URIs are interpreted for constructing the file set to load. By
* default, source URIs are expanded against the underlying storage. You can also specify manifest
* files to control how the file set is constructed. This option is only applicable to object
* storage systems.
* @param fileSetSpecType fileSetSpecType or {@code null} for none
*/
public JobConfigurationLoad setFileSetSpecType(java.lang.String fileSetSpecType) {
this.fileSetSpecType = fileSetSpecType;
return this;
}
/**
* Optional. When set, configures hive partitioning support. Not all storage formats support hive
* partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as
* will providing an invalid specification.
* @return value or {@code null} for none
*/
public HivePartitioningOptions getHivePartitioningOptions() {
return hivePartitioningOptions;
}
/**
* Optional. When set, configures hive partitioning support. Not all storage formats support hive
* partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as
* will providing an invalid specification.
* @param hivePartitioningOptions hivePartitioningOptions or {@code null} for none
*/
public JobConfigurationLoad setHivePartitioningOptions(HivePartitioningOptions hivePartitioningOptions) {
this.hivePartitioningOptions = hivePartitioningOptions;
return this;
}
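/*
 * Illustrative sketch: hive partitioning with automatic key type inference. The
 * source URI prefix is a placeholder and must cover every partition key directory.
 *
 *   load.setHivePartitioningOptions(new HivePartitioningOptions()
 *       .setMode("AUTO")
 *       .setSourceUriPrefix("gs://my-bucket/exports/"));
 */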
/**
* Optional. Indicates if BigQuery should allow extra values that are not represented in the table
* schema. If true, the extra values are ignored. If false, records with extra columns are treated
* as bad records, and if there are too many bad records, an invalid error is returned in the job
* result. The default value is false. The sourceFormat property determines what BigQuery treats
* as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names
* in the table schema Avro, Parquet, ORC: Fields in the file schema that don't exist in the table
* schema.
* @return value or {@code null} for none
*/
public java.lang.Boolean getIgnoreUnknownValues() {
return ignoreUnknownValues;
}
/**
* Optional. Indicates if BigQuery should allow extra values that are not represented in the table
* schema. If true, the extra values are ignored. If false, records with extra columns are treated
* as bad records, and if there are too many bad records, an invalid error is returned in the job
* result. The default value is false. The sourceFormat property determines what BigQuery treats
* as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names
* in the table schema Avro, Parquet, ORC: Fields in the file schema that don't exist in the table
* schema.
* @param ignoreUnknownValues ignoreUnknownValues or {@code null} for none
*/
public JobConfigurationLoad setIgnoreUnknownValues(java.lang.Boolean ignoreUnknownValues) {
this.ignoreUnknownValues = ignoreUnknownValues;
return this;
}
/**
* Optional. Load option to be used together with source_format newline-delimited JSON to indicate
* that a variant of JSON is being loaded. To load newline-delimited GeoJSON, specify GEOJSON (and
* source_format must be set to NEWLINE_DELIMITED_JSON).
* @return value or {@code null} for none
*/
public java.lang.String getJsonExtension() {
return jsonExtension;
}
/**
* Optional. Load option to be used together with source_format newline-delimited JSON to indicate
* that a variant of JSON is being loaded. To load newline-delimited GeoJSON, specify GEOJSON (and
* source_format must be set to NEWLINE_DELIMITED_JSON).
* @param jsonExtension jsonExtension or {@code null} for none
*/
public JobConfigurationLoad setJsonExtension(java.lang.String jsonExtension) {
this.jsonExtension = jsonExtension;
return this;
}
/**
* Optional. The maximum number of bad records that BigQuery can ignore when running the job. If
* the number of bad records exceeds this value, an invalid error is returned in the job result.
* The default value is 0, which requires that all records are valid. This is only supported for
* CSV and NEWLINE_DELIMITED_JSON file formats.
* @return value or {@code null} for none
*/
public java.lang.Integer getMaxBadRecords() {
return maxBadRecords;
}
/**
* Optional. The maximum number of bad records that BigQuery can ignore when running the job. If
* the number of bad records exceeds this value, an invalid error is returned in the job result.
* The default value is 0, which requires that all records are valid. This is only supported for
* CSV and NEWLINE_DELIMITED_JSON file formats.
* @param maxBadRecords maxBadRecords or {@code null} for none
*/
public JobConfigurationLoad setMaxBadRecords(java.lang.Integer maxBadRecords) {
this.maxBadRecords = maxBadRecords;
return this;
}
/**
* Optional. Specifies a string that represents a null value in a CSV file. For example, if you
* specify "\N", BigQuery interprets "\N" as a null value when loading a CSV file. The default
* value is the empty string. If you set this property to a custom value, BigQuery throws an error
* if an empty string is present for all data types except for STRING and BYTE. For STRING and
* BYTE columns, BigQuery interprets the empty string as an empty value.
* @return value or {@code null} for none
*/
public java.lang.String getNullMarker() {
return nullMarker;
}
/**
* Optional. Specifies a string that represents a null value in a CSV file. For example, if you
* specify "\N", BigQuery interprets "\N" as a null value when loading a CSV file. The default
* value is the empty string. If you set this property to a custom value, BigQuery throws an error
* if an empty string is present for all data types except for STRING and BYTE. For STRING and
* BYTE columns, BigQuery interprets the empty string as an empty value.
* @param nullMarker nullMarker or {@code null} for none
*/
public JobConfigurationLoad setNullMarker(java.lang.String nullMarker) {
this.nullMarker = nullMarker;
return this;
}
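/*
 * Illustrative sketch: the "\N" of the javadoc above is the two characters
 * backslash and N, so the backslash must itself be escaped in Java source.
 *
 *   load.setNullMarker("\\N"); // fields containing \N load as NULL
 */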
/**
* Optional. Additional properties to set if sourceFormat is set to PARQUET.
* @return value or {@code null} for none
*/
public ParquetOptions getParquetOptions() {
return parquetOptions;
}
/**
* Optional. Additional properties to set if sourceFormat is set to PARQUET.
* @param parquetOptions parquetOptions or {@code null} for none
*/
public JobConfigurationLoad setParquetOptions(ParquetOptions parquetOptions) {
this.parquetOptions = parquetOptions;
return this;
}
/**
* Optional. When sourceFormat is set to "CSV", this indicates whether the embedded ASCII control
* characters (the first 32 characters in the ASCII-table, from '\x00' to '\x1F') are preserved.
* @return value or {@code null} for none
*/
public java.lang.Boolean getPreserveAsciiControlCharacters() {
return preserveAsciiControlCharacters;
}
/**
* Optional. When sourceFormat is set to "CSV", this indicates whether the embedded ASCII control
* characters (the first 32 characters in the ASCII-table, from '\x00' to '\x1F') are preserved.
* @param preserveAsciiControlCharacters preserveAsciiControlCharacters or {@code null} for none
*/
public JobConfigurationLoad setPreserveAsciiControlCharacters(java.lang.Boolean preserveAsciiControlCharacters) {
this.preserveAsciiControlCharacters = preserveAsciiControlCharacters;
return this;
}
/**
* If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into
* BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level
* properties. If no properties are specified, BigQuery loads all properties. If any named
* property isn't found in the Cloud Datastore backup, an invalid error is returned in the job
* result.
* @return value or {@code null} for none
*/
public java.util.List<java.lang.String> getProjectionFields() {
return projectionFields;
}
/**
* If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into
* BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level
* properties. If no properties are specified, BigQuery loads all properties. If any named
* property isn't found in the Cloud Datastore backup, an invalid error is returned in the job
* result.
* @param projectionFields projectionFields or {@code null} for none
*/
public JobConfigurationLoad setProjectionFields(java.util.List<java.lang.String> projectionFields) {
this.projectionFields = projectionFields;
return this;
}
/**
* Optional. The value that is used to quote data sections in a CSV file. BigQuery converts the
* string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the
* data in its raw, binary state. The default value is a double-quote ('"'). If your data does not
* contain quoted sections, set the property value to an empty string. If your data contains
* quoted newline characters, you must also set the allowQuotedNewlines property to true. To
* include the specific quote character within a quoted value, precede it with an additional
* matching quote character. For example, if you want to escape the default character ' " ', use '
* "" '. @default "
* @return value or {@code null} for none
*/
public java.lang.String getQuote() {
return quote;
}
/**
* Optional. The value that is used to quote data sections in a CSV file. BigQuery converts the
* string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the
* data in its raw, binary state. The default value is a double-quote ('"'). If your data does not
* contain quoted sections, set the property value to an empty string. If your data contains
* quoted newline characters, you must also set the allowQuotedNewlines property to true. To
* include the specific quote character within a quoted value, precede it with an additional
* matching quote character. For example, if you want to escape the default character ' " ', use '
* "" '. @default "
* @param quote quote or {@code null} for none
*/
public JobConfigurationLoad setQuote(java.lang.String quote) {
this.quote = quote;
return this;
}
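/*
 * Illustrative sketch: for input with no quoted sections the quote property is set
 * to the empty string rather than left at its double-quote default.
 *
 *   load.setQuote(""); // disable quote handling entirely
 */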
/**
* Range partitioning specification for the destination table. Only one of timePartitioning and
* rangePartitioning should be specified.
* @return value or {@code null} for none
*/
public RangePartitioning getRangePartitioning() {
return rangePartitioning;
}
/**
* Range partitioning specification for the destination table. Only one of timePartitioning and
* rangePartitioning should be specified.
* @param rangePartitioning rangePartitioning or {@code null} for none
*/
public JobConfigurationLoad setRangePartitioning(RangePartitioning rangePartitioning) {
this.rangePartitioning = rangePartitioning;
return this;
}
/**
* Optional. The user can provide a reference file with the reader schema. This file is only
* loaded if it is part of source URIs, but is not loaded otherwise. It is enabled for the
* following formats: AVRO, PARQUET, ORC.
* @return value or {@code null} for none
*/
public java.lang.String getReferenceFileSchemaUri() {
return referenceFileSchemaUri;
}
/**
* Optional. The user can provide a reference file with the reader schema. This file is only
* loaded if it is part of source URIs, but is not loaded otherwise. It is enabled for the
* following formats: AVRO, PARQUET, ORC.
* @param referenceFileSchemaUri referenceFileSchemaUri or {@code null} for none
*/
public JobConfigurationLoad setReferenceFileSchemaUri(java.lang.String referenceFileSchemaUri) {
this.referenceFileSchemaUri = referenceFileSchemaUri;
return this;
}
/**
* Optional. The schema for the destination table. The schema can be omitted if the destination
* table already exists, or if you're loading data from Google Cloud Datastore.
* @return value or {@code null} for none
*/
public TableSchema getSchema() {
return schema;
}
/**
* Optional. The schema for the destination table. The schema can be omitted if the destination
* table already exists, or if you're loading data from Google Cloud Datastore.
* @param schema schema or {@code null} for none
*/
public JobConfigurationLoad setSchema(TableSchema schema) {
this.schema = schema;
return this;
}
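/*
 * Illustrative sketch: an explicit two-column schema assembled from
 * TableFieldSchema entries; the field names and types are placeholders.
 *
 *   load.setSchema(new TableSchema().setFields(java.util.Arrays.asList(
 *       new TableFieldSchema().setName("name").setType("STRING"),
 *       new TableFieldSchema().setName("age").setType("INTEGER"))));
 */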
/**
* [Deprecated] The inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For
* example, "foo:STRING, bar:INTEGER, baz:FLOAT".
* @return value or {@code null} for none
*/
public java.lang.String getSchemaInline() {
return schemaInline;
}
/**
* [Deprecated] The inline schema. For CSV schemas, specify as "Field1:Type1[,Field2:Type2]*". For
* example, "foo:STRING, bar:INTEGER, baz:FLOAT".
* @param schemaInline schemaInline or {@code null} for none
*/
public JobConfigurationLoad setSchemaInline(java.lang.String schemaInline) {
this.schemaInline = schemaInline;
return this;
}
/**
* [Deprecated] The format of the schemaInline property.
* @return value or {@code null} for none
*/
public java.lang.String getSchemaInlineFormat() {
return schemaInlineFormat;
}
/**
* [Deprecated] The format of the schemaInline property.
* @param schemaInlineFormat schemaInlineFormat or {@code null} for none
*/
public JobConfigurationLoad setSchemaInlineFormat(java.lang.String schemaInlineFormat) {
this.schemaInlineFormat = schemaInlineFormat;
return this;
}
/**
* Allows the schema of the destination table to be updated as a side effect of the load job if a
* schema is autodetected or supplied in the job configuration. Schema update options are
* supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is
* WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition
* decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of
* the following values are specified: * ALLOW_FIELD_ADDITION: allow adding a nullable field to
* the schema. * ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to
* nullable.
* @return value or {@code null} for none
*/
public java.util.List<java.lang.String> getSchemaUpdateOptions() {
return schemaUpdateOptions;
}
/**
* Allows the schema of the destination table to be updated as a side effect of the load job if a
* schema is autodetected or supplied in the job configuration. Schema update options are
* supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is
* WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition
* decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of
* the following values are specified: * ALLOW_FIELD_ADDITION: allow adding a nullable field to
* the schema. * ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to
* nullable.
* @param schemaUpdateOptions schemaUpdateOptions or {@code null} for none
*/
public JobConfigurationLoad setSchemaUpdateOptions(java.util.List<java.lang.String> schemaUpdateOptions) {
this.schemaUpdateOptions = schemaUpdateOptions;
return this;
}
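/*
 * Illustrative sketch: letting an appending load widen the schema, per the javadoc
 * above; both options may be passed together.
 *
 *   load.setWriteDisposition("WRITE_APPEND");
 *   load.setSchemaUpdateOptions(java.util.Arrays.asList(
 *       "ALLOW_FIELD_ADDITION", "ALLOW_FIELD_RELAXATION"));
 */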
/**
* Optional. The number of rows at the top of a CSV file that BigQuery will skip when loading the
* data. The default value is 0. This property is useful if you have header rows in the file that
* should be skipped. When autodetect is on, the behavior is the following: * skipLeadingRows
* unspecified - Autodetect tries to detect headers in the first row. If they are not detected,
* the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows
* is 0 - Instructs autodetect that there are no headers and data should be read starting from the
* first row. * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in
* row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract
* column names for the detected schema.
* @return value or {@code null} for none
*/
public java.lang.Integer getSkipLeadingRows() {
return skipLeadingRows;
}
/**
* Optional. The number of rows at the top of a CSV file that BigQuery will skip when loading the
* data. The default value is 0. This property is useful if you have header rows in the file that
* should be skipped. When autodetect is on, the behavior is the following: * skipLeadingRows
* unspecified - Autodetect tries to detect headers in the first row. If they are not detected,
* the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows
* is 0 - Instructs autodetect that there are no headers and data should be read starting from the
* first row. * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in
* row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract
* column names for the detected schema.
* @param skipLeadingRows skipLeadingRows or {@code null} for none
*/
public JobConfigurationLoad setSkipLeadingRows(java.lang.Integer skipLeadingRows) {
this.skipLeadingRows = skipLeadingRows;
return this;
}
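/*
 * Illustrative sketch: with autodetect on, setting the value to 1 explicitly
 * (rather than leaving it unset) pins the header to row 1, per the rules in the
 * javadoc above.
 *
 *   load.setAutodetect(true);
 *   load.setSkipLeadingRows(1); // row 1 is the header; data starts at row 2
 */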
/**
* Optional. The format of the data files. For CSV files, specify "CSV". For datastore backups,
* specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For
* Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". The default value
* is CSV.
* @return value or {@code null} for none
*/
public java.lang.String getSourceFormat() {
return sourceFormat;
}
/**
* Optional. The format of the data files. For CSV files, specify "CSV". For datastore backups,
* specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For
* Avro, specify "AVRO". For parquet, specify "PARQUET". For orc, specify "ORC". The default value
* is CSV.
* @param sourceFormat sourceFormat or {@code null} for none
*/
public JobConfigurationLoad setSourceFormat(java.lang.String sourceFormat) {
this.sourceFormat = sourceFormat;
return this;
}
/**
* [Required] The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud
* Storage URIs: Each URI can contain one '*' wildcard character and it must come after the
* 'bucket' name. Size limits related to load jobs apply to external data sources. For Google
* Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid
* HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one
* URI can be specified. Also, the '*' wildcard character is not allowed.
* @return value or {@code null} for none
*/
public java.util.List<java.lang.String> getSourceUris() {
return sourceUris;
}
/**
* [Required] The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud
* Storage URIs: Each URI can contain one '*' wildcard character and it must come after the
* 'bucket' name. Size limits related to load jobs apply to external data sources. For Google
* Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid
* HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups: Exactly one
* URI can be specified. Also, the '*' wildcard character is not allowed.
* @param sourceUris sourceUris or {@code null} for none
*/
public JobConfigurationLoad setSourceUris(java.util.List<java.lang.String> sourceUris) {
this.sourceUris = sourceUris;
return this;
}
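/*
 * Illustrative sketch: a Cloud Storage wildcard URI, with the '*' after the bucket
 * name as required above. The bucket and object prefix are placeholders.
 *
 *   load.setSourceUris(java.util.Arrays.asList("gs://my-bucket/exports/part-*.csv"));
 */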
/**
* Time-based partitioning specification for the destination table. Only one of timePartitioning
* and rangePartitioning should be specified.
* @return value or {@code null} for none
*/
public TimePartitioning getTimePartitioning() {
return timePartitioning;
}
/**
* Time-based partitioning specification for the destination table. Only one of timePartitioning
* and rangePartitioning should be specified.
* @param timePartitioning timePartitioning or {@code null} for none
*/
public JobConfigurationLoad setTimePartitioning(TimePartitioning timePartitioning) {
this.timePartitioning = timePartitioning;
return this;
}
/**
* Optional. If sourceFormat is set to "AVRO", indicates whether to interpret logical types as the
* corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for
* example, INTEGER).
* @return value or {@code null} for none
*/
public java.lang.Boolean getUseAvroLogicalTypes() {
return useAvroLogicalTypes;
}
/**
* Optional. If sourceFormat is set to "AVRO", indicates whether to interpret logical types as the
* corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for
* example, INTEGER).
* @param useAvroLogicalTypes useAvroLogicalTypes or {@code null} for none
*/
public JobConfigurationLoad setUseAvroLogicalTypes(java.lang.Boolean useAvroLogicalTypes) {
this.useAvroLogicalTypes = useAvroLogicalTypes;
return this;
}
/**
* Optional. Specifies the action that occurs if the destination table already exists. The
* following values are supported: * WRITE_TRUNCATE: If the table already exists, BigQuery
* overwrites the data, removes the constraints and uses the schema from the load job. *
* WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. *
* WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in
* the job result. The default value is WRITE_APPEND. Each action is atomic and only occurs if
* BigQuery is able to complete the job successfully. Creation, truncation and append actions
* occur as one atomic update upon job completion.
* @return value or {@code null} for none
*/
public java.lang.String getWriteDisposition() {
return writeDisposition;
}
/**
* Optional. Specifies the action that occurs if the destination table already exists. The
* following values are supported: * WRITE_TRUNCATE: If the table already exists, BigQuery
* overwrites the data, removes the constraints and uses the schema from the load job. *
* WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. *
* WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in
* the job result. The default value is WRITE_APPEND. Each action is atomic and only occurs if
* BigQuery is able to complete the job successfully. Creation, truncation and append actions
* occur as one atomic update upon job completion.
* @param writeDisposition writeDisposition or {@code null} for none
*/
public JobConfigurationLoad setWriteDisposition(java.lang.String writeDisposition) {
this.writeDisposition = writeDisposition;
return this;
}
@Override
public JobConfigurationLoad set(String fieldName, Object value) {
return (JobConfigurationLoad) super.set(fieldName, value);
}
@Override
public JobConfigurationLoad clone() {
return (JobConfigurationLoad) super.clone();
}
}
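/*
 * End-to-end sketch (assumes a constructed `Bigquery` client named `bigquery`; the
 * ids are placeholders, and error handling and job polling are omitted): a minimal
 * CSV load wrapped in a Job and submitted through jobs().insert().
 *
 *   JobConfigurationLoad load = new JobConfigurationLoad()
 *       .setSourceUris(java.util.Arrays.asList("gs://my-bucket/exports/part-*.csv"))
 *       .setSourceFormat("CSV")
 *       .setSkipLeadingRows(1)
 *       .setDestinationTable(new TableReference()
 *           .setProjectId("my-project")
 *           .setDatasetId("my_dataset")
 *           .setTableId("my_table"))
 *       .setWriteDisposition("WRITE_TRUNCATE");
 *   Job job = new Job().setConfiguration(new JobConfiguration().setLoad(load));
 *   bigquery.jobs().insert("my-project", job).execute();
 */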