error-conditions.json
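The entries below define Spark's error conditions; at runtime the angle-bracket placeholders (<sqlExpr>, <config>, ...) are substituted with actual values, and the condition name plus SQLSTATE surface through the org.apache.spark.SparkThrowable interface. A minimal sketch of triggering and inspecting one condition from this file (CAST_INVALID_INPUT), assuming Spark 3.5+ on the classpath; the object name, app name, and local master are illustrative only:

// Minimal sketch: trigger and inspect CAST_INVALID_INPUT (assumes Spark 3.5+; names are illustrative).
import org.apache.spark.SparkThrowable
import org.apache.spark.sql.SparkSession

object ErrorConditionDemo {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[1]").appName("error-condition-demo").getOrCreate()
    // With ANSI mode on, a malformed cast raises CAST_INVALID_INPUT instead of returning NULL.
    spark.conf.set("spark.sql.ansi.enabled", "true")
    try {
      spark.sql("SELECT CAST('abc' AS INT)").collect()
    } catch {
      // SparkThrowable exposes the condition name and the SQLSTATE defined in this file (22018 here).
      case e: SparkThrowable => println(s"condition=${e.getErrorClass}, sqlState=${e.getSqlState}")
    }
    spark.stop()
  }
}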
{
"AGGREGATE_FUNCTION_WITH_NONDETERMINISTIC_EXPRESSION" : {
"message" : [
"Non-deterministic expression should not appear in the arguments of an aggregate function."
],
"sqlState" : "42845"
},
"ALL_PARAMETERS_MUST_BE_NAMED" : {
"message" : [
"Using name parameterized queries requires all parameters to be named. Parameters missing names: ."
],
"sqlState" : "07001"
},
"ALL_PARTITION_COLUMNS_NOT_ALLOWED" : {
"message" : [
"Cannot use all columns for partition columns."
],
"sqlState" : "KD005"
},
"ALTER_TABLE_COLUMN_DESCRIPTOR_DUPLICATE" : {
"message" : [
"ALTER TABLE column specifies descriptor \"\" more than once, which is invalid."
],
"sqlState" : "42710"
},
"AMBIGUOUS_ALIAS_IN_NESTED_CTE" : {
"message" : [
"Name is ambiguous in nested CTE.",
"Please set to \"CORRECTED\" so that name defined in inner CTE takes precedence. If set it to \"LEGACY\", outer CTE definitions will take precedence.",
"See '/sql-migration-guide.html#query-engine'."
],
"sqlState" : "42KD0"
},
"AMBIGUOUS_COLUMN_OR_FIELD" : {
"message" : [
"Column or field is ambiguous and has matches."
],
"sqlState" : "42702"
},
"AMBIGUOUS_COLUMN_REFERENCE" : {
"message" : [
"Column is ambiguous. It's because you joined several DataFrame together, and some of these DataFrames are the same.",
"This column points to one of the DataFrames but Spark is unable to figure out which one.",
"Please alias the DataFrames with different names via `DataFrame.alias` before joining them,",
"and specify the column using qualified name, e.g. `df.alias(\"a\").join(df.alias(\"b\"), col(\"a.id\") > col(\"b.id\"))`."
],
"sqlState" : "42702"
},
"AMBIGUOUS_LATERAL_COLUMN_ALIAS" : {
"message" : [
"Lateral column alias is ambiguous and has matches."
],
"sqlState" : "42702"
},
"AMBIGUOUS_REFERENCE" : {
"message" : [
"Reference is ambiguous, could be: ."
],
"sqlState" : "42704"
},
"AMBIGUOUS_REFERENCE_TO_FIELDS" : {
"message" : [
"Ambiguous reference to the field . It appears times in the schema."
],
"sqlState" : "42000"
},
"ARITHMETIC_OVERFLOW" : {
"message" : [
". If necessary set to \"false\" to bypass this error."
],
"sqlState" : "22003"
},
"ASSIGNMENT_ARITY_MISMATCH" : {
"message" : [
"The number of columns or variables assigned or aliased: does not match the number of source expressions: ."
],
"sqlState" : "42802"
},
"AS_OF_JOIN" : {
"message" : [
"Invalid as-of join."
],
"subClass" : {
"TOLERANCE_IS_NON_NEGATIVE" : {
"message" : [
"The input argument `tolerance` must be non-negative."
]
},
"TOLERANCE_IS_UNFOLDABLE" : {
"message" : [
"The input argument `tolerance` must be a constant."
]
},
"UNSUPPORTED_DIRECTION" : {
"message" : [
"Unsupported as-of join direction ''. Supported as-of join direction include: ."
]
}
},
"sqlState" : "42604"
},
"AVRO_INCOMPATIBLE_READ_TYPE" : {
"message" : [
"Cannot convert Avro to SQL because the original encoded data type is , however you're trying to read the field as , which would lead to an incorrect answer.",
"To allow reading this field, enable the SQL configuration: \"spark.sql.legacy.avro.allowIncompatibleSchema\"."
],
"sqlState" : "22KD3"
},
"AVRO_NOT_LOADED_SQL_FUNCTIONS_UNUSABLE" : {
"message" : [
"Cannot call the SQL function because the Avro data source is not loaded.",
"Please restart your job or session with the 'spark-avro' package loaded, such as by using the --packages argument on the command line, and then retry your query or command again."
],
"sqlState" : "22KD3"
},
"BATCH_METADATA_NOT_FOUND" : {
"message" : [
"Unable to find batch ."
],
"sqlState" : "42K03"
},
"BINARY_ARITHMETIC_OVERFLOW" : {
"message" : [
" caused overflow."
],
"sqlState" : "22003"
},
"BOOLEAN_STATEMENT_WITH_EMPTY_ROW" : {
"message" : [
"Boolean statement is invalid. Expected single row with a value of the BOOLEAN type, but got an empty row."
],
"sqlState" : "21000"
},
"CALL_ON_STREAMING_DATASET_UNSUPPORTED" : {
"message" : [
"The method can not be called on streaming Dataset/DataFrame."
],
"sqlState" : "42KDE"
},
"CANNOT_ALTER_COLLATION_BUCKET_COLUMN" : {
"message" : [
"ALTER TABLE (ALTER|CHANGE) COLUMN cannot change collation of type/subtypes of bucket columns, but found the bucket column in the table ."
],
"sqlState" : "428FR"
},
"CANNOT_ALTER_PARTITION_COLUMN" : {
"message" : [
"ALTER TABLE (ALTER|CHANGE) COLUMN is not supported for partition columns, but found the partition column in the table ."
],
"sqlState" : "428FR"
},
"CANNOT_ASSIGN_EVENT_TIME_COLUMN_WITHOUT_WATERMARK" : {
"message" : [
"Watermark needs to be defined to reassign event time column. Failed to find watermark definition in the streaming query."
],
"sqlState" : "42611"
},
"CANNOT_CAST_DATATYPE" : {
"message" : [
"Cannot cast to ."
],
"sqlState" : "42846"
},
"CANNOT_CONVERT_PROTOBUF_FIELD_TYPE_TO_SQL_TYPE" : {
"message" : [
"Cannot convert Protobuf to SQL because schema is incompatible (protobufType = , sqlType = )."
],
"sqlState" : "42846"
},
"CANNOT_CONVERT_PROTOBUF_MESSAGE_TYPE_TO_SQL_TYPE" : {
"message" : [
"Unable to convert of Protobuf to SQL type ."
],
"sqlState" : "42846"
},
"CANNOT_CONVERT_SQL_TYPE_TO_PROTOBUF_FIELD_TYPE" : {
"message" : [
"Cannot convert SQL to Protobuf because schema is incompatible (protobufType = , sqlType = )."
],
"sqlState" : "42846"
},
"CANNOT_CONVERT_SQL_VALUE_TO_PROTOBUF_ENUM_TYPE" : {
"message" : [
"Cannot convert SQL to Protobuf because is not in defined values for enum: ."
],
"sqlState" : "42846"
},
"CANNOT_CREATE_DATA_SOURCE_TABLE" : {
"message" : [
"Failed to create data source table :"
],
"subClass" : {
"EXTERNAL_METADATA_UNSUPPORTED" : {
"message" : [
"provider '' does not support external metadata but a schema is provided. Please remove the schema when creating the table."
]
}
},
"sqlState" : "42KDE"
},
"CANNOT_DECODE_URL" : {
"message" : [
"The provided URL cannot be decoded: . Please ensure that the URL is properly formatted and try again."
],
"sqlState" : "22546"
},
"CANNOT_INVOKE_IN_TRANSFORMATIONS" : {
"message" : [
"Dataset transformations and actions can only be invoked by the driver, not inside of other Dataset transformations; for example, dataset1.map(x => dataset2.values.count() * x) is invalid because the values transformation and count action cannot be performed inside of the dataset1.map transformation. For more information, see SPARK-28702."
],
"sqlState" : "0A000"
},
"CANNOT_LOAD_FUNCTION_CLASS" : {
"message" : [
"Cannot load class when registering the function , please make sure it is on the classpath."
],
"sqlState" : "46103"
},
"CANNOT_LOAD_PROTOBUF_CLASS" : {
"message" : [
"Could not load Protobuf class with name . ."
],
"sqlState" : "42K03"
},
"CANNOT_LOAD_STATE_STORE" : {
"message" : [
"An error occurred during loading state."
],
"subClass" : {
"CANNOT_READ_CHECKPOINT" : {
"message" : [
"Cannot read RocksDB checkpoint metadata. Expected , but found ."
]
},
"CANNOT_READ_DELTA_FILE_KEY_SIZE" : {
"message" : [
"Error reading delta file of : key size cannot be ."
]
},
"CANNOT_READ_DELTA_FILE_NOT_EXISTS" : {
"message" : [
"Error reading delta file of : does not exist."
]
},
"CANNOT_READ_MISSING_SNAPSHOT_FILE" : {
"message" : [
"Error reading snapshot file of : does not exist."
]
},
"CANNOT_READ_SNAPSHOT_FILE_KEY_SIZE" : {
"message" : [
"Error reading snapshot file of : key size cannot be ."
]
},
"CANNOT_READ_SNAPSHOT_FILE_VALUE_SIZE" : {
"message" : [
"Error reading snapshot file of : value size cannot be ."
]
},
"CANNOT_READ_STREAMING_STATE_FILE" : {
"message" : [
"Error reading streaming state file of does not exist. If the stream job is restarted with a new or updated state operation, please create a new checkpoint location or clear the existing checkpoint location."
]
},
"HDFS_STORE_PROVIDER_OUT_OF_MEMORY" : {
"message" : [
"Could not load HDFS state store with id because of an out of memory exception."
]
},
"INVALID_CHANGE_LOG_READER_VERSION" : {
"message" : [
"The change log reader version cannot be ."
]
},
"INVALID_CHANGE_LOG_WRITER_VERSION" : {
"message" : [
"The change log writer version cannot be ."
]
},
"ROCKSDB_STORE_PROVIDER_OUT_OF_MEMORY" : {
"message" : [
"Could not load RocksDB state store with id because of an out of memory exception."
]
},
"SNAPSHOT_PARTITION_ID_NOT_FOUND" : {
"message" : [
"Partition id not found for state of operator at ."
]
},
"UNCATEGORIZED" : {
"message" : [
""
]
},
"UNEXPECTED_FILE_SIZE" : {
"message" : [
"Copied to , expected bytes, found bytes."
]
},
"UNEXPECTED_VERSION" : {
"message" : [
"Version cannot be because it is less than 0."
]
},
"UNRELEASED_THREAD_ERROR" : {
"message" : [
": RocksDB instance could not be acquired by for operationType= as it was not released by after ms.",
"Thread holding the lock has trace: "
]
}
},
"sqlState" : "58030"
},
"CANNOT_MERGE_INCOMPATIBLE_DATA_TYPE" : {
"message" : [
"Failed to merge incompatible data types and . Please check the data types of the columns being merged and ensure that they are compatible. If necessary, consider casting the columns to compatible data types before attempting the merge."
],
"sqlState" : "42825"
},
"CANNOT_MERGE_SCHEMAS" : {
"message" : [
"Failed merging schemas:",
"Initial schema:",
"",
"Schema that cannot be merged with the initial schema:",
"."
],
"sqlState" : "42KD9"
},
"CANNOT_MODIFY_CONFIG" : {
"message" : [
"Cannot modify the value of the Spark config: .",
"See also '/sql-migration-guide.html#ddl-statements'."
],
"sqlState" : "46110"
},
"CANNOT_PARSE_DECIMAL" : {
"message" : [
"Cannot parse decimal. Please ensure that the input is a valid number with optional decimal point or comma separators."
],
"sqlState" : "22018"
},
"CANNOT_PARSE_INTERVAL" : {
"message" : [
"Unable to parse . Please ensure that the value provided is in a valid format for defining an interval. You can reference the documentation for the correct format. If the issue persists, please double check that the input value is not null or empty and try again."
],
"sqlState" : "22006"
},
"CANNOT_PARSE_JSON_FIELD" : {
"message" : [
"Cannot parse the field name and the value of the JSON token type to target Spark data type ."
],
"sqlState" : "2203G"
},
"CANNOT_PARSE_PROTOBUF_DESCRIPTOR" : {
"message" : [
"Error parsing descriptor bytes into Protobuf FileDescriptorSet."
],
"sqlState" : "22018"
},
"CANNOT_PARSE_TIMESTAMP" : {
"message" : [
". If necessary set to \"false\" to bypass this error."
],
"sqlState" : "22007"
},
"CANNOT_RECOGNIZE_HIVE_TYPE" : {
"message" : [
"Cannot recognize hive type string: , column: . The specified data type for the field cannot be recognized by Spark SQL. Please check the data type of the specified field and ensure that it is a valid Spark SQL data type. Refer to the Spark SQL documentation for a list of valid data types and their format. If the data type is correct, please ensure that you are using a supported version of Spark SQL."
],
"sqlState" : "429BB"
},
"CANNOT_RENAME_ACROSS_SCHEMA" : {
"message" : [
"Renaming a across schemas is not allowed."
],
"sqlState" : "0AKD0"
},
"CANNOT_RESOLVE_DATAFRAME_COLUMN" : {
"message" : [
"Cannot resolve dataframe column . It's probably because of illegal references like `df1.select(df2.col(\"a\"))`."
],
"sqlState" : "42704"
},
"CANNOT_RESOLVE_STAR_EXPAND" : {
"message" : [
"Cannot resolve .* given input columns . Please check that the specified table or struct exists and is accessible in the input columns."
],
"sqlState" : "42704"
},
"CANNOT_RESTORE_PERMISSIONS_FOR_PATH" : {
"message" : [
"Failed to set permissions on created path back to ."
],
"sqlState" : "58030"
},
"CANNOT_UPDATE_FIELD" : {
"message" : [
"Cannot update field type:"
],
"subClass" : {
"ARRAY_TYPE" : {
"message" : [
"Update the element by updating .element."
]
},
"INTERVAL_TYPE" : {
"message" : [
"Update an interval by updating its fields."
]
},
"MAP_TYPE" : {
"message" : [
"Update a map by updating .key or .value."
]
},
"STRUCT_TYPE" : {
"message" : [
"Update a struct by updating its fields."
]
},
"USER_DEFINED_TYPE" : {
"message" : [
"Update a UserDefinedType[] by updating its fields."
]
}
},
"sqlState" : "0A000"
},
"CANNOT_UP_CAST_DATATYPE" : {
"message" : [
"Cannot up cast from to .",
""
],
"sqlState" : "42846"
},
"CANNOT_USE_KRYO" : {
"message" : [
"Cannot load Kryo serialization codec. Kryo serialization cannot be used in the Spark Connect client. Use Java serialization, provide a custom Codec, or use Spark Classic instead."
],
"sqlState" : "22KD3"
},
"CANNOT_WRITE_STATE_STORE" : {
"message" : [
"Error writing state store files for provider ."
],
"subClass" : {
"CANNOT_COMMIT" : {
"message" : [
"Cannot perform commit during state checkpoint."
]
}
},
"sqlState" : "58030"
},
"CAST_INVALID_INPUT" : {
"message" : [
"The value of the type cannot be cast to because it is malformed. Correct the value as per the syntax, or change its target type. Use `try_cast` to tolerate malformed input and return NULL instead."
],
"sqlState" : "22018"
},
"CAST_OVERFLOW" : {
"message" : [
"The value of the type cannot be cast to due to an overflow. Use `try_cast` to tolerate overflow and return NULL instead."
],
"sqlState" : "22003"
},
"CAST_OVERFLOW_IN_TABLE_INSERT" : {
"message" : [
"Fail to assign a value of type to the type column or variable due to an overflow. Use `try_cast` on the input value to tolerate overflow and return NULL instead."
],
"sqlState" : "22003"
},
"CATALOG_NOT_FOUND" : {
"message" : [
"The catalog not found. Consider to set the SQL config to a catalog plugin."
],
"sqlState" : "42P08"
},
"CHECKPOINT_RDD_BLOCK_ID_NOT_FOUND" : {
"message" : [
"Checkpoint block not found!",
"Either the executor that originally checkpointed this partition is no longer alive, or the original RDD is unpersisted.",
"If this problem persists, you may consider using `rdd.checkpoint()` instead, which is slower than local checkpointing but more fault-tolerant."
],
"sqlState" : "56000"
},
"CLASS_NOT_OVERRIDE_EXPECTED_METHOD" : {
"message" : [
" must override either or ."
],
"sqlState" : "38000"
},
"CLASS_UNSUPPORTED_BY_MAP_OBJECTS" : {
"message" : [
"`MapObjects` does not support the class as resulting collection."
],
"sqlState" : "0A000"
},
"CLUSTERING_COLUMNS_MISMATCH" : {
"message" : [
"Specified clustering does not match that of the existing table .",
"Specified clustering columns: [].",
"Existing clustering columns: []."
],
"sqlState" : "42P10"
},
"CLUSTERING_NOT_SUPPORTED" : {
"message" : [
"'' does not support clustering."
],
"sqlState" : "42000"
},
"CODEC_NOT_AVAILABLE" : {
"message" : [
"The codec is not available."
],
"subClass" : {
"WITH_AVAILABLE_CODECS_SUGGESTION" : {
"message" : [
"Available codecs are ."
]
},
"WITH_CONF_SUGGESTION" : {
"message" : [
"Consider to set the config to ."
]
}
},
"sqlState" : "56038"
},
"CODEC_SHORT_NAME_NOT_FOUND" : {
"message" : [
"Cannot find a short name for the codec ."
],
"sqlState" : "42704"
},
"COLLATION_INVALID_NAME" : {
"message" : [
"The value does not represent a correct collation name. Suggested valid collation names: []."
],
"sqlState" : "42704"
},
"COLLATION_INVALID_PROVIDER" : {
"message" : [
"The value does not represent a correct collation provider. Supported providers are: []."
],
"sqlState" : "42704"
},
"COLLATION_MISMATCH" : {
"message" : [
"Could not determine which collation to use for string functions and operators."
],
"subClass" : {
"EXPLICIT" : {
"message" : [
"Error occurred due to the mismatch between explicit collations: []. Decide on a single explicit collation and remove others."
]
},
"IMPLICIT" : {
"message" : [
"Error occurred due to the mismatch between multiple implicit non-default collations. Use COLLATE function to set the collation explicitly."
]
}
},
"sqlState" : "42P21"
},
"COLLECTION_SIZE_LIMIT_EXCEEDED" : {
"message" : [
"Can't create array with elements which exceeding the array size limit ,"
],
"subClass" : {
"FUNCTION" : {
"message" : [
"unsuccessful try to create arrays in the function ."
]
},
"INITIALIZE" : {
"message" : [
"cannot initialize an array with specified parameters."
]
},
"PARAMETER" : {
"message" : [
"the value of parameter(s) in the function is invalid."
]
}
},
"sqlState" : "54000"
},
"COLUMN_ALIASES_NOT_ALLOWED" : {
"message" : [
"Column aliases are not allowed in ."
],
"sqlState" : "42601"
},
"COLUMN_ALREADY_EXISTS" : {
"message" : [
"The column already exists. Choose another name or rename the existing column."
],
"sqlState" : "42711"
},
"COLUMN_NOT_DEFINED_IN_TABLE" : {
"message" : [
" column is not defined in table , defined table columns are: ."
],
"sqlState" : "42703"
},
"COLUMN_NOT_FOUND" : {
"message" : [
"The column cannot be found. Verify the spelling and correctness of the column name according to the SQL config ."
],
"sqlState" : "42703"
},
"COMPARATOR_RETURNS_NULL" : {
"message" : [
"The comparator has returned a NULL for a comparison between and .",
"It should return a positive integer for \"greater than\", 0 for \"equal\" and a negative integer for \"less than\".",
"To revert to deprecated behavior where NULL is treated as 0 (equal), you must set \"spark.sql.legacy.allowNullComparisonResultInArraySort\" to \"true\"."
],
"sqlState" : "22004"
},
"COMPLEX_EXPRESSION_UNSUPPORTED_INPUT" : {
"message" : [
"Cannot process input data types for the expression: ."
],
"subClass" : {
"MISMATCHED_TYPES" : {
"message" : [
"All input types must be the same except nullable, containsNull, valueContainsNull flags, but found the input types ."
]
},
"NO_INPUTS" : {
"message" : [
"The collection of input data types must not be empty."
]
}
},
"sqlState" : "42K09"
},
"CONCURRENT_QUERY" : {
"message" : [
"Another instance of this query was just started by a concurrent session."
],
"sqlState" : "0A000"
},
"CONCURRENT_STREAM_LOG_UPDATE" : {
"message" : [
"Concurrent update to the log. Multiple streaming jobs detected for .",
"Please make sure only one streaming job runs on a specific checkpoint location at a time."
],
"sqlState" : "40000"
},
"CONFLICTING_PARTITION_COLUMN_NAMES" : {
"message" : [
"Conflicting partition column names detected:",
"",
"For partitioned table directories, data files should only live in leaf directories.",
"And directories at the same level should have the same partition column name.",
"Please check the following directories for unexpected files or inconsistent partition column names:",
""
],
"sqlState" : "KD009"
},
"CONNECT" : {
"message" : [
"Generic Spark Connect error."
],
"subClass" : {
"INTERCEPTOR_CTOR_MISSING" : {
"message" : [
"Cannot instantiate GRPC interceptor because is missing a default constructor without arguments."
]
},
"INTERCEPTOR_RUNTIME_ERROR" : {
"message" : [
"Error instantiating GRPC interceptor: "
]
},
"PLUGIN_CTOR_MISSING" : {
"message" : [
"Cannot instantiate Spark Connect plugin because is missing a default constructor without arguments."
]
},
"PLUGIN_RUNTIME_ERROR" : {
"message" : [
"Error instantiating Spark Connect plugin: "
]
},
"SESSION_NOT_SAME" : {
"message" : [
"Both Datasets must belong to the same SparkSession."
]
}
},
"sqlState" : "56K00"
},
"CONVERSION_INVALID_INPUT" : {
"message" : [
"The value () cannot be converted to because it is malformed. Correct the value as per the syntax, or change its format. Use to tolerate malformed input and return NULL instead."
],
"sqlState" : "22018"
},
"CREATE_PERMANENT_VIEW_WITHOUT_ALIAS" : {
"message" : [
"Not allowed to create the permanent view without explicitly assigning an alias for the expression ."
],
"sqlState" : "0A000"
},
"CREATE_TABLE_COLUMN_DESCRIPTOR_DUPLICATE" : {
"message" : [
"CREATE TABLE column specifies descriptor \"\" more than once, which is invalid."
],
"sqlState" : "42710"
},
"CREATE_VIEW_COLUMN_ARITY_MISMATCH" : {
"message" : [
"Cannot create view , the reason is"
],
"subClass" : {
"NOT_ENOUGH_DATA_COLUMNS" : {
"message" : [
"not enough data columns:",
"View columns: .",
"Data columns: ."
]
},
"TOO_MANY_DATA_COLUMNS" : {
"message" : [
"too many data columns:",
"View columns: .",
"Data columns: ."
]
}
},
"sqlState" : "21S01"
},
"DATATYPE_MISMATCH" : {
"message" : [
"Cannot resolve due to data type mismatch:"
],
"subClass" : {
"ARRAY_FUNCTION_DIFF_TYPES" : {
"message" : [
"Input to should have been followed by a value with same element type, but it's [, ]."
]
},
"BINARY_ARRAY_DIFF_TYPES" : {
"message" : [
"Input to function should have been two with same element type, but it's [, ]."
]
},
"BINARY_OP_DIFF_TYPES" : {
"message" : [
"the left and right operands of the binary operator have incompatible types ( and )."
]
},
"BINARY_OP_WRONG_TYPE" : {
"message" : [
"the binary operator requires the input type , not ."
]
},
"BLOOM_FILTER_BINARY_OP_WRONG_TYPE" : {
"message" : [
"The Bloom filter binary input to should be either a constant value or a scalar subquery expression, but it's ."
]
},
"BLOOM_FILTER_WRONG_TYPE" : {
"message" : [
"Input to function should have been followed by value with , but it's []."
]
},
"CANNOT_CONVERT_TO_JSON" : {
"message" : [
"Unable to convert column of type to JSON."
]
},
"CANNOT_DROP_ALL_FIELDS" : {
"message" : [
"Cannot drop all fields in struct."
]
},
"CAST_WITHOUT_SUGGESTION" : {
"message" : [
"cannot cast to ."
]
},
"CAST_WITH_CONF_SUGGESTION" : {
"message" : [
"cannot cast to with ANSI mode on.",
"If you have to cast to , you can set as ."
]
},
"CAST_WITH_FUNC_SUGGESTION" : {
"message" : [
"cannot cast to .",
"To convert values from to , you can use the functions instead."
]
},
"CREATE_MAP_KEY_DIFF_TYPES" : {
"message" : [
"The given keys of function should all be the same type, but they are ."
]
},
"CREATE_MAP_VALUE_DIFF_TYPES" : {
"message" : [
"The given values of function should all be the same type, but they are ."
]
},
"CREATE_NAMED_STRUCT_WITHOUT_FOLDABLE_STRING" : {
"message" : [
"Only foldable `STRING` expressions are allowed to appear at odd position, but they are ."
]
},
"DATA_DIFF_TYPES" : {
"message" : [
"Input to should all be the same type, but it's ."
]
},
"FILTER_NOT_BOOLEAN" : {
"message" : [
"Filter expression of type is not a boolean."
]
},
"HASH_MAP_TYPE" : {
"message" : [
"Input to the function cannot contain elements of the \"MAP\" type. In Spark, same maps may have different hashcode, thus hash expressions are prohibited on \"MAP\" elements. To restore previous behavior set \"spark.sql.legacy.allowHashOnMapType\" to \"true\"."
]
},
"HASH_VARIANT_TYPE" : {
"message" : [
"Input to the function cannot contain elements of the \"VARIANT\" type yet."
]
},
"INPUT_SIZE_NOT_ONE" : {
"message" : [
"Length of should be 1."
]
},
"INVALID_ARG_VALUE" : {
"message" : [
"The value must to be a literal of , but got ."
]
},
"INVALID_JSON_MAP_KEY_TYPE" : {
"message" : [
"Input schema can only contain STRING as a key type for a MAP."
]
},
"INVALID_JSON_SCHEMA" : {
"message" : [
"Input schema must be a struct, an array, a map or a variant."
]
},
"INVALID_MAP_KEY_TYPE" : {
"message" : [
"The key of map cannot be/contain ."
]
},
"INVALID_ORDERING_TYPE" : {
"message" : [
"The