/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.trino.plugin.hudi;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import io.trino.filesystem.FileIterator;
import io.trino.filesystem.Location;
import io.trino.filesystem.TrinoFileSystem;
import io.trino.metastore.Column;
import io.trino.metastore.HivePartition;
import io.trino.plugin.hive.HiveColumnHandle;
import io.trino.plugin.hive.HivePartitionKey;
import io.trino.plugin.hive.HivePartitionManager;
import io.trino.plugin.hudi.model.HudiFileFormat;
import io.trino.plugin.hudi.table.HudiTableMetaClient;
import io.trino.spi.TrinoException;
import io.trino.spi.connector.SchemaTableName;
import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.NullableValue;
import io.trino.spi.predicate.TupleDomain;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import static io.trino.plugin.hive.HiveErrorCode.HIVE_INVALID_METADATA;
import static io.trino.plugin.hive.util.HiveUtil.checkCondition;
import static io.trino.plugin.hudi.HudiErrorCode.HUDI_FILESYSTEM_ERROR;
import static io.trino.plugin.hudi.HudiErrorCode.HUDI_UNSUPPORTED_FILE_FORMAT;
import static io.trino.plugin.hudi.table.HudiTableMetaClient.METAFOLDER_NAME;
/**
 * Static helpers shared by the Hudi connector: base-file format detection,
 * Hudi table-layout detection, partition-predicate evaluation, and
 * {@link HudiTableMetaClient} construction.
 */
public final class HudiUtil
{
    private HudiUtil() {}

    /**
     * Resolves the Hudi base-file format from a file path's extension.
     *
     * @param path full path or file name of a Hudi base file
     * @return the matching {@link HudiFileFormat}
     * @throws TrinoException with {@code HUDI_UNSUPPORTED_FILE_FORMAT} if the
     *         extension matches none of the known base-file formats
     */
    public static HudiFileFormat getHudiFileFormat(String path)
    {
        String extension = getFileExtension(path);
        if (extension.equals(HudiFileFormat.PARQUET.getFileExtension())) {
            return HudiFileFormat.PARQUET;
        }
        if (extension.equals(HudiFileFormat.HOODIE_LOG.getFileExtension())) {
            return HudiFileFormat.HOODIE_LOG;
        }
        if (extension.equals(HudiFileFormat.ORC.getFileExtension())) {
            return HudiFileFormat.ORC;
        }
        if (extension.equals(HudiFileFormat.HFILE.getFileExtension())) {
            return HudiFileFormat.HFILE;
        }
        throw new TrinoException(HUDI_UNSUPPORTED_FILE_FORMAT, "Hoodie InputFormat not implemented for base file of type " + extension);
    }

    /**
     * Returns the extension of the file name (including the leading dot),
     * or the empty string if the name contains no dot.
     */
    private static String getFileExtension(String fullName)
    {
        String fileName = Location.of(fullName).fileName();
        int dotIndex = fileName.lastIndexOf('.');
        return dotIndex == -1 ? "" : fileName.substring(dotIndex);
    }

    /**
     * Checks whether {@code baseLocation} contains a Hudi metadata folder
     * ({@code .hoodie}) with at least one file, which identifies a valid Hudi table.
     *
     * @throws TrinoException with {@code HUDI_FILESYSTEM_ERROR} if listing fails
     */
    public static boolean hudiMetadataExists(TrinoFileSystem trinoFileSystem, Location baseLocation)
    {
        try {
            Location metaLocation = baseLocation.appendPath(METAFOLDER_NAME);
            FileIterator iterator = trinoFileSystem.listFiles(metaLocation);
            // If there is at least one file in the .hoodie directory, it's a valid Hudi table
            return iterator.hasNext();
        }
        catch (IOException e) {
            throw new TrinoException(HUDI_FILESYSTEM_ERROR, "Failed to check for Hudi table at location: " + baseLocation, e);
        }
    }

    /**
     * Parses a Hive-style partition name (e.g. {@code ds=2024-01-01/country=US})
     * and tests it against the given constraint summary.
     *
     * @param hivePartitionName partition name as stored in the metastore
     * @return {@code true} if the partition can match the constraints and must be scanned
     */
    public static boolean partitionMatchesPredicates(
            SchemaTableName tableName,
            String hivePartitionName,
            List<HiveColumnHandle> partitionColumnHandles,
            TupleDomain<HiveColumnHandle> constraintSummary)
    {
        HivePartition partition = HivePartitionManager.parsePartition(
                tableName, hivePartitionName, partitionColumnHandles);
        return partitionMatches(partitionColumnHandles, constraintSummary, partition);
    }

    /**
     * Tests an already-parsed partition against the constraint summary.
     * A partition matches when every partition-column value is included in that
     * column's allowed domain; columns without a constraint are unrestricted.
     *
     * @return {@code false} if the constraint is NONE or any column value falls
     *         outside its allowed domain; {@code true} otherwise
     */
    public static boolean partitionMatches(List<HiveColumnHandle> partitionColumns, TupleDomain<HiveColumnHandle> constraintSummary, HivePartition partition)
    {
        if (constraintSummary.isNone()) {
            // NONE means no row can match; prune the partition outright
            return false;
        }
        Map<HiveColumnHandle, Domain> domains = constraintSummary.getDomains().orElseGet(ImmutableMap::of);
        for (HiveColumnHandle column : partitionColumns) {
            // parsePartition populates a key for every partition column, so value is expected non-null
            NullableValue value = partition.getKeys().get(column);
            Domain allowedDomain = domains.get(column);
            if (allowedDomain != null && !allowedDomain.includesNullableValue(value.getValue())) {
                return false;
            }
        }
        return true;
    }

    /**
     * Zips partition column definitions with their values into {@link HivePartitionKey}s.
     *
     * @throws TrinoException with {@code HIVE_INVALID_METADATA} if the key and
     *         value lists differ in size
     */
    public static List<HivePartitionKey> buildPartitionKeys(List<Column> keys, List<String> values)
    {
        checkCondition(keys.size() == values.size(), HIVE_INVALID_METADATA,
                "Expected %s partition key values, but got %s. Keys: %s, Values: %s.",
                keys.size(), values.size(), keys, values);
        ImmutableList.Builder<HivePartitionKey> partitionKeys = ImmutableList.builder();
        for (int i = 0; i < keys.size(); i++) {
            String name = keys.get(i).getName();
            String value = values.get(i);
            partitionKeys.add(new HivePartitionKey(name, value));
        }
        return partitionKeys.build();
    }

    /**
     * Builds a {@link HudiTableMetaClient} rooted at {@code basePath} using the
     * given file system for all metadata access.
     */
    public static HudiTableMetaClient buildTableMetaClient(
            TrinoFileSystem fileSystem,
            String basePath)
    {
        return HudiTableMetaClient.builder()
                .setTrinoFileSystem(fileSystem)
                .setBasePath(Location.of(basePath))
                .build();
    }
}