/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hudi.metadata;
import org.apache.hudi.avro.model.HoodieMetadataColumnStats;
import org.apache.hudi.common.bloom.BloomFilter;
import org.apache.hudi.common.data.HoodieData;
import org.apache.hudi.common.engine.HoodieEngineContext;
import org.apache.hudi.common.fs.FSUtils;
import org.apache.hudi.common.model.HoodiePartitionMetadata;
import org.apache.hudi.common.model.HoodieRecord;
import org.apache.hudi.common.model.HoodieRecordGlobalLocation;
import org.apache.hudi.common.table.HoodieTableConfig;
import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.StringUtils;
import org.apache.hudi.common.util.collection.Pair;
import org.apache.hudi.exception.HoodieIOException;
import org.apache.hudi.exception.HoodieMetadataException;
import org.apache.hudi.exception.TableNotFoundException;
import org.apache.hudi.expression.BindVisitor;
import org.apache.hudi.expression.Expression;
import org.apache.hudi.expression.PartialBindVisitor;
import org.apache.hudi.expression.Predicates;
import org.apache.hudi.internal.schema.Types;
import org.apache.hudi.storage.HoodieStorage;
import org.apache.hudi.storage.HoodieStorageUtils;
import org.apache.hudi.storage.StoragePath;
import org.apache.hudi.storage.StoragePathInfo;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
 * Implementation of {@link HoodieTableMetadata} that is backed directly by the file system:
 * partitions and files are discovered by listing paths rather than by reading the metadata table.
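 * <p>
 * Example usage (an illustrative sketch; {@code engineContext} and {@code storage} stand for an
 * existing {@link org.apache.hudi.common.engine.HoodieEngineContext} and
 * {@link org.apache.hudi.storage.HoodieStorage}, and the base path below is hypothetical):
 * <pre>{@code
 * FileSystemBackedTableMetadata metadata =
 *     new FileSystemBackedTableMetadata(engineContext, storage, "/tmp/hudi/my_table");
 * List<String> partitions = metadata.getAllPartitionPaths();
 * }</pre>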
*/
public class FileSystemBackedTableMetadata extends AbstractHoodieTableMetadata {
private static final int DEFAULT_LISTING_PARALLELISM = 1500;
private final boolean hiveStylePartitioningEnabled;
private final boolean urlEncodePartitioningEnabled;
public FileSystemBackedTableMetadata(HoodieEngineContext engineContext, HoodieTableConfig tableConfig,
HoodieStorage storage, String datasetBasePath) {
super(engineContext, storage, datasetBasePath);
this.hiveStylePartitioningEnabled = Boolean.parseBoolean(tableConfig.getHiveStylePartitioningEnable());
this.urlEncodePartitioningEnabled = Boolean.parseBoolean(tableConfig.getUrlEncodePartitioning());
}
public FileSystemBackedTableMetadata(HoodieEngineContext engineContext,
HoodieStorage storage,
String datasetBasePath) {
super(engineContext, storage, datasetBasePath);
StoragePath metaPath =
new StoragePath(dataBasePath, HoodieTableMetaClient.METAFOLDER_NAME);
TableNotFoundException.checkTableValidity(storage, this.dataBasePath, metaPath);
HoodieTableConfig tableConfig = new HoodieTableConfig(storage, metaPath, null, null);
this.hiveStylePartitioningEnabled =
Boolean.parseBoolean(tableConfig.getHiveStylePartitioningEnable());
this.urlEncodePartitioningEnabled =
Boolean.parseBoolean(tableConfig.getUrlEncodePartitioning());
}
public HoodieStorage getStorage() {
if (storage == null) {
storage = HoodieStorageUtils.getStorage(dataBasePath, storageConf);
}
return storage;
}
@Override
  public List<StoragePathInfo> getAllFilesInPartition(StoragePath partitionPath) throws IOException {
return FSUtils.getAllDataFilesInPartition(getStorage(), partitionPath);
}
@Override
  public List<String> getAllPartitionPaths() throws IOException {
return getPartitionPathWithPathPrefixes(Collections.singletonList(""));
}
@Override
  public List<String> getPartitionPathWithPathPrefixUsingFilterExpression(List<String> relativePathPrefixes,
                                                                          Types.RecordType partitionFields,
                                                                          Expression expression) throws IOException {
return relativePathPrefixes.stream().flatMap(relativePathPrefix -> {
try {
return getPartitionPathWithPathPrefixUsingFilterExpression(relativePathPrefix,
partitionFields, expression).stream();
} catch (IOException e) {
throw new HoodieIOException("Error fetching partition paths with relative path: " + relativePathPrefix, e);
}
}).collect(Collectors.toList());
}
@Override
  public List<String> getPartitionPathWithPathPrefixes(List<String> relativePathPrefixes) {
return relativePathPrefixes.stream().flatMap(relativePathPrefix -> {
try {
return getPartitionPathWithPathPrefix(relativePathPrefix).stream();
} catch (IOException e) {
throw new HoodieIOException("Error fetching partition paths with relative path: " + relativePathPrefix, e);
}
}).collect(Collectors.toList());
}
  private List<String> getPartitionPathWithPathPrefix(String relativePathPrefix) throws IOException {
return getPartitionPathWithPathPrefixUsingFilterExpression(relativePathPrefix, null, null);
}
  private List<String> getPartitionPathWithPathPrefixUsingFilterExpression(String relativePathPrefix,
                                                                           Types.RecordType partitionFields,
                                                                           Expression pushedExpr) throws IOException {
    List<StoragePath> pathsToList = new CopyOnWriteArrayList<>();
pathsToList.add(StringUtils.isNullOrEmpty(relativePathPrefix)
? dataBasePath : new StoragePath(dataBasePath, relativePathPrefix));
    List<String> partitionPaths = new CopyOnWriteArrayList<>();
int currentPartitionLevel = -1;
boolean needPushDownExpressions;
Expression fullBoundExpr;
    // Unlike `HoodieBackedTableMetadata`, we don't know the exact number of partition levels here.
    // Since partition values may themselves contain `/`, listing paths alone cannot reliably
    // recover each `partitionValue`, so we are stricter and only push down expressions when
    // `urlEncodePartitioningEnabled` is turned on.
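    // For example (hypothetical layout): with hive-style partitioning and URL encoding enabled,
    // a `date` value of "2023/01/01" is stored as the single path segment `date=2023%2F01%2F01`,
    // so every path level still corresponds to exactly one partition column.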
// TODO better enable urlEncodePartitioningEnabled if hiveStylePartitioningEnabled is enabled?
if (hiveStylePartitioningEnabled && urlEncodePartitioningEnabled
&& pushedExpr != null && partitionFields != null) {
currentPartitionLevel = getPathPartitionLevel(partitionFields, relativePathPrefix);
needPushDownExpressions = true;
fullBoundExpr = pushedExpr.accept(new BindVisitor(partitionFields, caseSensitive));
} else {
fullBoundExpr = Predicates.alwaysTrue();
needPushDownExpressions = false;
}
while (!pathsToList.isEmpty()) {
// TODO: Get the parallelism from HoodieWriteConfig
int listingParallelism = Math.min(DEFAULT_LISTING_PARALLELISM, pathsToList.size());
// List all directories in parallel
engineContext.setJobStatus(this.getClass().getSimpleName(),
"Listing all partitions with prefix " + relativePathPrefix);
// Need to use serializable file status here, see HUDI-5936
      List<StoragePathInfo> dirToFileListing = engineContext.flatMap(pathsToList, path -> {
try {
return getStorage().listDirectEntries(path).stream();
} catch (FileNotFoundException e) {
// The partition may have been cleaned.
return Stream.empty();
}
}, listingParallelism);
pathsToList.clear();
      // If the current directory contains partition metadata, add it to the result;
      // otherwise, queue it to be listed further.
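      // For example (hypothetical layout): listing `region=US` may return the directory
      // `region=US/date=2023-01-01`; if it contains a `.hoodie_partition_metadata` file it is
      // recorded as a partition, otherwise it is queued for another round of listing.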
int fileListingParallelism = Math.min(DEFAULT_LISTING_PARALLELISM, dirToFileListing.size());
if (!dirToFileListing.isEmpty()) {
        // `result` below holds a list of pairs: the first element optionally holds a deduced
        // partition path, and the second element optionally holds a directory path that still
        // needs to be listed.
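        // e.g. a directory holding partition metadata maps to Pair.of(Option.of(relativePartitionPath), Option.empty()),
        // while a plain sub-directory maps to Pair.of(Option.empty(), Option.of(path)).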
engineContext.setJobStatus(this.getClass().getSimpleName(), "Processing listed partitions");
        List<Pair<Option<String>, Option<StoragePath>>> result =
engineContext.map(dirToFileListing,
fileInfo -> {
StoragePath path = fileInfo.getPath();
if (fileInfo.isDirectory()) {
if (HoodiePartitionMetadata.hasPartitionMetadata(getStorage(), path)) {
return Pair.of(
Option.of(FSUtils.getRelativePartitionPath(dataBasePath,
path)),
Option.empty());
} else if (!path.getName().equals(HoodieTableMetaClient.METAFOLDER_NAME)) {
return Pair.of(Option.empty(), Option.of(path));
}
} else if (path.getName()
.startsWith(HoodiePartitionMetadata.HOODIE_PARTITION_METAFILE_PREFIX)) {
String partitionName =
FSUtils.getRelativePartitionPath(dataBasePath,
path.getParent());
return Pair.of(Option.of(partitionName), Option.empty());
}
return Pair.of(Option.empty(), Option.empty());
}, fileListingParallelism);
partitionPaths.addAll(result.stream().filter(entry -> entry.getKey().isPresent())
.map(entry -> entry.getKey().get())
.filter(relativePartitionPath -> fullBoundExpr instanceof Predicates.TrueExpression
|| (Boolean) fullBoundExpr.eval(
extractPartitionValues(partitionFields, relativePartitionPath,
urlEncodePartitioningEnabled)))
.collect(Collectors.toList()));
Expression partialBoundExpr;
        // If partitionPaths is non-empty, we're already at the last path level, and all paths
        // have been filtered already.
if (needPushDownExpressions && partitionPaths.isEmpty()) {
          // Here we assume the path level matches the number of partition columns, so we'll rebuild
          // a new schema based on the current path level.
          // e.g. if the partition columns are <region, date> and we're listing the second level,
          // then currentSchema would be <region>.
          // `PartialBindVisitor` binds a reference if it can be resolved from `currentSchema`;
          // otherwise it rewrites that expression to `alwaysTrue`. See `PartialBindVisitor` for details.
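          // e.g. for a pushed expression like `region = 'US' AND date = '2023-01-01'` evaluated at
          // the first level, only the `region` reference can be bound; the unresolvable `date`
          // predicate becomes `alwaysTrue`, keeping the filter correct but less selective.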
Types.RecordType currentSchema = Types.RecordType.get(partitionFields.fields().subList(0, ++currentPartitionLevel));
PartialBindVisitor partialBindVisitor = new PartialBindVisitor(currentSchema, caseSensitive);
partialBoundExpr = pushedExpr.accept(partialBindVisitor);
} else {
partialBoundExpr = Predicates.alwaysTrue();
}
pathsToList.addAll(result.stream().filter(entry -> entry.getValue().isPresent()).map(entry -> entry.getValue().get())
.filter(path -> partialBoundExpr instanceof Predicates.TrueExpression
|| (Boolean) partialBoundExpr.eval(
extractPartitionValues(partitionFields, FSUtils.getRelativePartitionPath(dataBasePath, path), urlEncodePartitioningEnabled)))
.collect(Collectors.toList()));
}
}
return partitionPaths;
}
@Override
  public Map<String, List<StoragePathInfo>> getAllFilesInPartitions(Collection<String> partitionPaths)
throws IOException {
if (partitionPaths == null || partitionPaths.isEmpty()) {
return Collections.emptyMap();
}
int parallelism = Math.min(DEFAULT_LISTING_PARALLELISM, partitionPaths.size());
engineContext.setJobStatus(this.getClass().getSimpleName(),
"Listing all files in " + partitionPaths.size() + " partitions");
// Need to use serializable file status here, see HUDI-5936
    List<Pair<String, List<StoragePathInfo>>> partitionToFiles =
engineContext.map(new ArrayList<>(partitionPaths),
partitionPathStr -> {
StoragePath partitionPath = new StoragePath(partitionPathStr);
return Pair.of(partitionPathStr,
FSUtils.getAllDataFilesInPartition(getStorage(), partitionPath));
}, parallelism);
return partitionToFiles.stream().collect(Collectors.toMap(pair -> pair.getLeft(),
pair -> pair.getRight()));
}
@Override
  public Option<String> getSyncedInstantTime() {
throw new UnsupportedOperationException();
}
@Override
  public Option<String> getLatestCompactionTime() {
throw new UnsupportedOperationException();
}
@Override
public void close() throws Exception {
// no-op
}
@Override
public void reset() {
// no-op
}
  public Option<BloomFilter> getBloomFilter(final String partitionName, final String fileName)
throws HoodieMetadataException {
throw new HoodieMetadataException("Unsupported operation: getBloomFilter for " + fileName);
}
@Override
  public Map<Pair<String, String>, BloomFilter> getBloomFilters(final List<Pair<String, String>> partitionNameFileNameList)
throws HoodieMetadataException {
throw new HoodieMetadataException("Unsupported operation: getBloomFilters!");
}
@Override
  public Map<Pair<String, String>, HoodieMetadataColumnStats> getColumnStats(final List<Pair<String, String>> partitionNameFileNameList, final String columnName)
throws HoodieMetadataException {
throw new HoodieMetadataException("Unsupported operation: getColumnsStats!");
}
@Override
  public HoodieData<HoodieRecord<HoodieMetadataPayload>> getRecordsByKeyPrefixes(List<String> keyPrefixes, String partitionName, boolean shouldLoadInMemory) {
throw new HoodieMetadataException("Unsupported operation: getRecordsByKeyPrefixes!");
}
@Override
  public Map<String, List<HoodieRecord<HoodieMetadataPayload>>> getAllRecordsByKeys(List<String> keys, String partitionName) {
throw new HoodieMetadataException("Unsupported operation: getAllRecordsByKeys!");
}
@Override
  public Map<String, List<HoodieRecordGlobalLocation>> readRecordIndex(List<String> recordKeys) {
throw new HoodieMetadataException("Unsupported operation: readRecordIndex!");
}
@Override
  public Map<String, List<HoodieRecordGlobalLocation>> readSecondaryIndex(List<String> secondaryKeys, String partitionName) {
throw new HoodieMetadataException("Unsupported operation: readSecondaryIndex!");
}
@Override
public int getNumFileGroupsForPartition(MetadataPartitionType partition) {
throw new HoodieMetadataException("Unsupported operation: getNumFileGroupsForPartition");
}
}