org.apache.hadoop.mapred.MultiFileInputFormat Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of hadoop-apache Show documentation
Show all versions of hadoop-apache Show documentation
Shaded version of Apache Hadoop for Presto
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
/**
* An abstract {@link InputFormat} that returns {@link MultiFileSplit}'s
* in {@link #getSplits(JobConf, int)} method. Splits are constructed from
* the files under the input paths. Each split returned contains nearly
* equal content length.
* Subclasses implement {@link #getRecordReader(InputSplit, JobConf, Reporter)}
* to construct RecordReader
's for MultiFileSplit
's.
* @see MultiFileSplit
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class MultiFileInputFormat
extends FileInputFormat {
@Override
public InputSplit[] getSplits(JobConf job, int numSplits)
throws IOException {
Path[] paths = FileUtil.stat2Paths(listStatus(job));
List splits = new ArrayList(Math.min(numSplits, paths.length));
if (paths.length != 0) {
// HADOOP-1818: Manage splits only if there are paths
long[] lengths = new long[paths.length];
long totLength = 0;
for(int i=0; i= goalLength) {
return i - startIndex + 1;
}
}
return lengths.length - startIndex;
}
@Override
public abstract RecordReader getRecordReader(InputSplit split,
JobConf job, Reporter reporter)
throws IOException;
}