/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;

import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
/**
 * <code>InputFormat</code> describes the input-specification for a
 * Map-Reduce job.
 *
 * <p>The Map-Reduce framework relies on the <code>InputFormat</code> of the
 * job to:</p>
 * <ol>
 *   <li>
 *   Validate the input-specification of the job.
 *   </li>
 *   <li>
 *   Split up the input file(s) into logical {@link InputSplit}s, each of
 *   which is then assigned to an individual {@link Mapper}.
 *   </li>
 *   <li>
 *   Provide the {@link RecordReader} implementation to be used to glean
 *   input records from the logical <code>InputSplit</code> for processing by
 *   the {@link Mapper}.
 *   </li>
 * </ol>
 *
 * <p>The default behavior of file-based {@link InputFormat}s, typically
 * sub-classes of {@link FileInputFormat}, is to split the
 * input into <i>logical</i> {@link InputSplit}s based on the total size, in
 * bytes, of the input files. However, the {@link FileSystem} blocksize of
 * the input files is treated as an upper bound for input splits. A lower bound
 * on the split size can be set via
 * <code>mapreduce.input.fileinputformat.split.minsize</code>.</p>
 *
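 * <p>For example, a job driver might choose an <code>InputFormat</code> and
 * raise that lower bound like this (illustrative snippet only;
 * <code>MyDriver</code> is a placeholder class name):
 * <pre>{@code
 * JobConf job = new JobConf(MyDriver.class);
 * job.setInputFormat(TextInputFormat.class);
 * FileInputFormat.setInputPaths(job, new Path("/data/input"));
 * // raise the lower bound to 64 MB; the filesystem block size
 * // remains the upper bound on split sizes
 * job.setLong("mapreduce.input.fileinputformat.split.minsize",
 *             64L * 1024 * 1024);
 * }</pre>
 *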
 * <p>Clearly, logical splits based on input size are insufficient for many
 * applications since record boundaries must be respected. In such cases, the
 * application also has to implement a {@link RecordReader}, which bears the
 * responsibility to respect record boundaries and present a record-oriented
 * view of the logical <code>InputSplit</code> to the individual task.</p>
*
* @see InputSplit
* @see RecordReader
* @see JobClient
* @see FileInputFormat
* @deprecated Use {@link org.apache.hadoop.mapreduce.InputFormat} instead.
*/
@Deprecated
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface InputFormat<K, V> {
  /**
   * Logically split the set of input files for the job.
   *
   * <p>Each {@link InputSplit} is then assigned to an individual {@link Mapper}
   * for processing.</p>
   *
   * <p><i>Note</i>: The split is a <i>logical</i> split of the inputs and the
   * input files are not physically split into chunks. For example, a split
   * could be a <i>&lt;input-file-path, start, offset&gt;</i> tuple.
   *
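   * <p>For file-based formats, such a tuple is typically materialized as a
   * {@link FileSplit}, for example (illustrative values):
   * <pre>{@code
   * // bytes [0, 128 MB) of one input file, with preferred host locations
   * InputSplit split = new FileSplit(
   *     new Path("/data/input/part-00000"), 0L, 128L * 1024 * 1024,
   *     new String[] { "datanode1", "datanode2" });
   * }</pre>
   *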
   * @param job job configuration.
   * @param numSplits the desired number of splits; a hint.
   * @return an array of {@link InputSplit}s for the job.
   * @throws IOException if the input cannot be split.
   */
  InputSplit[] getSplits(JobConf job, int numSplits) throws IOException;
  /**
   * Get the {@link RecordReader} for the given {@link InputSplit}.
   *
   * <p>It is the responsibility of the <code>RecordReader</code> to respect
   * record boundaries while processing the logical split to present a
   * record-oriented view to the individual task.</p>
   *
   * @param split the {@link InputSplit}
   * @param job the job that this split belongs to
   * @param reporter facility to report progress.
   * @return a {@link RecordReader}
   * @throws IOException if the reader cannot be created for the split.
   */
  RecordReader<K, V> getRecordReader(InputSplit split,
                                     JobConf job,
                                     Reporter reporter) throws IOException;
}
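
/*
 * Illustrative sketch only, not part of the original Hadoop source: the
 * smallest possible implementation of the interface above, producing one
 * empty placeholder split per requested split and a RecordReader that
 * serves a single hard-coded record. The class name, placeholder paths,
 * and record contents are invented for the example; production code would
 * normally extend FileInputFormat rather than implement InputFormat
 * directly.
 */
class SingleRecordInputFormat
    implements InputFormat<org.apache.hadoop.io.LongWritable,
                           org.apache.hadoop.io.Text> {

  /** One empty, placeholder split per requested split. */
  public InputSplit[] getSplits(JobConf job, int numSplits)
      throws IOException {
    InputSplit[] splits = new InputSplit[Math.max(1, numSplits)];
    for (int i = 0; i < splits.length; i++) {
      // A FileSplit is the usual <path, start, length> tuple; the path
      // here is a placeholder and is never actually opened by this sketch.
      splits[i] = new FileSplit(
          new org.apache.hadoop.fs.Path("/placeholder-" + i), 0L, 0L,
          new String[0]);
    }
    return splits;
  }

  /** Serves exactly one record, then reports the split as exhausted. */
  public RecordReader<org.apache.hadoop.io.LongWritable,
                      org.apache.hadoop.io.Text>
      getRecordReader(InputSplit split, JobConf job, Reporter reporter)
      throws IOException {
    return new RecordReader<org.apache.hadoop.io.LongWritable,
                            org.apache.hadoop.io.Text>() {
      private boolean consumed = false;

      public boolean next(org.apache.hadoop.io.LongWritable key,
                          org.apache.hadoop.io.Text value) {
        if (consumed) {
          return false; // no more records in this split
        }
        key.set(0L);
        value.set("single record per split"); // invented example payload
        consumed = true;
        return true;
      }

      public org.apache.hadoop.io.LongWritable createKey() {
        return new org.apache.hadoop.io.LongWritable();
      }

      public org.apache.hadoop.io.Text createValue() {
        return new org.apache.hadoop.io.Text();
      }

      public long getPos() { return consumed ? 1L : 0L; }

      public float getProgress() { return consumed ? 1.0f : 0.0f; }

      public void close() { }
    };
  }
}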