org.apache.parquet.hadoop.util.H2SeekableInputStream

/*
 *  Licensed to the Apache Software Foundation (ASF) under one
 *  or more contributor license agreements.  See the NOTICE file
 *  distributed with this work for additional information
 *  regarding copyright ownership.  The ASF licenses this file
 *  to you under the Apache License, Version 2.0 (the
 *  "License"); you may not use this file except in compliance
 *  with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing,
 *  software distributed under the License is distributed on an
 *  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 *  KIND, either express or implied.  See the License for the
 *  specific language governing permissions and limitations
 *  under the License.
 */

package org.apache.parquet.hadoop.util;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.parquet.io.DelegatingSeekableInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;

/**
 * SeekableInputStream implementation for Hadoop 2 FSDataInputStream instances
 * whose wrapped stream implements ByteBufferReadable.
 */
class H2SeekableInputStream extends DelegatingSeekableInputStream {

  // Visible for testing
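  // Abstracts the ByteBuffer read call so the readFully loop below can be
  // exercised in tests without a real Hadoop stream.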
  interface Reader {
    int read(ByteBuffer buf) throws IOException;
  }

  private final FSDataInputStream stream;
  private final Reader reader;

  public H2SeekableInputStream(FSDataInputStream stream) {
    super(stream);
    this.stream = stream;
    this.reader = new H2Reader();
  }

  @Override
  public void close() throws IOException {
    stream.close();
  }

  @Override
  public long getPos() throws IOException {
    return stream.getPos();
  }

  @Override
  public void seek(long newPos) throws IOException {
    stream.seek(newPos);
  }

  @Override
  public void readFully(byte[] bytes, int start, int len) throws IOException {
    stream.readFully(bytes, start, len);
  }

  @Override
  public int read(ByteBuffer buf) throws IOException {
    return stream.read(buf);
  }

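  // Delegates to the static fill loop below through H2Reader, so a single
  // short read(ByteBuffer) never surfaces as a partially filled buffer.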
  @Override
  public void readFully(ByteBuffer buf) throws IOException {
    readFully(reader, buf);
  }

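  // Adapter exposing the stream's ByteBuffer read through the Reader interface.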
  private class H2Reader implements Reader {
    @Override
    public int read(ByteBuffer buf) throws IOException {
      return stream.read(buf);
    }
  }

  public static void readFully(Reader reader, ByteBuffer buf) throws IOException {
    // Unfortunately the Hadoop APIs have no 'readFully' equivalent for the
    // ByteBuffer read calls: read(ByteBuffer) may read fewer than buf.remaining()
    // bytes, so we have to loop until the buffer is full.
    while (buf.hasRemaining()) {
      int readCount = reader.read(buf);
      if (readCount == -1) {
        // This is probably a bug in the ParquetReader: readFully should not be
        // called with a buffer that has more bytes remaining than the stream contains.
        throw new EOFException("Reached the end of stream. Still have: " + buf.remaining() + " bytes left");
      }
    }
  }
}
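
For reference, a minimal sketch of the fill loop in isolation. H2SeekableInputStreamExample and ChunkedReader are hypothetical names, not part of parquet-mr, and the example has to live in org.apache.parquet.hadoop.util because the class and its Reader interface are package-private. ChunkedReader mimics a short-read-prone stream that returns at most a few bytes per call, to show that readFully still fills the whole buffer.

package org.apache.parquet.hadoop.util;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

class H2SeekableInputStreamExample {

  // Hypothetical Reader that serves its data at most 'chunkSize' bytes per
  // read(ByteBuffer) call, mimicking a stream that honors short reads.
  static class ChunkedReader implements H2SeekableInputStream.Reader {
    private final byte[] data;
    private final int chunkSize;
    private int pos = 0;

    ChunkedReader(byte[] data, int chunkSize) {
      this.data = data;
      this.chunkSize = chunkSize;
    }

    @Override
    public int read(ByteBuffer buf) {
      if (pos >= data.length) {
        return -1; // end of stream
      }
      int n = Math.min(chunkSize, Math.min(buf.remaining(), data.length - pos));
      buf.put(data, pos, n);
      pos += n;
      return n;
    }
  }

  public static void main(String[] args) throws IOException {
    byte[] data = "parquet".getBytes(StandardCharsets.UTF_8);
    ByteBuffer buf = ByteBuffer.allocate(data.length);

    // Each read returns at most 3 bytes, but readFully keeps looping until
    // the buffer has no bytes remaining.
    H2SeekableInputStream.readFully(new ChunkedReader(data, 3), buf);

    buf.flip();
    System.out.println(new String(buf.array(), 0, buf.limit(), StandardCharsets.UTF_8)); // parquet
  }
}

Sizing the buffer larger than the available data would instead make the loop hit the -1 return and throw the EOFException shown above, which is exactly the "caller asked for more than the stream holds" case the comment describes.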