/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.serde2.avro;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.rmi.server.UID;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;

import org.apache.avro.Schema;
import org.apache.avro.Schema.Type;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericData.Fixed;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.util.Utf8;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.common.type.HiveChar;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.common.type.HiveVarchar;
import org.apache.hadoop.hive.serde2.objectinspector.StandardUnionObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaHiveDecimalObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.UnionTypeInfo;
import org.apache.hadoop.io.Writable;

class AvroDeserializer {
  private static final Log LOG = LogFactory.getLog(AvroDeserializer.class);

  /**
   * Set of already seen and valid record reader IDs which don't need re-encoding.
   */
  private final HashSet<UID> noEncodingNeeded = new HashSet<UID>();

  /**
   * Map from record reader ID to the associated re-encoder. It contains only the
   * record readers whose records need to be re-encoded.
   */
  private final HashMap<UID, SchemaReEncoder> reEncoderCache = new HashMap<UID, SchemaReEncoder>();

  /**
   * Flag to print the re-encoding warning message only once. Avoids excessive
   * logging for each re-encoded record.
   */
  private static boolean warnedOnce = false;

  /**
   * When encountering a record with an older schema than the one we're trying
   * to read, it is necessary to re-encode the record with a reader against the
   * newer schema. Because Hive doesn't provide a way to pass extra information
   * to the InputFormat, we're unable to provide the newer schema when we have it
   * and it would be most useful - when the InputFormat is reading the file.
   *
   * This is a slow process, so we try to cache as many of the objects as
   * possible; a sketch of how the caches above are consulted follows this class.
   */
  static class SchemaReEncoder {
    private final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    private final GenericDatumWriter<GenericRecord> gdw = new GenericDatumWriter<GenericRecord>();
    private BinaryDecoder binaryDecoder = null;

    GenericDatumReader<GenericRecord> gdr = null;

    public SchemaReEncoder(Schema writer, Schema reader) {
      gdr = new GenericDatumReader<GenericRecord>(writer, reader);
    }

    public GenericRecord reencode(GenericRecord r) throws AvroSerdeException {
      baos.reset();

      BinaryEncoder be = EncoderFactory.get().directBinaryEncoder(baos, null);
      gdw.setSchema(r.getSchema());
      try {
        // Serialize the record with its own (writer) schema...
        gdw.write(r, be);
        ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());

        // ...then read it back against the reader schema, reusing the decoder across calls.
        binaryDecoder = DecoderFactory.defaultFactory().createBinaryDecoder(bais, binaryDecoder);
        return gdr.read(r, binaryDecoder);
      } catch (IOException e) {
        throw new AvroSerdeException("Exception trying to re-encode record to new schema", e);
      }
    }
  }

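  /*
   * Illustrative sketch (not part of the upstream class): one way the caches above
   * can be consulted during deserialization. The AvroGenericRecordWritable
   * accessors (getRecord(), getRecordReaderID()) and the readerSchema variable
   * reflect how this deserializer is typically driven; treat the exact names as
   * assumptions.
   *
   *   GenericRecord r = recordWritable.getRecord();
   *   UID recordReaderId = recordWritable.getRecordReaderID();
   *   if (!noEncodingNeeded.contains(recordReaderId)) {
   *     if (r.getSchema().equals(readerSchema)) {
   *       // Schemas already match: remember this reader and skip re-encoding from now on.
   *       noEncodingNeeded.add(recordReaderId);
   *     } else {
   *       // Build the re-encoder once per record reader and cache it for later records.
   *       SchemaReEncoder reEncoder = reEncoderCache.get(recordReaderId);
   *       if (reEncoder == null) {
   *         reEncoder = new SchemaReEncoder(r.getSchema(), readerSchema);
   *         reEncoderCache.put(recordReaderId, reEncoder);
   *       }
   *       r = reEncoder.reencode(r);
   *     }
   *   }
   */
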
  private List<Object> row;