
eu.dicodeproject.analysis.hbase.HBaseDocumentProcessor


The examples module provides glue code for extracting common phrases, keyword distributions, and more from tweets stored on HDFS/HBase. It builds on Mahout for more sophisticated analysis.

/**
 * Copyright (C) 2010, 2011 Neofonie GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package eu.dicodeproject.analysis.hbase;

import java.io.IOException;
import java.io.StringReader;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.mahout.common.StringTuple;
import org.apache.mahout.vectorizer.DefaultAnalyzer;
import org.apache.mahout.vectorizer.DocumentProcessor;

public final class HBaseDocumentProcessor {
  public static final String ANALYZER_CLASS = "analyzer.class";

  private HBaseDocumentProcessor() {

  }

  /** Maps each scanned cell to a {@link StringTuple} of its analyzer tokens. */
  private static class HBaseDocumentProcessorMapper extends TableMapper<Text, StringTuple> {
    private Analyzer analyzer;

    @Override
    public void setup(Context context) throws IOException, InterruptedException {
      super.setup(context);
      try {
        ClassLoader ccl = Thread.currentThread().getContextClassLoader();
        Class<?> cl = ccl.loadClass(context.getConfiguration().get(DocumentProcessor.ANALYZER_CLASS,
            DefaultAnalyzer.class.getName()));
        analyzer = (Analyzer) cl.newInstance();
      } catch (ClassNotFoundException e) {
        throw new IllegalStateException(e);
      } catch (InstantiationException e) {
        throw new IllegalStateException(e);
      } catch (IllegalAccessException e) {
        throw new IllegalStateException(e);
      }
    }

    @Override
    public void map(ImmutableBytesWritable row, Result values, Context context) throws IOException,
        InterruptedException {
      // A Result may carry several cells; tokenize the value of every cell the scan returned.
      for (KeyValue keyValue : values.list()) {
        String key = new String(keyValue.getKey());
        String value = new String(keyValue.getValue());
        // Run the configured Lucene analyzer over the cell value and collect every
        // non-empty token into a StringTuple, Mahout's tokenized-document representation.
        TokenStream stream = analyzer.tokenStream(key, new StringReader(value));
        TermAttribute termAtt = stream.addAttribute(TermAttribute.class);
        StringTuple document = new StringTuple();
        while (stream.incrementToken()) {
          if (termAtt.termLength() > 0) {
            document.add(new String(termAtt.termBuffer(), 0, termAtt.termLength()));
          }
        }
        context.write(new Text(key), document);
      }
    }
  }

  /**
   * Runs a map-only job that tokenizes the given HBase column with the supplied Lucene
   * analyzer and writes Text/StringTuple pairs to sequence files under the output path.
   */
  public static void tokenizeDocuments(String table, String family, String column,
      Class<? extends Analyzer> analyzerClass, Path output) throws IOException, InterruptedException,
      ClassNotFoundException {
    Configuration conf = HBaseConfiguration.create();
    // Pass the analyzer class to the mappers via the job configuration.
    conf.set(ANALYZER_CLASS, analyzerClass.getName());

    Job job = new Job(conf);
    job.setJarByClass(HBaseDocumentProcessor.class);

    // Scan only the single family:qualifier column that holds the document text.
    Scan scan = new Scan();
    scan.addColumn(Bytes.toBytes(family), Bytes.toBytes(column));
    TableMapReduceUtil.initTableMapperJob(table, scan, HBaseDocumentProcessorMapper.class, Text.class,
        StringTuple.class, job);

    job.setJobName("HBaseDocumentProcessor::DocumentTokenizer");
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(StringTuple.class);

    FileOutputFormat.setOutputPath(job, output);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);

    // Map-only job: the tokenized documents are written directly by the mappers.
    job.setNumReduceTasks(0);

    job.waitForCompletion(true);
  }
}
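
For illustration, a minimal driver might invoke tokenizeDocuments along the following lines. The driver class name, table name, column family/qualifier, and output path are hypothetical placeholders, not values shipped with the module; only HBaseDocumentProcessor and DefaultAnalyzer come from the code above.

// Hypothetical driver; adjust table, family, column, and output path to your own setup.
public class TokenizeTweetsDriver {
  public static void main(String[] args) throws Exception {
    eu.dicodeproject.analysis.hbase.HBaseDocumentProcessor.tokenizeDocuments(
        "tweets",                                            // HBase table holding the documents
        "content",                                           // column family
        "text",                                              // column qualifier with the tweet text
        org.apache.mahout.vectorizer.DefaultAnalyzer.class,  // Lucene analyzer to apply
        new org.apache.hadoop.fs.Path("/user/dicode/tokenized-tweets"));  // sequence file output dir
  }
}

The resulting sequence files of Text/StringTuple pairs are in the tokenized-document form that Mahout's downstream vectorization steps (for example DictionaryVectorizer) expect as input.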



