
eu.dicodeproject.analysis.wordcount.WordCountDriver


The examples module provides glue code for extracting common phrases, keyword distributions, and more from tweets stored on HDFS/HBase. It builds on Mahout for more sophisticated analysis.

/**
 * Copyright (C) 2010, 2011 Neofonie GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package eu.dicodeproject.analysis.wordcount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.ToolRunner;
import org.apache.mahout.common.AbstractJob;

import java.io.IOException;
import java.util.Map;

/**
 * Simple "word count" for HBase columns: aggregates values from a configurable
 * HBase table and column.
 *
 * TODO: add filters, e.g. for language.
 */
public final class WordCountDriver extends AbstractJob {

  private static final String TARGET_WORDS_CONF_KEY = "targetWords";

  public static void main(String[] args) throws Exception {
    // Propagate the tool's return value as the process exit code so callers
    // (shell scripts, workflow engines) can detect failure.
    System.exit(ToolRunner.run(new WordCountDriver(), args));
  }
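
  /*
   * Example invocation (jar name and option values are hypothetical; every
   * option falls back to the defaults declared in run() below):
   *
   *   hadoop jar dicode-examples.jar eu.dicodeproject.analysis.wordcount.WordCountDriver \
   *       --inputTable thtweets --family d --textColumn text \
   *       --timeColumn creationDate --outputTable thtweets-wordCounts \
   *       --targetWords Berlin,Paris,Athens
   */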

  @Override
  public int run(String[] args) throws ClassNotFoundException, InterruptedException, IOException {
    addOption("inputTable",   "i",  "The hbase table holding our data.",                            "thtweets");
    addOption("family",       "f",  "The column family holding our data.",                          "d");
    addOption("textColumn",   "c",  "The column qualifier holding our text.",                       "text");
    addOption("timeColumn",   "d",  "The column qualifier holding our creationDate.",               "creationDate");
    addOption("outputTable",  "t",  "The resulting hbase table in which the histogram is written.", "thtweets-wordCounts");
    addOption("targetWords",  "w",  "The comma-separated targetWords that should be searched for.", "Berlin,Paris,Athens");

    Map<String, String> argMap = parseArguments(args);
    if (argMap == null) {
      return -1;
    }

    String inputTable = argMap.get("--inputTable");
    String dataFamily = argMap.get("--family");
    String textColumn = argMap.get("--textColumn");
    String timeColumn = argMap.get("--timeColumn");
    String outputTable = argMap.get("--outputTable");
    String targetWords = argMap.get("--targetWords");

    checkWords(targetWords); // TODO: implement searching for phrases

    boolean success = generateWordHistogramData(inputTable, dataFamily, textColumn, targetWords, timeColumn, outputTable);

    return (success ? 0 : 1);
  }

  private boolean generateWordHistogramData(String inputTable, String dataFamily, String textColumn,
                                            String targetWords, String timeColumn, String outputTable)
      throws IOException, InterruptedException, ClassNotFoundException {

    Configuration conf = HBaseConfiguration.create();
    conf.set(TARGET_WORDS_CONF_KEY, targetWords.replace(",", " ")); // commas -> whitespace so the analyzer yields separate tokens
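    // WordCountMapper is expected to read the word list back from the job
    // Configuration under this key and split it on whitespace.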
    Job job = new Job(conf, "WordCountDriver::GenerateWordCountData");
    job.setJarByClass(WordCountDriver.class);

    Scan scan = new Scan();
    scan.addColumn(Bytes.toBytes(dataFamily), Bytes.toBytes(textColumn));
    scan.addColumn(Bytes.toBytes(dataFamily), Bytes.toBytes(timeColumn));
    scan.setMaxVersions(1);
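    // The column and version restrictions above limit what the region servers
    // ship into the mappers to just the text and creation-date cells.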

    TableMapReduceUtil.initTableMapperJob(inputTable, scan, WordCountMapper.class, Text.class, IntWritable.class, job);
    TableMapReduceUtil.initTableReducerJob(outputTable, WordCountReducer.class, job);
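    // initTableMapperJob wires TableInputFormat to the scan above and registers
    // the mapper with its output types (Text/IntWritable); initTableReducerJob
    // points TableOutputFormat at outputTable so the reducer can write Puts.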

    job.setNumReduceTasks(10); // reduce-phase parallelism is hard-coded; tune to cluster size

    return job.waitForCompletion(true);
  }

  private void checkWords(String words) {
    for (String word : words.split(",")) {
      if (word.trim().contains(" ")) {
        throw new IllegalArgumentException("only single words are allowed as targets; got " + word);
      }
    }
  }

}
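
The WordCountMapper and WordCountReducer classes referenced by the driver live elsewhere in the examples module and are not part of this listing. To illustrate the data flow, here is a minimal sketch of what the map side might look like, assuming the mapper hard-codes the driver's default family and qualifier ("d"/"text"), tokenizes tweet text on whitespace, and emits a count of 1 per matching target word; the class name WordCountMapperSketch and all implementation details are hypothetical:

package eu.dicodeproject.analysis.wordcount;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

/** Hypothetical sketch of the map side; not the module's actual implementation. */
public class WordCountMapperSketch extends TableMapper<Text, IntWritable> {

  private static final IntWritable ONE = new IntWritable(1);
  private final Text word = new Text();
  private Set<String> targetWords;

  @Override
  protected void setup(Context context) {
    // Read back the whitespace-separated target words set by the driver.
    String words = context.getConfiguration().get("targetWords", "");
    targetWords = new HashSet<String>(Arrays.asList(words.split("\\s+")));
  }

  @Override
  protected void map(ImmutableBytesWritable row, Result columns, Context context)
      throws IOException, InterruptedException {
    // Assumes the driver's default column family/qualifier; the real mapper
    // may obtain these differently.
    byte[] raw = columns.getValue(Bytes.toBytes("d"), Bytes.toBytes("text"));
    if (raw == null) {
      return;
    }
    for (String token : Bytes.toString(raw).split("\\s+")) {
      if (targetWords.contains(token)) {
        word.set(token);
        context.write(word, ONE); // (word, 1) pairs, summed per word downstream
      }
    }
  }
}

The reducer side would then sum the IntWritable values per word and write each total as a Put to the output table handed to initTableReducerJob.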



