
org.apache.spark.examples.JavaPageRank Maven / Gradle / Ivy
Artifact: snappy-spark-examples_2.10 (SnappyData distributed data store and execution engine)
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples;
import scala.Tuple2;
import com.google.common.collect.Iterables;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFlatMapFunction;
import org.apache.spark.api.java.function.PairFunction;
import java.util.ArrayList;
import java.util.List;
import java.util.Iterator;
import java.util.regex.Pattern;
/**
* Computes the PageRank of URLs from an input file. The input file should
* be in the format:
*     URL         neighbor URL
*     URL         neighbor URL
*     URL         neighbor URL
*     ...
* where a URL and its neighbor are separated by space(s).
*
* This is an example implementation for learning how to use Spark. For more conventional use,
* please refer to org.apache.spark.graphx.lib.PageRank
*/
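// Illustrative only (not part of the original source): a tiny input file in the
// format described above, using hypothetical page names. Each line is one
// directed edge from a URL to a neighbor URL it links to:
//
//   page_a page_b
//   page_a page_c
//   page_b page_a
//   page_c page_a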
public final class JavaPageRank {
  private static final Pattern SPACES = Pattern.compile("\\s+");

  static void showWarning() {
    String warning = "WARN: This is a naive implementation of PageRank " +
      "and is given as an example! \n" +
      "Please use the PageRank implementation found in " +
      "org.apache.spark.graphx.lib.PageRank for more conventional use.";
    System.err.println(warning);
  }

  private static class Sum implements Function2<Double, Double, Double> {
    @Override
    public Double call(Double a, Double b) {
      return a + b;
    }
  }

  public static void main(String[] args) throws Exception {
    if (args.length < 2) {
      System.err.println("Usage: JavaPageRank <file> <number_of_iterations>");
      System.exit(1);
    }

    showWarning();

    SparkConf sparkConf = new SparkConf().setAppName("JavaPageRank");
    JavaSparkContext ctx = new JavaSparkContext(sparkConf);

    // Loads the input file. It should be in the format:
    //     URL         neighbor URL
    //     URL         neighbor URL
    //     URL         neighbor URL
    //     ...
    JavaRDD<String> lines = ctx.textFile(args[0], 1);

    // Loads all URLs from the input file and builds their neighbor (outgoing-link) lists.
    JavaPairRDD<String, Iterable<String>> links = lines.mapToPair(new PairFunction<String, String, String>() {
      @Override
      public Tuple2<String, String> call(String s) {
        String[] parts = SPACES.split(s);
        return new Tuple2<String, String>(parts[0], parts[1]);
      }
    }).distinct().groupByKey().cache();

    // Initializes the rank of every URL that has outgoing links to one.
    JavaPairRDD<String, Double> ranks = links.mapValues(new Function<Iterable<String>, Double>() {
      @Override
      public Double call(Iterable<String> rs) {
        return 1.0;
      }
    });

    // Calculates and updates URL ranks iteratively using the PageRank algorithm.
    for (int current = 0; current < Integer.parseInt(args[1]); current++) {
      // Calculates each URL's contribution to the rank of the URLs it links to.
      JavaPairRDD<String, Double> contribs = links.join(ranks).values()
        .flatMapToPair(new PairFlatMapFunction<Tuple2<Iterable<String>, Double>, String, Double>() {
          @Override
          public Iterable<Tuple2<String, Double>> call(Tuple2<Iterable<String>, Double> s) {
            int urlCount = Iterables.size(s._1);
            List<Tuple2<String, Double>> results = new ArrayList<Tuple2<String, Double>>();
            for (String n : s._1) {
              results.add(new Tuple2<String, Double>(n, s._2() / urlCount));
            }
            return results;
          }
        });

      // Re-calculates URL ranks based on neighbor contributions.
      ranks = contribs.reduceByKey(new Sum()).mapValues(new Function<Double, Double>() {
        @Override
        public Double call(Double sum) {
          return 0.15 + sum * 0.85;
        }
      });
    }

    // Collects all URL ranks and dumps them to the console.
    List<Tuple2<String, Double>> output = ranks.collect();
    for (Tuple2<?,?> tuple : output) {
      System.out.println(tuple._1() + " has rank: " + tuple._2() + ".");
    }

    ctx.stop();
  }
}
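The per-iteration update above, 0.15 + sum * 0.85, is the damped PageRank formula with damping factor 0.85: every URL contributes its current rank divided by its out-degree to each URL it links to, and each URL's new rank is 0.15 plus 0.85 times the sum of the contributions it receives. The following is a minimal, self-contained sketch (not part of the artifact) of a single such iteration over a tiny hard-coded graph, using plain Java collections instead of RDDs; the class name PageRankSketch and the page names are hypothetical and only serve to make the arithmetic concrete.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public final class PageRankSketch {
  public static void main(String[] args) {
    // Hypothetical link graph: each page mapped to the pages it links to.
    Map<String, List<String>> links = new HashMap<String, List<String>>();
    links.put("page_a", Arrays.asList("page_b", "page_c"));
    links.put("page_b", Arrays.asList("page_a"));
    links.put("page_c", Arrays.asList("page_a"));

    // Every page starts with rank 1.0, as in the RDD version.
    Map<String, Double> ranks = new HashMap<String, Double>();
    for (String page : links.keySet()) {
      ranks.put(page, 1.0);
    }

    // Step 1 (join + flatMapToPair): each page sends rank / out-degree to each neighbor.
    // Step 2 (reduceByKey): the contributions are summed per receiving page.
    Map<String, Double> contribs = new HashMap<String, Double>();
    for (Map.Entry<String, List<String>> entry : links.entrySet()) {
      double share = ranks.get(entry.getKey()) / entry.getValue().size();
      for (String neighbor : entry.getValue()) {
        Double prev = contribs.get(neighbor);
        contribs.put(neighbor, (prev == null ? 0.0 : prev) + share);
      }
    }

    // Step 3 (mapValues): apply the damped update 0.15 + 0.85 * sum.
    for (Map.Entry<String, Double> entry : contribs.entrySet()) {
      ranks.put(entry.getKey(), 0.15 + 0.85 * entry.getValue());
    }

    // page_a receives 1.0 + 1.0 = 2.0 and ends with 0.15 + 0.85 * 2.0 = 1.85;
    // page_b and page_c each receive 0.5 and end with 0.15 + 0.85 * 0.5 = 0.575.
    System.out.println(ranks);
  }
}

In the Spark example the same three steps are distributed: join pairs each URL's neighbor list with its current rank, flatMapToPair emits the per-neighbor shares, and reduceByKey plus mapValues perform the damped sum; repeating the loop drives the ranks toward a fixed point.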