/*
* Apfloat arbitrary precision arithmetic library
* Copyright (C) 2002-2017 Mikko Tommila
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
package org.apfloat.samples;
import java.io.PrintWriter;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.LinkedList;
import java.util.ArrayList;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.Enumeration;
import java.util.ResourceBundle;
import java.util.MissingResourceException;
import org.apfloat.Apfloat;
import org.apfloat.ApfloatContext;
import org.apfloat.ApfloatMath;
import org.apfloat.ApfloatRuntimeException;
/**
* Calculates pi using a cluster of servers.
* The servers should be running {@link OperationServer}.
*
 * The names and ports of the cluster nodes are read from the file
 * <code>cluster.properties</code>, or a <code>ResourceBundle</code>
 * by the name "cluster". The format of the property file is as
 * follows:
 *
 * <pre>
 * server1=hostname1.company.com:1234
 * server2=hostname2.company.com:1234
 * server3=hostname3.company.com:1234
 * weight1=100
 * weight2=110
 * weight3=50
 * </pre>
 *
 * The server addresses are specified as <code>hostname:port</code>. Weights can
 * (but don't have to) be assigned to nodes to indicate the relative
 * performance of each node, to allow distributing a suitable amount
 * of work for each node. For example, <code>weight2</code> is the
 * relative performance of <code>server2</code> etc. The weights must
 * be integers in the range 1...1000.
*
 * Guidelines for configuring the servers:
 *
 * <ul>
 *   <li>If the machines are not identical, give proper weights to every
 *       machine. This can improve performance greatly.</li>
 *   <li>If the machines are somewhat similar (e.g. same processor but
 *       different clock frequency), you can calculate the weight roughly
 *       as <code>clockFrequency * numberOfProcessors</code>. For example,
 *       a machine with two 1600MHz processors is four times as fast as
 *       a machine with one 800MHz processor.</li>
 *   <li>If the machines are very heterogenous, you can benchmark their
 *       performance by running e.g. {@link PiParallel} with one
 *       million digits. Remember to specify the correct number of
 *       CPUs on each machine.</li>
 *   <li>Different JVMs can have different performance. For example,
 *       Sun's Java client VM achieves roughly two thirds of the
 *       performance of the server VM when running this application.</li>
 *   <li>When running {@link OperationServer} on the cluster nodes,
 *       specify the number of worker threads for each server to be
 *       the same as the number of CPUs of the machine.</li>
 *   <li>Additionally, you should specify the number of processors
 *       correctly in the <code>apfloat.properties</code> file
 *       for each cluster server.</li>
 * </ul>
 *
*
* Similarly as with {@link PiParallel}, if some nodes have multiple
* CPUs, to get any performance gain from running many
* threads in parallel, the JVM must be executing native threads.
* If the JVM is running in green threads mode, there is no
* advantage of having multiple threads, as the JVM will in fact
* execute just one thread and divide its time to multiple
* simulated threads.
*
* @version 1.8.2
* @author Mikko Tommila
*/
public class PiDistributed
extends PiParallel
{
/**
* Distributed version of the binary splitting algorithm.
* Uses multiple computers to calculate pi in parallel.
*/
protected static class DistributedBinarySplittingPiCalculator
extends ParallelBinarySplittingPiCalculator
{
/**
 * Construct a distributed pi calculator that uses the given
 * binary splitting series for the calculation.
 *
 * @param series The binary splitting series to be used.
 */
public DistributedBinarySplittingPiCalculator(BinarySplittingSeries series)
{
super(series);
}
/**
* Entry point for the distributed binary splitting algorithm.
*
* @param n1 Start term.
* @param n2 End term.
* @param T Algorithm parameter.
* @param Q Algorithm parameter.
* @param P Algorithm parameter.
* @param F Pointer to inverse square root parameter.
* @param nodes The operation executors to be used for the calculation.
*/
public void r(final long n1, final long n2, final ApfloatHolder T, final ApfloatHolder Q, final ApfloatHolder P, final ApfloatHolder F, Node[] nodes)
throws ApfloatRuntimeException
{
if (nodes.length == 1)
{
// End of splitting work between nodes
// Calculate remaining terms on the node
// Splitting of work continues on the server node using multiple threads
if (DEBUG) Pi.err.println("PiDistributed.r(" + n1 + ", " + n2 + ") transferring to server side node " + nodes[0]);
ApfloatHolder[] TQP = nodes[0].execute(new Operation()
{
public ApfloatHolder[] execute()
{
// Continue splitting by threads on server side
r(n1, n2, T, Q, P, null);
return new ApfloatHolder[] { T, Q, P };
}
});
T.setApfloat(TQP[0].getApfloat());
Q.setApfloat(TQP[1].getApfloat());
if (P != null) P.setApfloat(TQP[2].getApfloat());
}
else
{
// Multiple nodes available; split work in ratio of node weights and execute in parallel
// This split is done on the client side
Object[] objs = splitNodes(nodes);
final Node[] nodes1 = (Node[]) objs[0],
nodes2 = (Node[]) objs[2];
long weight1 = (Long) objs[1],
weight2 = (Long) objs[3];
final long nMiddle = n1 + (n2 - n1) * weight1 / (weight1 + weight2);
final ApfloatHolder LT = new ApfloatHolder(),
LQ = new ApfloatHolder(),
LP = new ApfloatHolder();
if (DEBUG) Pi.err.println("PiDistributed.r(" + n1 + ", " + n2 + ") splitting " + formatArray(nodes) + " to r(" + n1 + ", " + nMiddle + ") " + formatArray(nodes1) + ", r(" + nMiddle + ", " + n2 + ") " + formatArray(nodes2));
BackgroundOperation