/*
* MIT License
*
* Copyright (c) 2002-2021 Mikko Tommila
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package org.apfloat.samples;
import java.io.PrintWriter;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.LinkedList;
import java.util.ArrayList;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.Enumeration;
import java.util.ResourceBundle;
import java.util.MissingResourceException;
import org.apfloat.Apfloat;
import org.apfloat.ApfloatContext;
import org.apfloat.ApfloatMath;
import org.apfloat.ApfloatRuntimeException;
/**
* Calculates pi using a cluster of servers.
* The servers should be running {@link OperationServer}.
*
* The names and ports of the cluster nodes are read from the file
* <code>cluster.properties</code>, or a <code>ResourceBundle</code>
* by the name "cluster". The format of the property file is as
* follows:
*
* <pre>
* server1=hostname1:port1
* server2=hostname2:port2
* server3=hostname3:port3
*
* weight1=weight1
* weight2=weight2
* weight3=weight3
* </pre>
*
* The server addresses are specified as hostname:port. Weights can
* (but don't have to) be assigned to nodes to indicate the relative
* performance of each node, to allow distributing a suitable amount
* of work for each node. For example, <code>weight2</code> is the
* relative performance of <code>server2</code> etc. The weights must
* be integers in the range 1...1000.
*
* Guidelines for configuring the servers:
*
* <ul>
*   <li>If the machines are not identical, give proper weights to every
*       machine. This can improve performance greatly.</li>
*   <li>If the machines are somewhat similar (e.g. same processor but
*       different clock frequency), you can calculate the weight roughly
*       as <code>clockFrequency * numberOfProcessors</code>. For example,
*       a machine with two 1600MHz processors is four times as fast as
*       a machine with one 800MHz processor.</li>
*   <li>If the machines are very heterogeneous, you can benchmark their
*       performance by running e.g. {@link PiParallel} with one
*       million digits. Remember to specify the correct number of
*       CPUs on each machine.</li>
*   <li>Different JVMs can have different performance. For example,
*       Sun's Java client VM achieves roughly two thirds of the
*       performance of the server VM when running this application.</li>
*   <li>When running {@link OperationServer} on the cluster nodes,
*       specify the number of worker threads for each server to be
*       the same as the number of CPUs of the machine.</li>
*   <li>Additionally, you should specify the number of processors
*       correctly in the <code>apfloat.properties</code> file
*       for each cluster server.</li>
* </ul>
*
*
* Similarly as with {@link PiParallel}, if some nodes have multiple
* CPUs, to get any performance gain from running many
* threads in parallel, the JVM must be executing native threads.
* If the JVM is running in green threads mode, there is no
* advantage of having multiple threads, as the JVM will in fact
* execute just one thread and divide its time to multiple
* simulated threads.
*
* @version 1.9.0
* @author Mikko Tommila
*/
public class PiDistributed
extends PiParallel
{
/**
* Distributed version of the binary splitting algorithm.
* Uses multiple computers to calculate pi in parallel.
*/
protected static class DistributedBinarySplittingPiCalculator
extends ParallelBinarySplittingPiCalculator
{
/**
* Construct a distributed pi calculator that delegates to the
* parallel base-class calculator, using the given binary splitting
* series to produce the terms of the computation.
*
* @param series The binary splitting series to be used.
*/
public DistributedBinarySplittingPiCalculator(BinarySplittingSeries series)
{
super(series);
}
/**
* Entry point for the distributed binary splitting algorithm.
*
* @param n1 Start term.
* @param n2 End term.
* @param T Algorithm parameter.
* @param Q Algorithm parameter.
* @param P Algorithm parameter.
* @param F Pointer to inverse square root parameter.
* @param nodes The operation executors to be used for the calculation.
*/
public void r(long n1, long n2, ApfloatHolder T, ApfloatHolder Q, ApfloatHolder P, ApfloatHolder F, Node[] nodes)
throws ApfloatRuntimeException
{
if (nodes.length == 1)
{
// End of splitting work between nodes
// Calculate remaining terms on the node
// Splitting of work continues on the server node using multiple threads
if (DEBUG) Pi.err.println("PiDistributed.r(" + n1 + ", " + n2 + ") transferring to server side node " + nodes[0]);
ApfloatHolder[] TQP = nodes[0].execute(() ->
{
// Continue splitting by threads on server side
r(n1, n2, T, Q, P, null);
return new ApfloatHolder[] { T, Q, P };
});
T.setApfloat(TQP[0].getApfloat());
Q.setApfloat(TQP[1].getApfloat());
if (P != null) P.setApfloat(TQP[2].getApfloat());
}
else
{
// Multiple nodes available; split work in ratio of node weights and execute in parallel
// This split is done on the client side
Object[] objs = splitNodes(nodes);
Node[] nodes1 = (Node[]) objs[0],
nodes2 = (Node[]) objs[2];
long weight1 = (Long) objs[1],
weight2 = (Long) objs[3];
long nMiddle = n1 + (n2 - n1) * weight1 / (weight1 + weight2);
ApfloatHolder LT = new ApfloatHolder(),
LQ = new ApfloatHolder(),
LP = new ApfloatHolder();
if (DEBUG) Pi.err.println("PiDistributed.r(" + n1 + ", " + n2 + ") splitting " + formatArray(nodes) + " to r(" + n1 + ", " + nMiddle + ") " + formatArray(nodes1) + ", r(" + nMiddle + ", " + n2 + ") " + formatArray(nodes2));
BackgroundOperation