package convex.peer;

import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Consumer;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import convex.api.Convex;
import convex.core.Belief;
import convex.core.Block;
import convex.core.BlockResult;
import convex.core.Constants;
import convex.core.ErrorCodes;
import convex.core.Peer;
import convex.core.Result;
import convex.core.State;
import convex.core.crypto.AKeyPair;
import convex.core.data.ACell;
import convex.core.data.AString;
import convex.core.data.AVector;
import convex.core.data.AccountKey;
import convex.core.data.AccountStatus;
import convex.core.data.Address;
import convex.core.data.Format;
import convex.core.data.Hash;
import convex.core.data.Keyword;
import convex.core.data.Keywords;
import convex.core.data.PeerStatus;
import convex.core.data.Ref;
import convex.core.data.SignedData;
import convex.core.data.Strings;
import convex.core.data.Vectors;
import convex.core.data.prim.CVMLong;
import convex.core.exceptions.BadFormatException;
import convex.core.exceptions.BadSignatureException;
import convex.core.exceptions.InvalidDataException;
import convex.core.exceptions.MissingDataException;
import convex.core.init.Init;
import convex.core.lang.Context;
import convex.core.lang.RT;
import convex.core.lang.Reader;
import convex.core.store.AStore;
import convex.core.store.Stores;
import convex.core.transactions.ATransaction;
import convex.core.transactions.Invoke;
import convex.core.util.Shutdown;
import convex.core.util.Utils;
import convex.net.MessageType;
import convex.net.NIOServer;
import convex.net.message.Message;


/**
 * A self-contained server that can be launched with a config.
 *
 * Server creates the following threads:
 * - A ReceiverThread that processes messages from the Server's receive Queue
 * - An UpdateThread that handles Belief updates and transaction processing
 * - A ConnectionManager thread, via the ConnectionManager
 *
 * "Programming is a science dressed up as art, because most of us don't
 * understand the physics of software and it's rarely, if ever, taught. The
 * physics of software is not algorithms, data structures, languages, and
 * abstractions. These are just tools we make, use, and throw away. The real
 * physics of software is the physics of people. Specifically, it's about our
 * limitations when it comes to complexity and our desire to work together to
 * solve large problems in pieces. This is the science of programming: make
 * building blocks that people can understand and use easily, and people will
 * work together to solve the very largest problems." ― Pieter Hintjens
 *
 */
public class Server implements Closeable {
	public static final int DEFAULT_PORT = 18888;

	private static final int RECEIVE_QUEUE_SIZE = 10000;

	private static final int EVENT_QUEUE_SIZE = 1000;

	// Maximum Pause for each iteration of Server update loop.
	private static final long SERVER_UPDATE_PAUSE = 5L;

	static final Logger log = LoggerFactory.getLogger(Server.class.getName());

	// private static final Level LEVEL_MESSAGE = Level.FINER;

	/**
	 * Queue for received messages to be processed by this Peer Server
	 */
	private BlockingQueue<Message> receiveQueue = new ArrayBlockingQueue<Message>(RECEIVE_QUEUE_SIZE);

	/**
	 * Queue for received events (Beliefs, Transactions) to be processed
	 */
	private BlockingQueue<SignedData<ACell>> eventQueue = new ArrayBlockingQueue<>(EVENT_QUEUE_SIZE);


	/**
	 * Message consumer that simply enqueues messages received by this Server
	 */
	Consumer<Message> peerReceiveAction = new Consumer<Message>() {
		@Override
		public void accept(Message msg) {
			try {
				queueMessage(msg);
			} catch (InterruptedException e) {
				log.warn("Interrupt on peer receive queue!");
			}
		}
	};

	/**
	 * Connection manager instance.
	 */
	protected ConnectionManager manager;

	/**
	 * Store to use for all threads associated with this server instance
	 */
	private final AStore store;

	private final HashMap<Keyword, Object> config;

	/**
	 * Flag for a running server. Setting to false will terminate server threads.
	 */
	private volatile boolean isRunning = false;

	private NIOServer nio;
	private Thread receiverThread = null;
	private Thread updateThread = null;

	/**
	 * The current Peer instance for this Server. Will be updated based on peer events.
	 */
	private Peer peer;

	/**
	 * The Peer Controller Address
	 */
	private Address controller;

	/**
	 * The list of new transactions to be added to the next Block. Accessed only in update loop
	 *
	 * Must all have been fully persisted.
	 */
	private ArrayList<SignedData<ATransaction>> newTransactions = new ArrayList<>();

	/**
	 * The set of queued partial messages pending missing data.
	 *
	 * Delivery will be re-attempted when missing data is provided
	 */
	private HashMap<Hash, Message> partialMessages = new HashMap<Hash, Message>();

	/**
	 * The latest Beliefs received from remote Peers, keyed by Peer public key.
	 * Should only be modified with the lock for this Server held.
	 */
	private HashMap<AccountKey, SignedData<Belief>> newBeliefs = new HashMap<>();


	/**
	 * Hostname of the peer server.
	 */
	String hostname;

	private IServerEvent eventHook = null;

	private Server(HashMap<Keyword, Object> config) throws TimeoutException, IOException {

		AStore configStore = (AStore) config.get(Keywords.STORE);
		this.store = (configStore == null) ? Stores.current() : configStore;

		// assign the event hook if set
		if (config.containsKey(Keywords.EVENT_HOOK)) {
			Object maybeHook=config.get(Keywords.EVENT_HOOK);
			if (maybeHook instanceof IServerEvent) {
				this.eventHook = (IServerEvent)maybeHook;
			}
		}
		// Switch to use the configured store for setup, saving the caller store
		final AStore savedStore=Stores.current();
		try {
			Stores.setCurrent(store);
			this.config = config;
			// now setup the connection manager
			this.manager = new ConnectionManager(this);

			this.peer = establishPeer();

			establishController();

			nio = NIOServer.create(this, receiveQueue);

		} finally {
			Stores.setCurrent(savedStore);
		}
	}

	/**
	 * Establish the controller Account for this Peer.
	 */
	private void establishController() {
		Address controlAddress=RT.toAddress(getConfig().get(Keywords.CONTROLLER));
		if (controlAddress==null) {
			controlAddress=peer.getController();
			if (controlAddress==null) {
				throw new IllegalStateException("Peer Controller account does not exist for Peer Key: "+peer.getPeerKey());
			}
		}
		AccountStatus as=peer.getConsensusState().getAccount(controlAddress);
		if (as==null) {
			throw new IllegalStateException("Peer Controller Account does not exist: "+controlAddress);
		}
		if (!as.getAccountKey().equals(getKeyPair().getAccountKey())) {
			throw new IllegalStateException("Server keypair does not match keypair for control account: "+controlAddress);
		}
		this.setPeerController(controlAddress);
	}

	@SuppressWarnings("unchecked")
	private Peer establishPeer() throws TimeoutException, IOException {
		log.info("Establishing Peer with store: {}",Stores.current());
		try {
			AKeyPair keyPair = (AKeyPair) getConfig().get(Keywords.KEYPAIR);
			if (keyPair==null) {
				log.warn("No keypair provided for Server, deafulting to generated keypair for testing purposes");
				keyPair=AKeyPair.generate();
				log.warn("Generated keypair with public key: "+keyPair.getAccountKey());
			}

			Object source=getConfig().get(Keywords.SOURCE);
			if (Utils.bool(source)) {
				// Peer sync case
				InetSocketAddress sourceAddr=Utils.toInetSocketAddress(source);
				Convex convex=Convex.connect(sourceAddr);
				log.info("Attempting Peer Sync with: "+sourceAddr);
				long timeout = establishTimeout();
				
				// Sync status and genesis state
				Result result = convex.requestStatusSync(timeout);
				AVector<ACell> status = result.getValue();
				if (status == null || status.count()!=Constants.STATUS_COUNT) {
					throw new Error("Bad status message from remote Peer");
				}
				Hash beliefHash=RT.ensureHash(status.get(0));
				Hash networkID=RT.ensureHash(status.get(2));
				log.info("Attempting to sync genesis state with network: "+networkID);
				State genF=(State) convex.acquire(networkID).get(timeout,TimeUnit.MILLISECONDS);
				log.info("Retreived Genesis State: "+networkID);
				
				// Belief acquisition
				log.info("Attempting to obtain peer Belief: "+beliefHash);
				SignedData<Belief> belF=null;
				long timeElapsed=0;
				while (belF==null) {
					try {
						belF=(SignedData<Belief>) convex.acquire(beliefHash).get(timeout,TimeUnit.MILLISECONDS);
					} catch (TimeoutException te) {
						timeElapsed+=timeout;
						log.info("Still waiting for Belief sync after "+timeElapsed/1000+"s");
					}
				}
				log.info("Retreived Peer Signed Belief: "+beliefHash+ " with memory size: "+belF.getMemorySize());

				Peer peer=Peer.create(keyPair, genF, belF.getValue());
				return peer;

			} else if (Utils.bool(getConfig().get(Keywords.RESTORE))) {
				// Restore from storage case
				try {

					Peer peer = Peer.restorePeer(store, keyPair);
					if (peer != null) {
						log.info("Restored Peer with root data hash: {}",store.getRootHash());
						return peer;
					}
				} catch (Throwable e) {
					log.error("Can't restore Peer from store: {}",e);
				}
			}
			State genesisState = (State) config.get(Keywords.STATE);
			if (genesisState!=null) {
				log.info("Defaulting to standard Peer startup with genesis state: "+genesisState.getHash());
			} else {
				AccountKey peerKey=keyPair.getAccountKey();
				genesisState=Init.createState(List.of(peerKey));
				log.info("Created new genesis state: "+genesisState.getHash()+ " with initial peer: "+peerKey);
			}
			return Peer.createGenesisPeer(keyPair,genesisState);
		} catch (ExecutionException|InterruptedException e) {
			throw Utils.sneakyThrow(e);
		}
	}

	private long establishTimeout() {
		Object maybeTimeout=getConfig().get(Keywords.TIMEOUT);
		if (maybeTimeout==null) return Constants.PEER_SYNC_TIMEOUT;
		return Utils.toInt(maybeTimeout);
	}
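
	// Illustrative sketch: a sync timeout can be supplied via the Keywords.TIMEOUT
	// entry read above; the value below is an arbitrary example in milliseconds,
	// otherwise Constants.PEER_SYNC_TIMEOUT applies.
	//
	//   config.put(Keywords.TIMEOUT, 60000);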

	/**
	 * Creates a new (unlaunched) Server with a given config.
	 *
	 * @param config Server configuration map. Will be defensively copied.
	 *
	 * @return New Server instance
	 * @throws IOException If an IO Error occurred establishing the Peer
	 * @throws TimeoutException If Peer creation timed out
	 */
	public static Server create(HashMap<Keyword, Object> config) throws TimeoutException, IOException {
		return new Server(new HashMap<>(config));
	}
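
	// Illustrative usage sketch (not a definitive recipe; error handling omitted).
	// It shows a minimal config map using keyword entries read elsewhere in this
	// class (KEYPAIR, PORT, STORE); the generated keypair is for testing only.
	//
	//   HashMap<Keyword, Object> config = new HashMap<>();
	//   config.put(Keywords.KEYPAIR, AKeyPair.generate());
	//   config.put(Keywords.PORT, Server.DEFAULT_PORT);
	//   config.put(Keywords.STORE, Stores.current());
	//   Server server = Server.create(config);
	//   server.launch();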

	/**
	 * Gets the current Belief held by this {@link Server}
	 *
	 * @return Current Belief
	 */
	public Belief getBelief() {
		return peer.getBelief();
	}

	/**
	 * Gets the current Peer data structure for this {@link Server}.
	 *
	 * @return Current Peer data
	 */
	public Peer getPeer() {
		return peer;
	}

	/**
	 * Gets the desired host name for this Peer
	 * @return Hostname String
	 */
	public String getHostname() {
		return hostname;
	}

	/**
	 * Launch the Peer Server, including all main server threads
	 */
	public void launch() {
		AStore savedStore=Stores.current();
		try {
			Stores.setCurrent(store);

			HashMap<Keyword, Object> config = getConfig();

			Object p = config.get(Keywords.PORT);
			Integer port = (p == null) ? null : Utils.toInt(p);

			nio.launch((String)config.get(Keywords.BIND_ADDRESS), port);
			port = nio.getPort(); // Get the actual port (may be auto-allocated)

			if (getConfig().containsKey(Keywords.URL)) {
				hostname = (String) config.get(Keywords.URL);
				log.debug("Setting desired peer URL to: " + hostname);
			} else {
				hostname = null;
			}



			// set running status now, so that loops don't terminate
			isRunning = true;

			// Start connection manager loop
			manager.start();

			receiverThread = new Thread(receiverLoop, "Receive Loop on port: " + port);
			receiverThread.setDaemon(true);
			receiverThread.start();

			// Start Peer update thread
			updateThread = new Thread(beliefMergeLoop, "Update Loop on port: " + port);
			updateThread.setDaemon(true);
			updateThread.start();


			// Close server on shutdown, should be before Etch stores in priority
			Shutdown.addHook(Shutdown.SERVER, new Runnable() {
				@Override
				public void run() {
					close();
				}
			});

			// Connect to source peer if specified
			if (getConfig().containsKey(Keywords.SOURCE)) {
				Object s=getConfig().get(Keywords.SOURCE);
				InetSocketAddress sa=Utils.toInetSocketAddress(s);
				if (sa!=null) {
					if (manager.connectToPeer(sa)!=null) {
						log.debug("Automatically connected to :source peer at: {}",sa);
					} else {
						log.warn("Failed to connect to :source peer at: {}",sa);
					}
				} else {
					log.warn("Failed to parse :source peer address {}",s);
				}
			}

			log.info( "Peer Server started with Peer Address: {}",getPeerKey());
		} catch (Throwable e) {
			close();
			throw new Error("Failed to launch Server", e);
		} finally {
			Stores.setCurrent(savedStore);
		}
	}

	/**
	 * Process a message received from a peer or client. We know at this point that the
	 * message parsed successfully, not much else.....
	 * 
	 * SECURITY: Should anticipate malicious messages
	 *
	 * If the message is partial, will be queued pending delivery of missing data.
	 *
	 * Runs on receiver thread
	 *
	 * @param m
	 */
	private void processMessage(Message m) {
		MessageType type = m.getType();
		log.trace("Processing message {}",type);
		try {
			switch (type) {
			case BELIEF:
				processBelief(m);
				break;
			case CHALLENGE:
				processChallenge(m);
				break;
			case RESPONSE:
				processResponse(m);
				break;
			case COMMAND:
				break;
			case DATA:
				processData(m);
				break;
			case MISSING_DATA:
				processMissingData(m);
				break;
			case QUERY:
				processQuery(m);
				break;
			case RESULT:
				break;
			case TRANSACT:
				processTransact(m);
				break;
			case GOODBYE:
				processClose(m);
				break;
			case STATUS:
				processStatus(m);
				break;
			default:
				Result r=Result.create(m.getID(), Strings.create("Bad Message Type: "+type), ErrorCodes.ARGUMENT);
				m.reportResult(r);
				break;
			}

		} catch (MissingDataException e) {
			Hash missingHash = e.getMissingHash();
			log.trace("Missing data: {} in message of type {}" , missingHash,type);
			try {
				registerPartialMessage(missingHash, m);
				m.sendMissingData(missingHash);
				log.trace("Requested missing data {} for partial message",missingHash);
			} catch (Exception ex) {
				log.warn( "Exception while requesting missing data: {}" + ex);
			}
		} catch (BadFormatException | ClassCastException | NullPointerException e) {
			log.warn("Error processing client message: {}", e);
		}
	}

	/**
	 * Respond to a request for missing data, on a best-efforts basis. Requests for
	 * missing data we do not hold are ignored.
	 *
	 * @param m
	 * @throws BadFormatException
	 */
	private void processMissingData(Message m) throws BadFormatException {
		// payload for a missing data request should be a valid Hash
		Hash h = RT.ensureHash(m.getPayload());
		if (h == null) throw new BadFormatException("Hash required for missing data message");

		Ref<ACell> r = store.refForHash(h);
		if (r != null) {
			try {
				ACell data = r.getValue();
				boolean sent = m.sendData(data);
				// log.trace( "Sent missing data for hash: {} with type {}",Utils.getClassName(data));
				if (!sent) {
					log.debug("Can't send missing data for hash {} due to full buffer",h);
				}
			} catch (Exception e) {
				log.warn("Unable to deliver missing data for {} due to exception: {}", h, e);
			}
		} else {
			log.debug("Unable to provide missing data for {} from store: {}", h,Stores.current());
		}
	}

	@SuppressWarnings("unchecked")
	private void processTransact(Message m) {
		// query is a vector [id , signed-object]
		AVector<ACell> v = m.getPayload();
		SignedData<ACell> sd = (SignedData<ACell>) v.get(1);

		// System.out.println("transact: "+v);

		// Persist the signed transaction. Might throw MissingDataException?
		// If we already have the transaction persisted, will get signature status
		ACell.createPersisted(sd);

		if (!sd.checkSignature()) {
			// terminate the connection, dishonest client?
			try {
				// TODO: throttle?
				Result r=Result.create(m.getID(), Strings.BAD_SIGNATURE, ErrorCodes.SIGNATURE);
				m.reportResult(r);
			} catch (Exception e) {
				// Ignore?? Connection probably gone anyway
			}
			log.info("Bad signature from Client! {}" , sd);
			return;
		}
		
		if (!(sd.getValue() instanceof ATransaction)) {
			Result r=Result.create(m.getID(), Strings.BAD_FORMAT, ErrorCodes.FORMAT);
			m.reportResult(r);
			return;
		}

		registerInterest(sd.getHash(), m);
		try {
			eventQueue.put(sd);
		} catch (InterruptedException e) {
			log.warn("Unexpected interruption adding transaction to event queue!");
		}
	}

	/**
	 * Handles a GOODBYE message from a remote peer, closing connections to that peer.
	 *
	 */
	private void processClose(Message m) {
		SignedData signedPeerKey = m.getPayload();
		AccountKey remotePeerKey = RT.ensureAccountKey(signedPeerKey.getValue());
		manager.closeConnection(remotePeerKey);
		raiseServerChange("connection");
	}

	/**
	 * Checks if received data fulfils the requirement for a partial message. If so,
	 * re-queues the message for processing.
	 *
	 * @param hash
	 * @return true if the data request resulted in a re-queued message, false
	 *         otherwise
	 */
	private boolean maybeProcessPartial(Hash hash) {
		Message m;
		synchronized (partialMessages) {
			m = partialMessages.get(hash);

			if (m != null) {
				log.trace( "Attempting to re-queue partial message due to received hash: ",hash);
				if (receiveQueue.offer(m)) {
					partialMessages.remove(hash);
					return true;
				} else {
					log.warn( "Queue full for message with received hash: {}", hash);
				}
			}
		}
		return false;
	}

	/**
	 * Stores a partial message for potential later handling.
	 *
	 * @param missingHash Hash of missing data dependency
	 * @param m           Message to re-attempt later when missing data is received.
	 */
	private void registerPartialMessage(Hash missingHash, Message m) {
		synchronized (partialMessages) {
			log.trace( "Registering partial message with missing hash: " ,missingHash);
			partialMessages.put(missingHash, m);
		}
	}

	/**
	 * Register of client interests in receiving transaction responses
	 */
	private HashMap<Hash, Message> interests = new HashMap<>();

	/**
	 * Register interest in receiving a result for a transaction
	 * @param signedTransactionHash
	 * @param m
	 */
	private void registerInterest(Hash signedTransactionHash, Message m) {
		interests.put(signedTransactionHash, m);
	}

	/**
	 * Handle general Belief update, taking Beliefs registered in newBeliefs
	 *
	 * @return true if Peer Belief changed, false otherwise
	 * @throws InterruptedException
	 */
	protected boolean maybeUpdateBelief() throws InterruptedException {
		long oldConsensusPoint = peer.getConsensusPoint();

		// possibly have own transactions to publish
		maybePostOwnTransactions();

		// publish new blocks if needed. Guaranteed to change belief if this happens
		boolean published = maybePublishBlock();

		// only do belief merge if needed: either after publishing a new block or with
		// incoming beliefs
		if ((!published) && newBeliefs.isEmpty()) return false;

		// Update Peer timestamp first. This determines what we might accept.
		peer = peer.updateTimestamp(Utils.getCurrentTimestamp());

		boolean updated = maybeMergeBeliefs();
		// Must skip broadcast if we haven't published a new Block or updated our own Order
		if (!(updated||published)) return false;

		// At this point we know our Order should have changed
		final Belief belief = peer.getBelief();

		broadcastBelief(belief);

		// Report transaction results
		long newConsensusPoint = peer.getConsensusPoint();
		if (newConsensusPoint > oldConsensusPoint) {
			log.debug("Consensus point update from {} to {}" ,oldConsensusPoint , newConsensusPoint);
			for (long i = oldConsensusPoint; i < newConsensusPoint; i++) {
				SignedData<Block> block = peer.getPeerOrder().getBlock(i);
				BlockResult br = peer.getBlockResult(i);
				reportTransactions(block.getValue(), br);
			}
		}

		return true;
	}

	/**
	 * Time of last belief broadcast
	 */
	private long lastBroadcastBelief=0;
	private long broadcastCount=0L;

	private void broadcastBelief(Belief belief) {
		// At this point we know something updated our belief, so we want to rebroadcast
		// belief to network
		Consumer<Ref<ACell>> noveltyHandler = r -> {
			ACell o = r.getValue();
			if (o == belief) return; // skip sending data for belief cell itself, will be BELIEF payload
			Message msg = Message.createData(o);
            // broadcast to all peers trusted or not
			manager.broadcast(msg, false);
		};

		// persist the state of the Peer, announcing the new Belief
		// (ensure we can handle missing data requests etc.)
		peer=peer.persistState(noveltyHandler);

		// Broadcast latest Belief to connected Peers
		SignedData<Belief> sb = peer.getSignedBelief();

		Message msg = Message.createBelief(sb);

        // at the moment broadcast to all peers trusted or not TODO: recheck this
		manager.broadcast(msg, false);
		lastBroadcastBelief=Utils.getCurrentTimestamp();
		broadcastCount++;
	}

	/**
	 * Gets the number of belief broadcasts made by this Peer
	 * @return Count of broadcasts from this Server instance
	 */
	public long getBroadcastCount() {
		return broadcastCount;
	}

	private long lastBlockPublishedTime=0L;

	/**
	 * Checks for pending transactions, and if any are found proposes them as a new Block.
	 *
	 * @return True if a new block is published, false otherwise.
	 */
	protected boolean maybePublishBlock() {
		long timestamp=Utils.getCurrentTimestamp();
		// skip if recently published a block
		if ((lastBlockPublishedTime+Constants.MIN_BLOCK_TIME)>timestamp) return false;

		Block block=null;
		int n = newTransactions.size();
		if (n == 0) return false;
		// TODO: smaller block if too many transactions?
		block = Block.create(timestamp, (List<SignedData<ATransaction>>) newTransactions);
		newTransactions.clear();

		ACell.createPersisted(block);

		Peer newPeer = peer.proposeBlock(block);
		log.info("New block proposed: {} transaction(s), hash={}", block.getTransactions().count(), block.getHash());

		peer = newPeer;
		lastBlockPublishedTime=timestamp;
		return true;
	}

	private long lastOwnTransactionTimestamp=0L;

	private static final long OWN_TRANSACTIONS_DELAY=300;

	/**
	 * Gets the Peer controller Address
	 * @return Peer controller Address
	 */
	public Address getPeerController() {
		return controller;
	}

	/**
	 * Sets the Peer controller Address
	 * @param a Peer Controller Address to set
	 */
	public void setPeerController(Address a) {
		controller=a;
	}

	/**
	 * Adds an event to the inbound server event queue. May block.
	 * @param event Signed event to add to inbound event queue
	 * @throws InterruptedException
	 */
	public void queueEvent(SignedData<ACell> event) throws InterruptedException {
		eventQueue.put(event);
	}
	
	/**
	 * Queues a message for processing by this Server. May block briefly.
	 * @param m Message to queue
	 */
	public void queueMessage(Message m) throws InterruptedException {
		receiveQueue.put(m);
	}
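
	// Illustrative sketch: inbound messages normally reach the receive queue via the
	// Consumer returned by getReceiveAction(), which wraps queueMessage(...) above.
	// A test harness could inject an already-constructed Message (the "msg" variable
	// here is hypothetical) in the same way:
	//
	//   server.getReceiveAction().accept(msg);  // enqueue via the receive action
	//   server.queueMessage(msg);               // or blocking enqueue, may throw InterruptedException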

	/**
	 * Checks if the Peer wants to post any of its own transactions, adding them to newTransactions if so.
	 */
	private void maybePostOwnTransactions() {
		if (!Utils.bool(config.get(Keywords.AUTO_MANAGE))) return;

		State s=getPeer().getConsensusState();
		long ts=Utils.getCurrentTimestamp();

		// If we already did this recently, don't try again
		if (ts<(lastOwnTransactionTimestamp+OWN_TRANSACTIONS_DELAY)) return;

		lastOwnTransactionTimestamp=ts; // mark this timestamp

		String desiredHostname=getHostname(); // Intended hostname
		AccountKey peerKey=getPeerKey();
		PeerStatus ps=s.getPeer(peerKey);
		AString chn=ps.getHostname();
		String currentHostname=(chn==null)?null:chn.toString();

		// Try to set hostname if not correctly set
		trySetHostname:
		if (!Utils.equals(desiredHostname, currentHostname)) {
			log.info("Trying to update own hostname from: {} to {}",currentHostname,desiredHostname);
			Address address=ps.getController();
			if (address==null) break trySetHostname;
			AccountStatus as=s.getAccount(address);
			if (as==null) break trySetHostname;
			if (!Utils.equals(getPeerKey(), as.getAccountKey())) break trySetHostname;

			String code;
			if (desiredHostname==null) {
				code = String.format("(set-peer-data %s {:url nil})", peerKey);
			} else {
				code = String.format("(set-peer-data %s {:url \"%s\"})", peerKey, desiredHostname);
			}
			ACell message = Reader.read(code);
			ATransaction transaction = Invoke.create(address, as.getSequence()+1, message);
			newTransactions.add(getKeyPair().signData(transaction));
		}
	}


	/**
	 * Checks for mergeable remote Beliefs, and if found merges them and updates
	 * own Belief.
	 *
	 * @return True if Peer Belief Order was changed, false otherwise.
	 */
	protected boolean maybeMergeBeliefs() {
		try {
			// First get the set of new beliefs for merging
			Belief[] beliefs;
			synchronized (newBeliefs) {
				int n = newBeliefs.size();
				beliefs = new Belief[n];
				int i = 0;
				for (AccountKey addr : newBeliefs.keySet()) {
					beliefs[i++] = newBeliefs.get(addr).getValue();
				}
				newBeliefs.clear();
			}
			Peer newPeer = peer.mergeBeliefs(beliefs);

			// Check for substantive change (i.e. Orders updated, can ignore timestamp)
			if (newPeer.getBelief().getOrders().equals(peer.getBelief().getOrders())) return false;

			log.debug( "New merged Belief update: {}" ,newPeer.getBelief().getHash());
			// we merged successfully, so clear pending beliefs and update Peer
			peer = newPeer;
			return true;
		} catch (MissingDataException e) {
			// Shouldn't happen if beliefs are persisted
			// e.printStackTrace();
			throw new Error("Missing data in belief update: " + e.getMissingHash().toHexString(), e);
		} catch (BadSignatureException e) {
			// Shouldn't happen if Beliefs are already validated
			// e.printStackTrace();
			throw new Error("Bad Signature in belief update!", e);
		} catch (InvalidDataException e) {
			// Shouldn't happen if Beliefs are already validated
			// e.printStackTrace();
			throw new Error("Invalid data in belief update!", e);
		}
	}

	private void processStatus(Message m) {
		try {
			// We can ignore payload

			log.trace( "Processing status request from: {}" ,m.getOriginString());
			// log.log(LEVEL_MESSAGE, "Processing query: " + form + " with address: " +
			// address);

			Peer peer=this.getPeer();
			Hash beliefHash=peer.getSignedBelief().getHash();
			Hash stateHash=peer.getStates().getHash();
			Hash initialStateHash=peer.getStates().get(0).getHash();
			AccountKey peerKey=getPeerKey();
			Hash consensusHash=peer.getConsensusState().getHash();

			AVector<ACell> reply=Vectors.of(beliefHash,stateHash,initialStateHash,peerKey,consensusHash);

			m.reportResult(m.getID(), reply);
		} catch (Throwable t) {
			log.warn("Status Request Error: {}", t);
		}
	}

	private void processChallenge(Message m) {
		manager.processChallenge(m, peer);
	}

	private void processResponse(Message m) {
		manager.processResponse(m, peer);
	}

	private void processQuery(Message m) {
		try {
			// query is a vector [id , form, address?]
			AVector<ACell> v = m.getPayload();
			CVMLong id = (CVMLong) v.get(0);
			ACell form = v.get(1);

			// extract the Address, might be null
			Address address = RT.ensureAddress(v.get(2));

			log.debug( "Processing query: {} with address: {}" , form, address);
			// log.log(LEVEL_MESSAGE, "Processing query: " + form + " with address: " +
			// address);
			Context<ACell> resultContext = peer.executeQuery(form, address);
			
			// Report result back to message sender
			boolean resultReturned= m.reportResult(Result.fromContext(id, resultContext));

			if (!resultReturned) {
				log.warn("Failed to send query result back to client with ID: {}", id);
			}

		} catch (Throwable t) {
			log.warn("Query Error: {}", t);
		}
	}

	private void processData(Message m) {
		ACell payload = m.getPayload();

		// TODO: be smarter about this? hold a per-client queue for a while?
		Ref<ACell> r = Ref.get(payload);
		r = r.persistShallow();
		Hash payloadHash = r.getHash();

		if (log.isTraceEnabled()) {
			log.trace( "Processing DATA of type: " + Utils.getClassName(payload) + " with hash: "
					+ payloadHash.toHexString() + " and encoding: " + Format.encodedBlob(payload).toHexString());
		}
		// if our data satisfies a missing data object, need to process it
		maybeProcessPartial(r.getHash());
	}

	/**
	 * Process an incoming message that represents a Belief
	 *
	 * @param m
	 */
	private void processBelief(Message m) {
		ACell o = m.getPayload();

		Ref<ACell> ref = Ref.get(o);
		try {
			// check we can persist the new belief
			// May also pick up cached signature verification if already held
			ref = ref.persist();

			@SuppressWarnings("unchecked")
			SignedData<ACell> receivedBelief = (SignedData<ACell>) o;
			receivedBelief.validateSignature();

			// TODO: validate trusted connection?
			// TODO: can drop Beliefs if under pressure?
			
			if (!(receivedBelief.getValue() instanceof Belief)) {
				Result r=Result.create(m.getID(), Strings.BAD_FORMAT, ErrorCodes.FORMAT);
				m.reportResult(r);
				return;
			}

			eventQueue.put(receivedBelief);
		} catch (ClassCastException e) {
			// bad message?
			log.warn("Exception due to bad message from peer? {}" ,e);
		} catch (BadSignatureException e) {
			// we got sent a bad signature.
			// TODO: Probably need to slash peer? but ignore for now
			log.warn("Bad signed belief from peer: " + Utils.print(o));
		} catch (InterruptedException e) {
			throw Utils.sneakyThrow(e);
		}
	}

	/*
	 * Loop to process messages from the receive queue
	 */
	private Runnable receiverLoop = new Runnable() {
		@Override
		public void run() {
			Stores.setCurrent(getStore()); // ensure the loop uses this Server's store

			try {
				log.debug("Reciever thread started for peer at {}", getHostAddress());

				while (isRunning) { // loop until server terminated
					Message m = receiveQueue.poll(100, TimeUnit.MILLISECONDS);
					if (m != null) {
						processMessage(m);
					}
				}

				log.debug("Reciever thread terminated normally for peer {}", this);
			} catch (InterruptedException e) {
				log.debug("Receiver thread interrupted ");
			} catch (Throwable e) {
				log.warn("Receiver thread terminated abnormally! ");
				log.error("Server FAILED: " + e.getMessage());
				e.printStackTrace();
			}
		}
	};

	/*
	 * Runnable loop for managing Server belief merges
	 */
	private final Runnable beliefMergeLoop = new Runnable() {
		@Override
		public void run() {
			Stores.setCurrent(getStore()); // ensure the loop uses this Server's store
			try {
				// loop while the server is running
				while (isRunning) {
					// Try belief update
					boolean beliefUpdated=maybeUpdateBelief();
					if (beliefUpdated) {
						raiseServerChange("consensus");
					}

					long timestamp=Utils.getCurrentTimestamp();

					// Broadcast Belief if changed or otherwise not done recently
					if (beliefUpdated||((lastBroadcastBelief+Constants.MAX_REBROADCAST_DELAY)<timestamp)) {
						broadcastBelief(peer.getBelief());
					}

					// Wait for new events (Beliefs, Transactions) before the next iteration
					awaitEvents();
				}
			} catch (InterruptedException e) {
				log.debug("Belief merge thread interrupted ");
			} catch (Throwable e) {
				log.warn("Belief merge thread terminated abnormally! ");
				log.error("Server FAILED: " + e.getMessage());
				e.printStackTrace();
			}
		}
	};

	/**
	 * Polls for new events (Beliefs, Transactions) on the event queue and accumulates
	 * them into newTransactions / newBeliefs for the update loop. May block briefly.
	 *
	 * @throws InterruptedException
	 */
	@SuppressWarnings("unchecked")
	private void awaitEvents() throws InterruptedException {
		SignedData<ACell> firstEvent=eventQueue.poll(SERVER_UPDATE_PAUSE, TimeUnit.MILLISECONDS);
		if (firstEvent==null) return;
		ArrayList<SignedData<ACell>> allEvents=new ArrayList<>();
		allEvents.add(firstEvent);
		eventQueue.drainTo(allEvents);
		for (SignedData<ACell> signedEvent: allEvents) {
			ACell event=signedEvent.getValue();
			if (event instanceof ATransaction) {
				SignedData<ATransaction> receivedTrans=(SignedData<ATransaction>)signedEvent;
				newTransactions.add(receivedTrans);
			} else if (event instanceof Belief) {
				SignedData<Belief> receivedBelief=(SignedData<Belief>)signedEvent;
				AccountKey addr = receivedBelief.getAccountKey();
				SignedData<Belief> current = newBeliefs.get(addr);
				// Make sure the Belief is the latest from a Peer
				if ((current == null) || (current.getValue().getTimestamp() <= receivedBelief.getValue()
						.getTimestamp())) {
					// Add to map of new Beliefs received for each Peer
					newBeliefs.put(addr, receivedBelief);

					// Notify the update thread that there is something new to handle
					log.debug("Valid belief received by peer at {}: {}"
							,getHostAddress(),receivedBelief.getValue().getHash());
				}
			} else {
				log.debug("Unexpected type in event queue! {}",event.getType());
			}
		}
	}

	private void reportTransactions(Block block, BlockResult br) {
		// TODO: consider culling old interests after some time period
		int nTrans = block.length();
		for (long j = 0; j < nTrans; j++) {
			try {
				SignedData<ATransaction> t = block.getTransactions().get(j);
				Hash h = t.getHash();
				Message m = interests.get(h);
				if (m != null) {
					ACell id = m.getID();
					log.trace("Returning tranaction result ID {} to {}", id,m.getOriginString());
					Result res = br.getResults().get(j);

					m.reportResult(res);
					
					interests.remove(h);
				}
			} catch (Throwable e) {
				log.warn("Exception while sending Result: ",e);
				// ignore
			}
		}
	}

	/**
	 * Gets the port that this Server is currently accepting connections on
	 * @return Port number
	 */
	public int getPort() {
		return nio.getPort();
	}

	@Override
	public void finalize() {
		close();
	}

	/**
	 * Writes the Peer data to the configured store.
	 * 
	 * Note: Does not flush buffers to disk. 
	 *
	 * This will overwrite any previously persisted peer data.
	 */
	public void persistPeerData() {
		AStore tempStore = Stores.current();
		try {
			Stores.setCurrent(store);
			ACell peerData = peer.toData();
			store.setRootData(peerData);
			log.info( "Stored peer data for Server with hash: {}", peerData.getHash().toHexString());
		} catch (Throwable e) {
			log.warn("Failed to persist peer state when closing server: {}" ,e.getMessage());
		} finally {
			Stores.setCurrent(tempStore);
		}
	}
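
	// Illustrative sketch: persistence is driven by config keywords used in this class.
	// With Keywords.PERSIST set, close() calls persistPeerData(); with Keywords.RESTORE
	// set, establishPeer() attempts Peer.restorePeer(...) from the same store on the
	// next startup. The boolean values are examples.
	//
	//   config.put(Keywords.PERSIST, true);
	//   config.put(Keywords.RESTORE, true);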

	@Override
	public void close() {
		// persist peer state if necessary
		if ((peer != null) && Utils.bool(getConfig().get(Keywords.PERSIST))) {
			try {
				persistPeerData();
			} catch (Throwable t) {
				log.warn("Exception persisting peer data: {}", t);
			}
		}

		// TODO: not much point signing this?
		SignedData signedPeerKey = peer.sign(peer.getPeerKey());
		Message msg = Message.createGoodBye(signedPeerKey);

		// broadcast GOODBYE message to all outgoing remote peers
		manager.broadcast(msg, false);

		isRunning = false;
		if (updateThread != null) {
			updateThread.interrupt();
			try {
				updateThread.join(100);
			} catch (InterruptedException e) {
				// Ignore
			}
		}
		if (receiverThread != null) {
			receiverThread.interrupt();
			try {
				receiverThread.join(100);
			} catch (InterruptedException e) {
				// Ignore
			}
		}
		manager.close();
		nio.close();
		// Note we don't do store.close(); because we don't own the store.
	}

	/**
	 * Gets the host address for this Server (including port), or null if closed
	 *
	 * @return Host Address
	 */
	public InetSocketAddress getHostAddress() {
		return nio.getHostAddress();
	}

	/**
	 * Returns the Keypair for this peer server
	 *
	 * SECURITY: Be careful with this!
	 * @return Key pair for Peer
	 */
	public AKeyPair getKeyPair() {
		return getPeer().getKeyPair();
	}

	/**
	 * Gets the public key of the peer account
	 *
	 * @return AccountKey of this Peer
	 */
	public AccountKey getPeerKey() {
		AKeyPair kp = getKeyPair();
		if (kp == null) return null;
		return kp.getAccountKey();
	}

	/**
	 * Gets the Store configured for this Server. A server must consistently use the
	 * same store instance for all Server threads, as values may be shared.
	 *
	 * @return Store instance
	 */
	public AStore getStore() {
		return store;
	}

	/**
	 * Reports a server change event to the registered hook, if any
	 * @param reason Message for server change
	 */
	public void raiseServerChange(String reason) {
		if (eventHook != null) {
			ServerEvent serverEvent = ServerEvent.create(this, reason);
			eventHook.onServerChange(serverEvent);
		}
	}
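
	// Illustrative sketch: the hook receiving these change events is supplied via the
	// Keywords.EVENT_HOOK config entry checked in the constructor. The lambda form
	// assumes IServerEvent can be used as a functional interface exposing
	// onServerChange(ServerEvent); if it declares more methods, use an explicit
	// implementation instead.
	//
	//   config.put(Keywords.EVENT_HOOK,
	//       (IServerEvent) evt -> log.info("Server change: {}", evt));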

	public ConnectionManager getConnectionManager() {
		return manager;
	}

	public HashMap<Keyword, Object> getConfig() {
		return config;
	}

	public Consumer<Message> getReceiveAction() {
		return peerReceiveAction;
	}

	/**
	 * Sets the desired host name for this Server
	 * @param string Desired host name String, e.g. "my-domain.com:12345"
	 */
	public void setHostname(String string) {
		hostname=string;
	}

	public boolean isLive() {
		return isRunning;
	}
}



