
io.jsync.app.core.Manager Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of jsync.io Show documentation
jsync.io is a non-blocking, event-driven networking framework for Java
package io.jsync.app.core;
import com.hazelcast.client.HazelcastClient;
import com.hazelcast.client.config.ClientAwsConfig;
import com.hazelcast.client.config.ClientConfig;
import com.hazelcast.client.config.ClientNetworkConfig;
import com.hazelcast.config.*;
import com.hazelcast.core.*;
import com.hazelcast.map.merge.LatestUpdateMapMergePolicy;
import io.jsync.Async;
import io.jsync.AsyncFactory;
import io.jsync.app.core.persistence.DataPersistor;
import io.jsync.app.core.persistence.impl.DynamoDBDataPersistor;
import io.jsync.app.core.persistence.impl.MongoDataPersistor;
import io.jsync.json.JsonObject;
import io.jsync.spi.cluster.impl.HazelcastClusterManager;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
/**
 * Manager acts as a conductor for the clustering capabilities in the ClusterApp.
 *
 * It is mainly used to set up the cluster and everything needed to create a working cluster.
 */
public class Manager {
private HazelcastInstance hazelcast;
private Async clusteredAsync;
private Logger logger;
private String lifecycleListener;
private String nodeIdentifier = null;
private Cluster cluster = null;
private Set startupHandlers = new LinkedHashSet<>();
protected Manager(Cluster parent) {
if (parent == null) {
throw new NullPointerException();
}
this.cluster = parent;
// Since this is created at the creation of the Cluster we don't want to call parent.config() yet.
this.logger = parent.logger();
}
@Deprecated
public void addReadyHandler(Runnable handler) {
addStartupHandler(handler);
}
@Deprecated
public void removeReadyHandler(Runnable handler) {
removeStartupHandler(handler);
}
public void addStartupHandler(Runnable handler) {
startupHandlers.add(handler);
}
public void removeStartupHandler(Runnable handler) {
if (startupHandlers.contains(handler)) {
startupHandlers.remove(handler);
}
}
public boolean clientMode() {
return cluster.config().rawConfig().getObject("cluster", new JsonObject())
.getString("hazelcast_mode", "node").equals("client");
}
public String nodeId() {
return nodeIdentifier;
}
protected void start() {
logger.info("Manager is initializing Hazelcast.");
Config config = cluster.config();
com.hazelcast.config.Config hazelcastConfig = HazelcastClusterManager.getDefaultConfig();
// IMPORTANT - KEEP HERE
MapConfig persistentMapConfig = new MapConfig();
persistentMapConfig.setName("persistent.*");
persistentMapConfig.setInMemoryFormat(InMemoryFormat.BINARY);
persistentMapConfig.setBackupCount(1);
persistentMapConfig.setEvictionPolicy(EvictionPolicy.NONE);
persistentMapConfig.setMergePolicy(LatestUpdateMapMergePolicy.class.getName());
MapStoreConfig mapStoreConfig = new MapStoreConfig();
mapStoreConfig.setInitialLoadMode(MapStoreConfig.InitialLoadMode.EAGER);
mapStoreConfig.setWriteDelaySeconds(0);
mapStoreConfig.setEnabled(true);
try {
JsonObject clusterConfig = config.rawConfig().getObject("cluster", new JsonObject());
String defaultPersistor = clusterConfig.getString("persistence_type", "mongo");
switch (defaultPersistor) {
case "mongo":
case "io.jsync.app.core.persistence.MongoDataPersistor":
defaultPersistor = MongoDataPersistor.class.getCanonicalName();
break;
case "dynamodb":
case "io.jsync.app.core.persistence.DynamoDBDataPersistor":
defaultPersistor = DynamoDBDataPersistor.class.getCanonicalName();
break;
default:
// By default the MongoDataPersistor is the default DataPersistor
defaultPersistor = MongoDataPersistor.class.getCanonicalName();
break;
}
String persistor = clusterConfig.getString("data_persistor", defaultPersistor);
Class persistorClass = null;
ClassLoader classLoader = getClass().getClassLoader();
Class clazz = classLoader.loadClass(persistor);
if (clazz != null) {
if (DataPersistor.class.isAssignableFrom(clazz)) {
logger.info("Using \"" + persistor + "\" as the default DataPersistor.");
persistorClass = clazz;
} else {
logger.info("\"" + persistor + "\" is not a valid DataPersistor.");
}
}
if (persistorClass == null) {
logger.fatal("Could not find the default DataPersistor.");
}
clusterConfig.removeField("persistence_type");
clusterConfig.putString("data_persistor", persistor);
config.rawConfig().putObject("cluster", clusterConfig);
config.save();
final Class finalPersistorClass = persistorClass;
mapStoreConfig.setFactoryImplementation((MapStoreFactory) (s, properties) -> {
try {
DataPersistor newInstance = (DataPersistor) finalPersistorClass.newInstance();
if (newInstance != null) {
return newInstance;
}
} catch (InstantiationException | IllegalAccessException e) {
e.printStackTrace();
}
return null;
});
} catch (Exception e) {
throw new RuntimeException(e);
}
// -- IMPORTANT --
// We must create a map config for all data that we want to be persistent outside
// of the default data persistence.. This will custom maps to be persisted.
persistentMapConfig.setMapStoreConfig(mapStoreConfig);
hazelcastConfig.addMapConfig(persistentMapConfig);
JsonObject rawConfig = config.rawConfig();
// Read the clusterName and clusterPassword..
GroupConfig groupConfig = hazelcastConfig.getGroupConfig();
groupConfig.setName(config.clusterName());
groupConfig.setPassword(config.clusterPassword());
// TODO add more customizations..
if (rawConfig.containsField("hazelcast")) {
JsonObject jhzConfig =
rawConfig.getObject("hazelcast", new JsonObject());
if (jhzConfig.containsField("group")) {
JsonObject groupOverride = jhzConfig.getObject("group", new JsonObject());
if (groupOverride.containsField("name")) {
groupConfig.setName(groupOverride.getString("name"));
}
if (groupOverride.containsField("password")) {
groupConfig.setName(groupOverride.getString("password"));
}
}
if (jhzConfig.containsField("management_center")) {
JsonObject managementOverride = jhzConfig.getObject("management_center", new JsonObject());
ManagementCenterConfig managementCenterConfig = hazelcastConfig.getManagementCenterConfig();
if (managementOverride.containsField("url")) {
managementCenterConfig.setUrl(managementOverride.getString("url", "http://localhost:8080/mancenter"));
}
if (managementOverride.containsField("enabled")) {
managementCenterConfig.setEnabled(managementOverride.getBoolean("enabled", false));
}
}
}
MemberAttributeConfig memberAttributeConfig = new MemberAttributeConfig();
memberAttributeConfig.setStringAttribute("cluster.member.role", config.clusterRole());
hazelcastConfig.setMemberAttributeConfig(memberAttributeConfig);
// This makes it easier to change the hazelcast mode
if (clientMode()) {
logger.info("Running in CLIENT mode.");
// Pull some things from the original config
// TODO implement ClusterManagerFactory method
ClientConfig clientConfig = new ClientConfig();
clientConfig.setGroupConfig(hazelcastConfig.getGroupConfig());
ClientNetworkConfig clientNetworkConfig = new ClientNetworkConfig();
// We pull the TCP/IP Config from cluster.xml and connect to the IP addresses listed
for (String key : hazelcastConfig.getNetworkConfig().getJoin().getTcpIpConfig().getMembers()) {
clientNetworkConfig.addAddress(key);
}
AwsConfig originalAwsConfig = hazelcastConfig.getNetworkConfig().getJoin().getAwsConfig();
ClientAwsConfig clientAwsConfig = new ClientAwsConfig();
clientAwsConfig.setAccessKey(originalAwsConfig.getAccessKey());
clientAwsConfig.setSecretKey(originalAwsConfig.getSecretKey());
clientAwsConfig.setRegion(originalAwsConfig.getRegion());
clientAwsConfig.setEnabled(originalAwsConfig.isEnabled());
clientAwsConfig.setSecurityGroupName(originalAwsConfig.getSecurityGroupName());
clientAwsConfig.setConnectionTimeoutSeconds(originalAwsConfig.getConnectionTimeoutSeconds());
clientAwsConfig.setHostHeader(originalAwsConfig.getHostHeader());
clientAwsConfig.setTagKey(originalAwsConfig.getTagKey());
clientAwsConfig.setTagValue(originalAwsConfig.getTagValue());
clientNetworkConfig.setAwsConfig(clientAwsConfig);
clientNetworkConfig.setSSLConfig(hazelcastConfig.getNetworkConfig().getSSLConfig());
clientConfig.setNetworkConfig(clientNetworkConfig);
clientConfig.setClassLoader(hazelcastConfig.getClassLoader());
clientConfig.setListenerConfigs(hazelcastConfig.getListenerConfigs());
clientConfig.setSerializationConfig(hazelcastConfig.getSerializationConfig());
hazelcast = HazelcastClient.newHazelcastClient(clientConfig);
// The nodeID is generated locally because we are in client mode..
} else {
logger.info("Using normal hazelcast instance for startup.");
hazelcast = Hazelcast.newHazelcastInstance(hazelcastConfig);
}
// We are calling this so the next clustered async uses this HazelcastInstance
HazelcastClusterManager.setNextInstance(hazelcast);
CountDownLatch clusteredAsyncCreateLatch = new CountDownLatch(1);
// First we need to check if the clusteredAsync instance is not null. If it is we will create a clusteredAsync instance
if (clusteredAsync == null) {
AsyncFactory.newAsync(0, config.clusterHost(), result -> {
if (!result.succeeded()) {
logger.fatal("Failed to create new async instance: " + result.cause());
clusteredAsyncCreateLatch.countDown();
return;
}
clusteredAsync = result.result();
nodeIdentifier = clusteredAsync.clusterManager().getNodeID();
clusteredAsyncCreateLatch.countDown();
});
}
try {
clusteredAsyncCreateLatch.await();
} catch (InterruptedException e) {
e.printStackTrace();
}
if (clusteredAsync == null) {
logger.fatal("Manager could not start because the clusteredAsync instance could not be created.");
return;
}
lifecycleListener = hazelcast.getLifecycleService().addLifecycleListener(new LifecycleListener() {
LifecycleEvent.LifecycleState lastState;
@Override
public void stateChanged(LifecycleEvent event) {
if (event.getState() == LifecycleEvent.LifecycleState.SHUTDOWN) {
logger.error("Hazelcast has shut down. We are leaving the cluster..");
cluster.leave();
} else if (event.getState() == LifecycleEvent.LifecycleState.CLIENT_CONNECTED) {
if (lastState == LifecycleEvent.LifecycleState.CLIENT_DISCONNECTED) {
// TODO -
}
}
lastState = event.getState();
}
});
cluster.clusteredAsync = clusteredAsync;
// IT IS SAFE TO SAY WE HAVE CONNECTED WITH THE CLUSTER
if (!clientMode()) {
PartitionService partitionService = hazelcast.getPartitionService();
// We are going to give it five minutes so we can force this memeber of the cluster to be ready
if (!partitionService.isClusterSafe()) {
partitionService.forceLocalMemberToBeSafe(5, TimeUnit.MINUTES);
}
}
// The cluster is now ready! We can start our services after we have executed all startupHandlers
for (Runnable handler : startupHandlers) {
handler.run();
}
cluster.startServices();
logger.info("Manager has started with the nodeId " + nodeId());
}
protected void stop() {
logger.info("Manager is stopping..");
// We want to remove the lifecycleListener so we don't catch that we are shutting ourself down.
if (lifecycleListener != null) {
// We can shut this down.
if (hazelcast.getLifecycleService().isRunning()) {
hazelcast.getLifecycleService().removeLifecycleListener(lifecycleListener);
}
}
cluster.stopServices();
PartitionService partitionService = hazelcast.getPartitionService();
if (!partitionService.isClusterSafe()) {
partitionService.addMigrationListener(new MigrationListener() {
private boolean stopping = false;
private void checkCluster() {
// Check stopping so we don't run it too many times
if (partitionService.isClusterSafe() && !stopping) {
stopping = true;
clusteredAsync.stop();
clusteredAsync = null;
if (hazelcast.getLifecycleService().isRunning()) {
hazelcast.shutdown();
}
logger.info("Manager has stopped..");
}
}
@Override
public void migrationStarted(MigrationEvent migrationEvent) {
// We really don't care about this
checkCluster();
}
@Override
public void migrationCompleted(MigrationEvent migrationEvent) {
checkCluster();
}
@Override
public void migrationFailed(MigrationEvent migrationEvent) {
// We also don't care about this
checkCluster();
}
});
} else {
clusteredAsync.stop();
clusteredAsync = null;
if (hazelcast.getLifecycleService().isRunning()) {
hazelcast.shutdown();
}
logger.info("Manager has stopped..");
}
}
protected HazelcastInstance hazelcast() {
return hazelcast;
}
}
© 2015 - 2025 Weber Informatics LLC | Privacy Policy